#define IXGBE_MIN_RING_DESC 32
#define IXGBE_MAX_RING_DESC 4096
#define RTE_PMD_IXGBE_TX_MAX_BURST 32
#define RTE_PMD_IXGBE_RX_MAX_BURST 32
#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64
#define RTE_IXGBE_DESCS_PER_LOOP 4
#ifdef RTE_IXGBE_INC_VECTOR
#define RTE_IXGBE_RXQ_REARM_THRESH 32
#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
#endif
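/*
 * Rx descriptor ring allocation size: RTE_PMD_IXGBE_RX_MAX_BURST extra
 * descriptors are reserved so the bulk-alloc Rx scan can safely read past
 * the end of the ring.
 */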
#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_PMD_IXGBE_RX_MAX_BURST) * \
sizeof(union ixgbe_adv_rx_desc))
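/*
 * Fill in the device capabilities reported through rte_eth_dev_info_get():
 * queue counts, MAC/VMDq limits, offload capabilities, default thresholds,
 * descriptor limits and supported link speeds.
 */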
static int
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
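	/* Start from the queue limits advertised by the MAC layer */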
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
/*
* When DCB/VT is off, maximum number of queues changes,
* except for 82598EB, which remains constant.
*/
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
hw->mac.type != ixgbe_mac_82598EB)
dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
}
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
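	/* 82598EB supports at most 16 VMDq pools; later MACs support 64 */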
if (hw->mac.type == ixgbe_mac_82598EB)
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
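	/* Default per-queue Rx configuration advertised to applications */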
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
.offloads = 0,
};
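	/* Default per-queue Tx configuration advertised to applications */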
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_thresh = {
.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
},
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
.offloads = 0,
};
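	/* Descriptor ring size and segment limits */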
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
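	/* Base link speeds; extended below for specific MAC and PHY variants */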
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
dev_info->speed_capa = ETH_LINK_SPEED_10M |
ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
if (hw->mac.type == ixgbe_mac_X540 ||
hw->mac.type == ixgbe_mac_X540_vf ||
hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550_vf) {
dev_info->speed_capa |= ETH_LINK_SPEED_100M;
}
if (hw->mac.type == ixgbe_mac_X550) {
dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}
/* Driver-preferred Rx/Tx parameters */
dev_info->default_rxportconf.burst_size = 32;
dev_info->default_txportconf.burst_size = 32;
dev_info->default_rxportconf.nb_queues = 1;
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_rxportconf.ring_size = 256;
dev_info->default_txportconf.ring_size = 256;
return 0;
}