@@ -113,11 +113,6 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
-#undef container_of
-#define container_of(ptr, type, member) ({ \
- typeof(((type *)0)->member)(*__mptr) = (ptr); \
- (type *)((char *)__mptr - offsetof(type, member)); })
-
/* Number of Transmit Descriptors must be a multiple of 8. */
#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
/* Number of Receive Descriptors must be a multiple of 32 if
@@ -568,11 +563,6 @@ struct i40e_hw {
/* debug mask */
u32 debug_mask;
-
- bool rx_bulk_alloc_allowed;
- bool rx_vec_allowed;
- bool tx_simple_allowed;
- bool tx_vec_allowed;
};

static inline bool i40e_is_vf(struct i40e_hw *hw)
@@ -979,9 +969,6 @@ enum i40e_tx_desc_cmd_bits {
#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
I40E_TXD_QW1_OFFSET_SHIFT)
-#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
- I40E_TX_DESC_CMD_EOP)
-
enum i40e_tx_desc_length_fields {
/* Note: These are predefined bit offsets */
I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
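The first three hunks strip DPDK-only definitions out of the shared base-code header: the container_of macro and the I40E_TD_CMD command bits both reappear in i40e_rxtx.h in the final hunk of this patch, next to the only code that uses them. For reference, container_of recovers a pointer to an enclosing struct from a pointer to one of its members. A minimal GNU C sketch of the idiom; the demo struct and field names are invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) ({ \
            typeof(((type *)0)->member)(*__mptr) = (ptr); \
            (type *)((char *)__mptr - offsetof(type, member)); })

    struct demo_rxq {
            int port_id;
            int queue_id;
    };

    int main(void)
    {
            struct demo_rxq q = { .port_id = 3, .queue_id = 7 };
            int *mp = &q.queue_id;
            /* Subtract the member's offset from its address to get
             * back to the start of the enclosing struct. */
            struct demo_rxq *back = container_of(mp, struct demo_rxq, queue_id);
            printf("port=%d queue=%d\n", back->port_id, back->queue_id);
            return 0;
    }

The statement-expression and typeof extensions require GCC or Clang; the #undef before the driver's own definition guards against a colliding definition from another header.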
@@ -670,18 +670,19 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
int ret;

/* Initialize to TRUE. If any of Rx queues doesn't meet the
* bulk allocation or vector Rx preconditions we will reset it.
*/
- hw->rx_bulk_alloc_allowed = true;
- hw->rx_vec_allowed = true;
- hw->tx_simple_allowed = true;
- hw->tx_vec_allowed = true;
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
ret = i40e_fdir_setup(pf);
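With the flags relocated, i40e_dev_configure resolves the adapter rather than the hw struct from dev_private. Both accessors are conventionally thin casts, since dev_private points at the i40e_adapter itself; a sketch of the pattern, with the layout assumed rather than quoted from the tree:

    /* dev_private holds the i40e_adapter, which embeds the shared-code
     * hw struct; both macros are plain casts/member references. */
    #define I40E_DEV_PRIVATE_TO_ADAPTER(adapter) \
            ((struct i40e_adapter *)(adapter))
    #define I40E_DEV_PRIVATE_TO_HW(adapter) \
            (&((struct i40e_adapter *)(adapter))->hw)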
@@ -462,6 +462,12 @@ struct i40e_adapter {
struct i40e_pf pf;
struct i40e_vf vf;
};
+
+ /* for vector PMD */
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ bool tx_simple_allowed;
+ bool tx_vec_allowed;
};

int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
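This hunk is the core of the change: struct i40e_hw lives in the base-code headers that are periodically re-synced from Intel's shared driver sources, so DPDK-specific state such as these vector-PMD capability flags has no business there. struct i40e_adapter is the DPDK-private per-port container, so the four flags land beside, not inside, the shared struct. The abridged shape after the patch, with member ordering assumed:

    struct i40e_adapter {
            struct i40e_hw hw;        /* shared base code, re-synced from Intel */
            union {                   /* PF- or VF-specific state */
                    struct i40e_pf pf;
                    struct i40e_vf vf;
            };
            /* for vector PMD */
            bool rx_bulk_alloc_allowed;
            bool rx_vec_allowed;
            bool tx_simple_allowed;
            bool tx_vec_allowed;
    };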
@@ -1277,15 +1277,16 @@ PMD_REGISTER_DRIVER(rte_i40evf_driver);
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
* allocation or vector Rx preconditions we will reset it.
*/
- hw->rx_bulk_alloc_allowed = true;
- hw->rx_vec_allowed = true;
- hw->tx_simple_allowed = true;
- hw->tx_vec_allowed = true;
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
return i40evf_init_vlan(dev);
}
@@ -2105,6 +2105,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
@@ -2226,7 +2228,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
"or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
"not enabled on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
- hw->rx_bulk_alloc_allowed = false;
+ ad->rx_bulk_alloc_allowed = false;
}
return 0;
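i40e_dev_rx_queue_setup clears rx_bulk_alloc_allowed as soon as one queue fails the bulk-allocation preconditions (or when RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is compiled out); because the flag is per-port, a single nonconforming queue disables the optimized path for every queue. A sketch of the kind of per-queue checks involved, with the helper name invented (the tree's own checker may differ in detail):

    /* Bulk allocation refills the ring rx_free_thresh descriptors at a
     * time, so the threshold must cover a full burst, stay below the
     * ring size, and divide the ring evenly. */
    static int
    rxq_bulk_alloc_preconditions_ok(const struct i40e_rx_queue *rxq)
    {
            return rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST &&
                   rxq->rx_free_thresh < rxq->nb_rx_desc &&
                   rxq->nb_rx_desc % rxq->rx_free_thresh == 0;
    }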
@@ -3064,27 +3066,28 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
void __attribute__((cold))
i40e_set_rx_function(struct rte_eth_dev *dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
uint16_t rx_using_sse, i;
/* In order to allow Vector Rx there are a few configuration
* conditions to be met and Rx Bulk Allocation should be allowed.
*/
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
if (i40e_rx_vec_dev_conf_condition_check(dev) ||
- !hw->rx_bulk_alloc_allowed) {
+ !ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
" Vector Rx preconditions",
dev->data->port_id);
- hw->rx_vec_allowed = false;
+ ad->rx_vec_allowed = false;
}
- if (hw->rx_vec_allowed) {
+ if (ad->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct i40e_rx_queue *rxq =
dev->data->rx_queues[i];
if (i40e_rxq_vec_setup(rxq)) {
- hw->rx_vec_allowed = false;
+ ad->rx_vec_allowed = false;
break;
}
}
@@ -3095,7 +3098,7 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
/* Set the non-LRO scattered callback: there are Vector and
* single allocation versions.
*/
- if (hw->rx_vec_allowed) {
+ if (ad->rx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
"callback (port=%d).",
dev->data->port_id);
@@ -3113,14 +3116,14 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
* - Bulk Allocation
* - Single buffer allocation (the simplest one)
*/
- } else if (hw->rx_vec_allowed) {
+ } else if (ad->rx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
"burst size no less than %d (port=%d).",
RTE_I40E_DESCS_PER_LOOP,
dev->data->port_id);
dev->rx_pkt_burst = i40e_recv_pkts_vec;
- } else if (hw->rx_bulk_alloc_allowed) {
+ } else if (ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
"will be used on port=%d.",
@@ -3153,7 +3156,8 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
void __attribute__((cold))
i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
@@ -3163,35 +3167,36 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
" can be enabled on this txq.");
} else {
- hw->tx_vec_allowed = false;
+ ad->tx_vec_allowed = false;
}
} else {
- hw->tx_simple_allowed = false;
+ ad->tx_simple_allowed = false;
}
}

void __attribute__((cold))
i40e_set_tx_function(struct rte_eth_dev *dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
int i;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (hw->tx_vec_allowed) {
+ if (ad->tx_vec_allowed) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct i40e_tx_queue *txq =
dev->data->tx_queues[i];
if (i40e_txq_vec_setup(txq)) {
- hw->tx_vec_allowed = false;
+ ad->tx_vec_allowed = false;
break;
}
}
}
}
- if (hw->tx_simple_allowed) {
- if (hw->tx_vec_allowed) {
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_vec;
} else {
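The Tx side mirrors the Rx cascade: tx_simple_allowed gates the two fast paths (no offloads, no multi-segment packets, per the I40E_SIMPLE_FLAGS check above) and tx_vec_allowed picks between them. Condensed, with the fallback names as used elsewhere in the driver:

    if (ad->tx_simple_allowed)
            dev->tx_pkt_burst = ad->tx_vec_allowed ?
                    i40e_xmit_pkts_vec :    /* vector simple path  */
                    i40e_xmit_pkts_simple;  /* scalar simple path  */
    else
            dev->tx_pkt_burst = i40e_xmit_pkts; /* full-featured path */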
@@ -57,6 +57,14 @@
#define I40E_RXBUF_SZ_1024 1024
#define I40E_RXBUF_SZ_2048 2048
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
+ I40E_TX_DESC_CMD_EOP)
+
enum i40e_header_split_mode {
i40e_header_split_none = 0,
i40e_header_split_enabled = 1,
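Finally, the two relocated definitions land in i40e_rxtx.h, where the Rx/Tx and vector code can reach them without touching base code. I40E_TD_CMD bakes the two command bits every data descriptor needs, insert-CRC and end-of-packet, into one constant, so the hot loop ORs a single value into the descriptor's quadword 1. A hedged sketch of that use: the helper name is invented, while the shift and dtype macro names come from the same descriptor-layout definitions in i40e_type.h:

    static inline void
    fill_tx_desc(volatile struct i40e_tx_desc *txdp,
                 uint64_t dma_addr, uint16_t data_len)
    {
            /* QW1 = descriptor type | command bits | buffer size */
            uint64_t qw1 = I40E_TX_DESC_DTYPE_DATA |
                    ((uint64_t)I40E_TD_CMD << I40E_TXD_QW1_CMD_SHIFT) |
                    ((uint64_t)data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);

            txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
            txdp->cmd_type_offset_bsz = rte_cpu_to_le_64(qw1);
    }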