[dpdk-dev,8/8] app/testpmd: set up DCB forwarding based on traffic class

Message ID 1443074591-19803-9-git-send-email-jingjing.wu@intel.com (mailing list archive)
State Superseded, archived
Headers

Commit Message

Jingjing Wu Sept. 24, 2015, 6:03 a.m. UTC
  This patch changes the testpmd DCB forwarding stream to make it
based on traffic class.
It also fixes some coding style issues.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
---
 app/test-pmd/cmdline.c |  39 +++++++-----
 app/test-pmd/config.c  | 159 +++++++++++++++++++++----------------------------
 app/test-pmd/testpmd.c | 151 +++++++++++++++++++++++++---------------------
 app/test-pmd/testpmd.h |  23 +------
 4 files changed, 176 insertions(+), 196 deletions(-)
  

Comments

Jijiang Liu Oct. 28, 2015, 1:46 a.m. UTC | #1
> -----Original Message-----
> From: Wu, Jingjing
> Sent: Thursday, September 24, 2015 2:03 PM
> To: dev@dpdk.org
> Cc: Wu, Jingjing; Liu, Jijiang; Zhang, Helin; Tao, Zhe; Pei, Yulong
> Subject: [PATCH 8/8] app/testpmd: set up DCB forwarding based on traffic
> class
> 
> This patch changes the testpmd DCB forwarding stream to make it based on
> traffic class.
> It also fixes some coding style issues.
> 
> Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
> ---
>  app/test-pmd/cmdline.c |  39 +++++++-----  app/test-pmd/config.c  | 159
> +++++++++++++++++++++----------------------------
>  app/test-pmd/testpmd.c | 151 +++++++++++++++++++++++++---------------------
>  app/test-pmd/testpmd.h |  23 +------
>  4 files changed, 176 insertions(+), 196 deletions(-)
> 
> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index
> 0f8f48f..2ec855f 100644
> --- a/app/test-pmd/cmdline.c
> +++ b/app/test-pmd/cmdline.c
> @@ -1999,37 +1999,46 @@ cmd_config_dcb_parsed(void *parsed_result,
>                          __attribute__((unused)) void *data)  {
>  	struct cmd_config_dcb *res = parsed_result;
> -	struct dcb_config dcb_conf;
>  	portid_t port_id = res->port_id;
>  	struct rte_port *port;
> +	uint8_t pfc_en;
> +	int ret;
> 
>  	port = &ports[port_id];
>  	/** Check if the port is not started **/
>  	if (port->port_status != RTE_PORT_STOPPED) {
> -		printf("Please stop port %d first\n",port_id);
> +		printf("Please stop port %d first\n", port_id);
>  		return;
>  	}
> 
> -	dcb_conf.num_tcs = (enum rte_eth_nb_tcs) res->num_tcs;
> -	if ((dcb_conf.num_tcs != ETH_4_TCS) && (dcb_conf.num_tcs !=
> ETH_8_TCS)){
> -		printf("The invalid number of traffic class,only 4 or 8
> allowed\n");
> +	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
> +		printf("The invalid number of traffic class,"
> +			" only 4 or 8 allowed.\n");
>  		return;
>  	}
> 
> -	/* DCB in VT mode */
> -	if (!strncmp(res->vt_en, "on",2))
> -		dcb_conf.dcb_mode = DCB_VT_ENABLED;
> +	if (nb_fwd_lcores < res->num_tcs) {
> +		printf("nb_cores shouldn't be less than number of TCs.\n");
> +		return;
> +	}
> +	if (!strncmp(res->pfc_en, "on", 2))
> +		pfc_en = 1;
>  	else
> -		dcb_conf.dcb_mode = DCB_ENABLED;
> +		pfc_en = 0;
> 
> -	if (!strncmp(res->pfc_en, "on",2)) {
> -		dcb_conf.pfc_en = 1;
> -	}
> +	/* DCB in VT mode */
> +	if (!strncmp(res->vt_en, "on", 2))
> +		ret = init_port_dcb_config(port_id, DCB_VT_ENABLED,
> +				(enum rte_eth_nb_tcs)res->num_tcs,
> +				pfc_en);
>  	else
> -		dcb_conf.pfc_en = 0;
> +		ret = init_port_dcb_config(port_id, DCB_ENABLED,
> +				(enum rte_eth_nb_tcs)res->num_tcs,
> +				pfc_en);
> +
> 
> -	if (init_port_dcb_config(port_id,&dcb_conf) != 0) {
> -		printf("Cannot initialize network ports\n");
> +	if (ret != 0) {
> +		printf("Cannot initialize network ports.\n");
>  		return;
>  	}
> 
> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index
> cf2aa6e..e10da57 100644
> --- a/app/test-pmd/config.c
> +++ b/app/test-pmd/config.c
> @@ -1128,113 +1128,92 @@ rss_fwd_config_setup(void)
>  	}
>  }
> 
> -/*
> - * In DCB and VT on,the mapping of 128 receive queues to 128 transmit
> queues.
> - */
> -static void
> -dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq) -{
> -	if(dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {
> -
> -		if (rxq < 32)
> -			/* tc0: 0-31 */
> -			*txq = rxq;
> -		else if (rxq < 64) {
> -			/* tc1: 64-95 */
> -			*txq =  (uint16_t)(rxq + 32);
> -		}
> -		else {
> -			/* tc2: 96-111;tc3:112-127 */
> -			*txq =  (uint16_t)(rxq/2 + 64);
> -		}
> -	}
> -	else {
> -		if (rxq < 16)
> -			/* tc0 mapping*/
> -			*txq = rxq;
> -		else if (rxq < 32) {
> -			/* tc1 mapping*/
> -			 *txq = (uint16_t)(rxq + 16);
> -		}
> -		else if (rxq < 64) {
> -			/*tc2,tc3 mapping */
> -			*txq =  (uint16_t)(rxq + 32);
> -		}
> -		else {
> -			/* tc4,tc5,tc6 and tc7 mapping */
> -			*txq =  (uint16_t)(rxq/2 + 64);
> -		}
> -	}
> -}
This code has been removed — how do we guarantee that the DCB function of the 82599 NIC still works normally?
>  /**
> - * For the DCB forwarding test, each core is assigned on every port multi-
> transmit
> - * queue.
> + * For the DCB forwarding test, each core is assigned on each traffic class.
>   *
>   * Each core is assigned a multi-stream, each stream being composed of
>   * a RX queue to poll on a RX port for input messages, associated with
> - * a TX queue of a TX port where to send forwarded packets.
> - * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
> - * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the
> two
> - * following rules:
> - * In VT mode,
> - *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
> - *    - TxQl = RxQj
> - * In non-VT mode,
> - *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
> - *    There is a mapping of RxQj to TxQl to be required,and the mapping was
> implemented
> - *    in dcb_rxq_2_txq_mapping function.
> + * a TX queue of a TX port where to send forwarded packets. All RX and
> + * TX queues are mapping to the same traffic class.
> + * If VMDQ and DCB co-exist, each traffic class on different POOLs
> + share
> + * the same core
>   */
>  static void

>  dcb_fwd_config_setup(void)
>  {
> -	portid_t   rxp;
> -	portid_t   txp;
> -	queueid_t  rxq;
> -	queueid_t  nb_q;
> +	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
> +	portid_t txp, rxp = 0;
> +	queueid_t txq, rxq = 0;
>  	lcoreid_t  lc_id;
> -	uint16_t sm_id;
> -
> -	nb_q = nb_rxq;
> +	uint16_t nb_rx_queue, nb_tx_queue;
> +	uint16_t i, j, k, sm_id = 0;
> +	uint8_t tc = 0;
> 
>  	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
>  	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
>  	cur_fwd_config.nb_fwd_streams =
> -		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
> +		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
> 
>  	/* reinitialize forwarding streams */
>  	init_fwd_streams();
> +	sm_id = 0;
> +	if ((rxp & 0x1) == 0)
> +		txp = (portid_t) (rxp + 1);
> +	else
> +		txp = (portid_t) (rxp - 1);
> +	/* get the dcb info on the first RX and TX ports */
> +	rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
> +	rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
> 
> -	setup_fwd_config_of_each_lcore(&cur_fwd_config);
> -	rxp = 0; rxq = 0;
>  	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
> -		/* a fwd core can run multi-streams */
> -		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb;
> sm_id++)
> -		{
> -			struct fwd_stream *fs;
> -			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx +
> sm_id];
> -			if ((rxp & 0x1) == 0)
> -				txp = (portid_t) (rxp + 1);
> -			else
> -				txp = (portid_t) (rxp - 1);
> -			fs->rx_port = fwd_ports_ids[rxp];
> -			fs->rx_queue = rxq;
> -			fs->tx_port = fwd_ports_ids[txp];
> -			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
> -				fs->tx_queue = rxq;
> -			else
> -				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
> -			fs->peer_addr = fs->tx_port;
> -			rxq = (queueid_t) (rxq + 1);
> -			if (rxq < nb_q)
> -				continue;
> -			rxq = 0;
> -			if (numa_support && (nb_fwd_ports <= (nb_ports >>
> 1)))
> -				rxp = (portid_t)
> -					(rxp + ((nb_ports >> 1) /
> nb_fwd_ports));
> -			else
> -				rxp = (portid_t) (rxp + 1);
> +		fwd_lcores[lc_id]->stream_nb = 0;
> +		fwd_lcores[lc_id]->stream_idx = sm_id;
> +		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
> +			/* if the nb_queue is zero, means this tc is
> +			 * not enabled on the POOL
> +			 */
> +			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
> +				break;
> +			k = fwd_lcores[lc_id]->stream_nb +
> +				fwd_lcores[lc_id]->stream_idx;
> +			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
> +			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
> +			nb_rx_queue =
> txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
> +			nb_tx_queue =
> txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
> +			for (j = 0; j < nb_rx_queue; j++) {
> +				struct fwd_stream *fs;
> +
> +				fs = fwd_streams[k + j];
> +				fs->rx_port = fwd_ports_ids[rxp];
> +				fs->rx_queue = rxq + j;
> +				fs->tx_port = fwd_ports_ids[txp];
> +				fs->tx_queue = txq + j % nb_tx_queue;
> +				fs->peer_addr = fs->tx_port;
> +			}
> +			fwd_lcores[lc_id]->stream_nb +=
> +
> 	rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
>  		}
> +		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
> +
> +		tc++;
> +		if (tc < rxp_dcb_info.nb_tcs)
> +			continue;
> +		/* Restart from TC 0 on next RX port */
> +		tc = 0;
> +		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
> +			rxp = (portid_t)
> +				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
> +		else
> +			rxp++;
> +		if (rxp >= nb_fwd_ports)
> +			return;
> +		/* get the dcb information on next RX and TX ports */
> +		if ((rxp & 0x1) == 0)
> +			txp = (portid_t) (rxp + 1);
> +		else
> +			txp = (portid_t) (rxp - 1);
> +		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp],
> &rxp_dcb_info);
> +		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp],
> &txp_dcb_info);
>  	}
>  }
> 
> @@ -1354,10 +1333,6 @@ pkt_fwd_config_display(struct fwd_config *cfg)
> void
>  fwd_config_display(void)
>  {
> -	if((dcb_config) && (nb_fwd_lcores == 1)) {
> -		printf("In DCB mode,the nb forwarding cores should be
> larger than 1\n");
> -		return;
> -	}
>  	fwd_config_setup();
>  	pkt_fwd_config_display(&cur_fwd_config);
>  }
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> c8ae909..25dadbc 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -182,9 +182,6 @@ uint8_t dcb_config = 0;
>  /* Whether the dcb is in testing status */  uint8_t dcb_test = 0;
> 
> -/* DCB on and VT on mapping is default */ -enum
> dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
> -
>  /*
>   * Configurable number of RX/TX queues.
>   */
> @@ -1840,115 +1837,131 @@ const uint16_t vlan_tags[] = {  };
> 
>  static  int
> -get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config
> *dcb_conf)
> +get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
> +		 enum dcb_mode_enable dcb_mode,
> +		 enum rte_eth_nb_tcs num_tcs,
> +		 uint8_t pfc_en)
>  {
> -        uint8_t i;
> +	uint8_t i;
> 
>  	/*
>  	 * Builds up the correct configuration for dcb+vt based on the vlan
> tags array
>  	 * given above, and the number of traffic classes available for use.
>  	 */
> -	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
> -		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
> -		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
> +	if (dcb_mode == DCB_VT_ENABLED) {
> +		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
> +				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
> +		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
> +				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
> 
>  		/* VMDQ+DCB RX and TX configrations */
> -		vmdq_rx_conf.enable_default_pool = 0;
> -		vmdq_rx_conf.default_pool = 0;
> -		vmdq_rx_conf.nb_queue_pools =
> -			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS :
> ETH_16_POOLS);
> -		vmdq_tx_conf.nb_queue_pools =
> -			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS :
> ETH_16_POOLS);
> -
> -		vmdq_rx_conf.nb_pool_maps =
> sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
> -		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
> -			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
> -			vmdq_rx_conf.pool_map[i].pools = 1 << (i %
> vmdq_rx_conf.nb_queue_pools);
> +		vmdq_rx_conf->enable_default_pool = 0;
> +		vmdq_rx_conf->default_pool = 0;
> +		vmdq_rx_conf->nb_queue_pools =
> +			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS :
> ETH_16_POOLS);
> +		vmdq_tx_conf->nb_queue_pools =
> +			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS :
> ETH_16_POOLS);
> +
> +		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf-
> >nb_queue_pools;
> +		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
> +			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
> +			vmdq_rx_conf->pool_map[i].pools =
> +				1 << (i % vmdq_rx_conf->nb_queue_pools);
>  		}
>  		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
> -			vmdq_rx_conf.dcb_tc[i] = i;
> -			vmdq_tx_conf.dcb_tc[i] = i;
> +			vmdq_rx_conf->dcb_tc[i] = i;
> +			vmdq_tx_conf->dcb_tc[i] = i;
>  		}
> 
> -		/*set DCB mode of RX and TX of multiple queues*/
> +		/* set DCB mode of RX and TX of multiple queues */
>  		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
>  		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
> -		if (dcb_conf->pfc_en)
> -			eth_conf->dcb_capability_en =
> ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
> -		else
> -			eth_conf->dcb_capability_en =
> ETH_DCB_PG_SUPPORT;
> -
> -		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf,
> &vmdq_rx_conf,
> -                                sizeof(struct rte_eth_vmdq_dcb_conf)));
> -		(void)(rte_memcpy(&eth_conf-
> >tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
> -                                sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
> -	}
> -	else {
> -		struct rte_eth_dcb_rx_conf rx_conf;
> -		struct rte_eth_dcb_tx_conf tx_conf;
> -
> -		/* queue mapping configuration of DCB RX and TX */
> -		if (dcb_conf->num_tcs == ETH_4_TCS)
> -			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
> -		else
> -			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
> -
> -		rx_conf.nb_tcs = dcb_conf->num_tcs;
> -		tx_conf.nb_tcs = dcb_conf->num_tcs;
> -
> -		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
> -			rx_conf.dcb_tc[i] = i;
> -			tx_conf.dcb_tc[i] = i;
> +	} else {
> +		struct rte_eth_dcb_rx_conf *rx_conf =
> +				&eth_conf->rx_adv_conf.dcb_rx_conf;
> +		struct rte_eth_dcb_tx_conf *tx_conf =
> +				&eth_conf->tx_adv_conf.dcb_tx_conf;
> +
> +		rx_conf->nb_tcs = num_tcs;
> +		tx_conf->nb_tcs = num_tcs;
> +
> +		for (i = 0; i < num_tcs; i++) {
> +			rx_conf->dcb_tc[i] = i;
> +			tx_conf->dcb_tc[i] = i;
>  		}
> -		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
> +		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
> +		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
>  		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
> -		if (dcb_conf->pfc_en)
> -			eth_conf->dcb_capability_en =
> ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
> -		else
> -			eth_conf->dcb_capability_en =
> ETH_DCB_PG_SUPPORT;
> -
> -		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf,
> &rx_conf,
> -                                sizeof(struct rte_eth_dcb_rx_conf)));
> -		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf,
> &tx_conf,
> -                                sizeof(struct rte_eth_dcb_tx_conf)));
>  	}
> 
> +	if (pfc_en)
> +		eth_conf->dcb_capability_en =
> +				ETH_DCB_PG_SUPPORT |
> ETH_DCB_PFC_SUPPORT;
> +	else
> +		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
> +
>  	return 0;
>  }
> 
>  int
> -init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
> +init_port_dcb_config(portid_t pid,
> +		     enum dcb_mode_enable dcb_mode,
> +		     enum rte_eth_nb_tcs num_tcs,
> +		     uint8_t pfc_en)
>  {
>  	struct rte_eth_conf port_conf;
> +	struct rte_eth_dev_info dev_info;
>  	struct rte_port *rte_port;
>  	int retval;
> -	uint16_t nb_vlan;
>  	uint16_t i;
> 
> -	/* rxq and txq configuration in dcb mode */
> -	nb_rxq = 128;
> -	nb_txq = 128;
> +	rte_eth_dev_info_get(pid, &dev_info);
> +
> +	/* If dev_info.vmdq_pool_base is greater than 0,
> +	 * the queue id of vmdq pools is started after pf queues.
> +	 */
> +	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base >
> 0) {
> +		printf("VMDQ_DCB multi-queue mode is nonsensical"
> +			" for port %d.", pid);
> +		return -1;
> +	}
> +
> +	/* Assume the ports in testpmd have the same dcb capability
> +	 * and has the same number of rxq and txq in dcb mode
> +	 */
> +	if (dcb_mode == DCB_VT_ENABLED) {
> +		nb_rxq = dev_info.max_rx_queues;
> +		nb_txq = dev_info.max_tx_queues;
> +	} else {
> +		/*if vt is disabled, use all pf queues */
> +		if (dev_info.vmdq_pool_base == 0) {
> +			nb_rxq = dev_info.max_rx_queues;
> +			nb_txq = dev_info.max_tx_queues;
> +		} else {
> +			nb_rxq = (queueid_t)num_tcs;
> +			nb_txq = (queueid_t)num_tcs;
> +
> +		}
> +	}
>  	rx_free_thresh = 64;
> 
> -	memset(&port_conf,0,sizeof(struct rte_eth_conf));
> +	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
>  	/* Enter DCB configuration status */
>  	dcb_config = 1;
> 
> -	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
>  	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
> -	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
> +	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
>  	if (retval < 0)
>  		return retval;
> 
>  	rte_port = &ports[pid];
> -	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct
> rte_eth_conf));
> +	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct
> rte_eth_conf));
> 
>  	rxtx_port_config(rte_port);
>  	/* VLAN filter */
>  	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
> -	for (i = 0; i < nb_vlan; i++){
> +	for (i = 0; i < RTE_DIM(vlan_tags); i++)
>  		rx_vft_set(pid, vlan_tags[i], 1);
> -	}
> 
>  	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
>  	map_port_queue_stats_mapping_registers(pid, rte_port); diff --git
> a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> d287274..5818fdd 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -255,25 +255,6 @@ enum dcb_mode_enable
>  	DCB_ENABLED
>  };
> 
> -/*
> - * DCB general config info
> - */
> -struct dcb_config {
> -	enum dcb_mode_enable dcb_mode;
> -	uint8_t vt_en;
> -	enum rte_eth_nb_tcs num_tcs;
> -	uint8_t pfc_en;
> -};
> -
> -/*
> - * In DCB io FWD mode, 128 RX queue to 128 TX queue mapping
> - */
> -enum dcb_queue_mapping_mode {
> -	DCB_VT_Q_MAPPING = 0,
> -	DCB_4_TCS_Q_MAPPING,
> -	DCB_8_TCS_Q_MAPPING
> -};
> -
>  #define MAX_TX_QUEUE_STATS_MAPPINGS 1024 /* MAX_PORT of 32 @ 32
> tx_queues/port */  #define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /*
> MAX_PORT of 32 @ 128 rx_queues/port */
> 
> @@ -537,7 +518,9 @@ void dev_set_link_down(portid_t pid);  void
> init_port_config(void);  void set_port_slave_flag(portid_t slave_pid);  void
> clear_port_slave_flag(portid_t slave_pid); -int init_port_dcb_config(portid_t
> pid,struct dcb_config *dcb_conf);
> +int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode,
> +		     enum rte_eth_nb_tcs num_tcs,
> +		     uint8_t pfc_en);
>  int start_port(portid_t pid);
>  void stop_port(portid_t pid);
>  void close_port(portid_t pid);
> --
> 2.4.0
  
Jingjing Wu Oct. 28, 2015, 2:04 a.m. UTC | #2
> -----Original Message-----
> From: Liu, Jijiang
> Sent: Wednesday, October 28, 2015 9:46 AM
> To: Wu, Jingjing; dev@dpdk.org
> Cc: Zhang, Helin; Tao, Zhe; Pei, Yulong
> Subject: RE: [PATCH 8/8] app/testpmd: set up DCB forwarding based on
> traffic class
> 
> > -}
> These codes are removed, and how to guarantee DCB function of 82599 NIC
> work normally?

In this patch, the queue mapping relationship is no longer hard-coded in testpmd.
It can be queried via the rte_eth_dev_get_dcb_info API, and the forwarding
is set up based on traffic class. So the DCB function of the 82599 NIC still works in testpmd.

Thanks
Jingjing
  

Patch

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0f8f48f..2ec855f 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1999,37 +1999,46 @@  cmd_config_dcb_parsed(void *parsed_result,
                         __attribute__((unused)) void *data)
 {
 	struct cmd_config_dcb *res = parsed_result;
-	struct dcb_config dcb_conf;
 	portid_t port_id = res->port_id;
 	struct rte_port *port;
+	uint8_t pfc_en;
+	int ret;
 
 	port = &ports[port_id];
 	/** Check if the port is not started **/
 	if (port->port_status != RTE_PORT_STOPPED) {
-		printf("Please stop port %d first\n",port_id);
+		printf("Please stop port %d first\n", port_id);
 		return;
 	}
 
-	dcb_conf.num_tcs = (enum rte_eth_nb_tcs) res->num_tcs;
-	if ((dcb_conf.num_tcs != ETH_4_TCS) && (dcb_conf.num_tcs != ETH_8_TCS)){
-		printf("The invalid number of traffic class,only 4 or 8 allowed\n");
+	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+		printf("The invalid number of traffic class,"
+			" only 4 or 8 allowed.\n");
 		return;
 	}
 
-	/* DCB in VT mode */
-	if (!strncmp(res->vt_en, "on",2))
-		dcb_conf.dcb_mode = DCB_VT_ENABLED;
+	if (nb_fwd_lcores < res->num_tcs) {
+		printf("nb_cores shouldn't be less than number of TCs.\n");
+		return;
+	}
+	if (!strncmp(res->pfc_en, "on", 2))
+		pfc_en = 1;
 	else
-		dcb_conf.dcb_mode = DCB_ENABLED;
+		pfc_en = 0;
 
-	if (!strncmp(res->pfc_en, "on",2)) {
-		dcb_conf.pfc_en = 1;
-	}
+	/* DCB in VT mode */
+	if (!strncmp(res->vt_en, "on", 2))
+		ret = init_port_dcb_config(port_id, DCB_VT_ENABLED,
+				(enum rte_eth_nb_tcs)res->num_tcs,
+				pfc_en);
 	else
-		dcb_conf.pfc_en = 0;
+		ret = init_port_dcb_config(port_id, DCB_ENABLED,
+				(enum rte_eth_nb_tcs)res->num_tcs,
+				pfc_en);
+
 
-	if (init_port_dcb_config(port_id,&dcb_conf) != 0) {
-		printf("Cannot initialize network ports\n");
+	if (ret != 0) {
+		printf("Cannot initialize network ports.\n");
 		return;
 	}
 
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index cf2aa6e..e10da57 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1128,113 +1128,92 @@  rss_fwd_config_setup(void)
 	}
 }
 
-/*
- * In DCB and VT on,the mapping of 128 receive queues to 128 transmit queues.
- */
-static void
-dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq)
-{
-	if(dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {
-
-		if (rxq < 32)
-			/* tc0: 0-31 */
-			*txq = rxq;
-		else if (rxq < 64) {
-			/* tc1: 64-95 */
-			*txq =  (uint16_t)(rxq + 32);
-		}
-		else {
-			/* tc2: 96-111;tc3:112-127 */
-			*txq =  (uint16_t)(rxq/2 + 64);
-		}
-	}
-	else {
-		if (rxq < 16)
-			/* tc0 mapping*/
-			*txq = rxq;
-		else if (rxq < 32) {
-			/* tc1 mapping*/
-			 *txq = (uint16_t)(rxq + 16);
-		}
-		else if (rxq < 64) {
-			/*tc2,tc3 mapping */
-			*txq =  (uint16_t)(rxq + 32);
-		}
-		else {
-			/* tc4,tc5,tc6 and tc7 mapping */
-			*txq =  (uint16_t)(rxq/2 + 64);
-		}
-	}
-}
-
 /**
- * For the DCB forwarding test, each core is assigned on every port multi-transmit
- * queue.
+ * For the DCB forwarding test, each core is assigned on each traffic class.
  *
  * Each core is assigned a multi-stream, each stream being composed of
  * a RX queue to poll on a RX port for input messages, associated with
- * a TX queue of a TX port where to send forwarded packets.
- * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
- * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
- * following rules:
- * In VT mode,
- *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
- *    - TxQl = RxQj
- * In non-VT mode,
- *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
- *    There is a mapping of RxQj to TxQl to be required,and the mapping was implemented
- *    in dcb_rxq_2_txq_mapping function.
+ * a TX queue of a TX port where to send forwarded packets. All RX and
+ * TX queues are mapping to the same traffic class.
+ * If VMDQ and DCB co-exist, each traffic class on different POOLs share
+ * the same core
  */
 static void
 dcb_fwd_config_setup(void)
 {
-	portid_t   rxp;
-	portid_t   txp;
-	queueid_t  rxq;
-	queueid_t  nb_q;
+	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
+	portid_t txp, rxp = 0;
+	queueid_t txq, rxq = 0;
 	lcoreid_t  lc_id;
-	uint16_t sm_id;
-
-	nb_q = nb_rxq;
+	uint16_t nb_rx_queue, nb_tx_queue;
+	uint16_t i, j, k, sm_id = 0;
+	uint8_t tc = 0;
 
 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
 	cur_fwd_config.nb_fwd_streams =
-		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
+		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
 
 	/* reinitialize forwarding streams */
 	init_fwd_streams();
+	sm_id = 0;
+	if ((rxp & 0x1) == 0)
+		txp = (portid_t) (rxp + 1);
+	else
+		txp = (portid_t) (rxp - 1);
+	/* get the dcb info on the first RX and TX ports */
+	rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
+	rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
 
-	setup_fwd_config_of_each_lcore(&cur_fwd_config);
-	rxp = 0; rxq = 0;
 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
-		/* a fwd core can run multi-streams */
-		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++)
-		{
-			struct fwd_stream *fs;
-			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
-			if ((rxp & 0x1) == 0)
-				txp = (portid_t) (rxp + 1);
-			else
-				txp = (portid_t) (rxp - 1);
-			fs->rx_port = fwd_ports_ids[rxp];
-			fs->rx_queue = rxq;
-			fs->tx_port = fwd_ports_ids[txp];
-			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
-				fs->tx_queue = rxq;
-			else
-				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
-			fs->peer_addr = fs->tx_port;
-			rxq = (queueid_t) (rxq + 1);
-			if (rxq < nb_q)
-				continue;
-			rxq = 0;
-			if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
-				rxp = (portid_t)
-					(rxp + ((nb_ports >> 1) / nb_fwd_ports));
-			else
-				rxp = (portid_t) (rxp + 1);
+		fwd_lcores[lc_id]->stream_nb = 0;
+		fwd_lcores[lc_id]->stream_idx = sm_id;
+		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+			/* if the nb_queue is zero, means this tc is
+			 * not enabled on the POOL
+			 */
+			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
+				break;
+			k = fwd_lcores[lc_id]->stream_nb +
+				fwd_lcores[lc_id]->stream_idx;
+			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
+			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
+			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
+			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
+			for (j = 0; j < nb_rx_queue; j++) {
+				struct fwd_stream *fs;
+
+				fs = fwd_streams[k + j];
+				fs->rx_port = fwd_ports_ids[rxp];
+				fs->rx_queue = rxq + j;
+				fs->tx_port = fwd_ports_ids[txp];
+				fs->tx_queue = txq + j % nb_tx_queue;
+				fs->peer_addr = fs->tx_port;
+			}
+			fwd_lcores[lc_id]->stream_nb +=
+				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
 		}
+		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
+
+		tc++;
+		if (tc < rxp_dcb_info.nb_tcs)
+			continue;
+		/* Restart from TC 0 on next RX port */
+		tc = 0;
+		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
+			rxp = (portid_t)
+				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
+		else
+			rxp++;
+		if (rxp >= nb_fwd_ports)
+			return;
+		/* get the dcb information on next RX and TX ports */
+		if ((rxp & 0x1) == 0)
+			txp = (portid_t) (rxp + 1);
+		else
+			txp = (portid_t) (rxp - 1);
+		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
+		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
 	}
 }
 
@@ -1354,10 +1333,6 @@  pkt_fwd_config_display(struct fwd_config *cfg)
 void
 fwd_config_display(void)
 {
-	if((dcb_config) && (nb_fwd_lcores == 1)) {
-		printf("In DCB mode,the nb forwarding cores should be larger than 1\n");
-		return;
-	}
 	fwd_config_setup();
 	pkt_fwd_config_display(&cur_fwd_config);
 }
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index c8ae909..25dadbc 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -182,9 +182,6 @@  uint8_t dcb_config = 0;
 /* Whether the dcb is in testing status */
 uint8_t dcb_test = 0;
 
-/* DCB on and VT on mapping is default */
-enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
-
 /*
  * Configurable number of RX/TX queues.
  */
@@ -1840,115 +1837,131 @@  const uint16_t vlan_tags[] = {
 };
 
 static  int
-get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
+get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
+		 enum dcb_mode_enable dcb_mode,
+		 enum rte_eth_nb_tcs num_tcs,
+		 uint8_t pfc_en)
 {
-        uint8_t i;
+	uint8_t i;
 
 	/*
 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
 	 * given above, and the number of traffic classes available for use.
 	 */
-	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
-		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
-		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
+	if (dcb_mode == DCB_VT_ENABLED) {
+		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
+		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 
 		/* VMDQ+DCB RX and TX configrations */
-		vmdq_rx_conf.enable_default_pool = 0;
-		vmdq_rx_conf.default_pool = 0;
-		vmdq_rx_conf.nb_queue_pools =
-			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
-		vmdq_tx_conf.nb_queue_pools =
-			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
-
-		vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
-		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
-			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
-			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
+		vmdq_rx_conf->enable_default_pool = 0;
+		vmdq_rx_conf->default_pool = 0;
+		vmdq_rx_conf->nb_queue_pools =
+			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+		vmdq_tx_conf->nb_queue_pools =
+			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+
+		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
+		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
+			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
+			vmdq_rx_conf->pool_map[i].pools =
+				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-			vmdq_rx_conf.dcb_tc[i] = i;
-			vmdq_tx_conf.dcb_tc[i] = i;
+			vmdq_rx_conf->dcb_tc[i] = i;
+			vmdq_tx_conf->dcb_tc[i] = i;
 		}
 
-		/*set DCB mode of RX and TX of multiple queues*/
+		/* set DCB mode of RX and TX of multiple queues */
 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
-		if (dcb_conf->pfc_en)
-			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
-		else
-			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
-
-		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
-                                sizeof(struct rte_eth_vmdq_dcb_conf)));
-		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
-                                sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
-	}
-	else {
-		struct rte_eth_dcb_rx_conf rx_conf;
-		struct rte_eth_dcb_tx_conf tx_conf;
-
-		/* queue mapping configuration of DCB RX and TX */
-		if (dcb_conf->num_tcs == ETH_4_TCS)
-			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
-		else
-			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
-
-		rx_conf.nb_tcs = dcb_conf->num_tcs;
-		tx_conf.nb_tcs = dcb_conf->num_tcs;
-
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
-			rx_conf.dcb_tc[i] = i;
-			tx_conf.dcb_tc[i] = i;
+	} else {
+		struct rte_eth_dcb_rx_conf *rx_conf =
+				&eth_conf->rx_adv_conf.dcb_rx_conf;
+		struct rte_eth_dcb_tx_conf *tx_conf =
+				&eth_conf->tx_adv_conf.dcb_tx_conf;
+
+		rx_conf->nb_tcs = num_tcs;
+		tx_conf->nb_tcs = num_tcs;
+
+		for (i = 0; i < num_tcs; i++) {
+			rx_conf->dcb_tc[i] = i;
+			tx_conf->dcb_tc[i] = i;
 		}
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
+		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
-		if (dcb_conf->pfc_en)
-			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
-		else
-			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
-
-		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
-                                sizeof(struct rte_eth_dcb_rx_conf)));
-		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
-                                sizeof(struct rte_eth_dcb_tx_conf)));
 	}
 
+	if (pfc_en)
+		eth_conf->dcb_capability_en =
+				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+	else
+		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+
 	return 0;
 }
 
 int
-init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
+init_port_dcb_config(portid_t pid,
+		     enum dcb_mode_enable dcb_mode,
+		     enum rte_eth_nb_tcs num_tcs,
+		     uint8_t pfc_en)
 {
 	struct rte_eth_conf port_conf;
+	struct rte_eth_dev_info dev_info;
 	struct rte_port *rte_port;
 	int retval;
-	uint16_t nb_vlan;
 	uint16_t i;
 
-	/* rxq and txq configuration in dcb mode */
-	nb_rxq = 128;
-	nb_txq = 128;
+	rte_eth_dev_info_get(pid, &dev_info);
+
+	/* If dev_info.vmdq_pool_base is greater than 0,
+	 * the queue id of vmdq pools is started after pf queues.
+	 */
+	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
+		printf("VMDQ_DCB multi-queue mode is nonsensical"
+			" for port %d.", pid);
+		return -1;
+	}
+
+	/* Assume the ports in testpmd have the same dcb capability
+	 * and has the same number of rxq and txq in dcb mode
+	 */
+	if (dcb_mode == DCB_VT_ENABLED) {
+		nb_rxq = dev_info.max_rx_queues;
+		nb_txq = dev_info.max_tx_queues;
+	} else {
+		/*if vt is disabled, use all pf queues */
+		if (dev_info.vmdq_pool_base == 0) {
+			nb_rxq = dev_info.max_rx_queues;
+			nb_txq = dev_info.max_tx_queues;
+		} else {
+			nb_rxq = (queueid_t)num_tcs;
+			nb_txq = (queueid_t)num_tcs;
+
+		}
+	}
 	rx_free_thresh = 64;
 
-	memset(&port_conf,0,sizeof(struct rte_eth_conf));
+	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
 	/* Enter DCB configuration status */
 	dcb_config = 1;
 
-	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
-	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
+	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
 
 	rte_port = &ports[pid];
-	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
+	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
 
 	rxtx_port_config(rte_port);
 	/* VLAN filter */
 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
-	for (i = 0; i < nb_vlan; i++){
+	for (i = 0; i < RTE_DIM(vlan_tags); i++)
 		rx_vft_set(pid, vlan_tags[i], 1);
-	}
 
 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
 	map_port_queue_stats_mapping_registers(pid, rte_port);
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index d287274..5818fdd 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -255,25 +255,6 @@  enum dcb_mode_enable
 	DCB_ENABLED
 };
 
-/*
- * DCB general config info
- */
-struct dcb_config {
-	enum dcb_mode_enable dcb_mode;
-	uint8_t vt_en;
-	enum rte_eth_nb_tcs num_tcs;
-	uint8_t pfc_en;
-};
-
-/*
- * In DCB io FWD mode, 128 RX queue to 128 TX queue mapping
- */
-enum dcb_queue_mapping_mode {
-	DCB_VT_Q_MAPPING = 0,
-	DCB_4_TCS_Q_MAPPING,
-	DCB_8_TCS_Q_MAPPING
-};
-
 #define MAX_TX_QUEUE_STATS_MAPPINGS 1024 /* MAX_PORT of 32 @ 32 tx_queues/port */
 #define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /* MAX_PORT of 32 @ 128 rx_queues/port */
 
@@ -537,7 +518,9 @@  void dev_set_link_down(portid_t pid);
 void init_port_config(void);
 void set_port_slave_flag(portid_t slave_pid);
 void clear_port_slave_flag(portid_t slave_pid);
-int init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf);
+int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode,
+		     enum rte_eth_nb_tcs num_tcs,
+		     uint8_t pfc_en);
 int start_port(portid_t pid);
 void stop_port(portid_t pid);
 void close_port(portid_t pid);