[dpdk-dev,v3] ethdev: increase flow type limit from 32 to 64

Message ID 1516037612-69603-1-git-send-email-kirill.rybalchenko@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  fail      Compilation issues

Commit Message

Rybalchenko, Kirill Jan. 15, 2018, 5:33 p.m. UTC
  Increase the internal limit for flow types from 32 to 64
to support future flow type extensions.
Change type of variables from uint32_t[] to uint64_t[]:
rte_eth_fdir_info.flow_types_mask
rte_eth_hash_global_conf.sym_hash_enable_mask
rte_eth_hash_global_conf.valid_bit_mask

This modification affects the following components:
net/i40e
net/ixgbe
app/testpmd

v2:
implement versioning of rte_eth_dev_filter_ctrl function
for ABI backward compatibility with version 17.11 and older

v3:
fix code style warnings

Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
---
 app/test-pmd/cmdline.c                  |  22 ++---
 drivers/net/i40e/i40e_ethdev.c          |  38 ++++----
 drivers/net/i40e/i40e_fdir.c            |  25 ++---
 drivers/net/ixgbe/ixgbe_fdir.c          |  22 +++--
 lib/librte_ether/rte_eth_ctrl.h         |  12 +--
 lib/librte_ether/rte_ethdev.c           | 156 +++++++++++++++++++++++++++++++-
 lib/librte_ether/rte_ethdev_version.map |   7 ++
 7 files changed, 223 insertions(+), 59 deletions(-)
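
For context (not part of the patch): with the masks widened to uint64_t[], callers
index them per 64-bit word. A minimal sketch of that access pattern, mirroring the
idx/offset arithmetic used in the testpmd hunks below (the helper name is
illustrative, not from DPDK):

#include <stdint.h>
#include <limits.h>	/* CHAR_BIT */

/* Same definition rte_eth_ctrl.h gains in this patch. */
#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))

/* Test whether a flow type bit is set in a widened mask array such as
 * rte_eth_fdir_info.flow_types_mask. */
static inline int
flow_type_is_set(const uint64_t *mask, uint32_t flow_type)
{
	uint32_t idx = flow_type / UINT64_BIT;
	uint32_t offset = flow_type % UINT64_BIT;

	return (mask[idx] & (1ULL << offset)) != 0;
}

Note that shifts must now use 1ULL rather than 1UL, which is exactly what the
app/testpmd and driver changes below do.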
  

Comments

Thomas Monjalon Jan. 15, 2018, 9:27 p.m. UTC | #1
15/01/2018 18:33, Kirill Rybalchenko:
> --- a/lib/librte_ether/rte_eth_ctrl.h
> +++ b/lib/librte_ether/rte_eth_ctrl.h
> @@ -662,9 +662,9 @@ enum rte_fdir_mode {
>         RTE_FDIR_MODE_PERFECT_TUNNEL,   /**< Enable FDIR filter mode - tunnel. */
>  };
>  
> -#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
> +#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
>  #define RTE_FLOW_MASK_ARRAY_SIZE \
> -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
>  
>  /**
>   * A structure used to get the information of flow director filter.
> @@ -681,7 +681,7 @@ struct rte_eth_fdir_info {
>         uint32_t guarant_spc; /**< Guaranteed spaces.*/
>         uint32_t best_spc; /**< Best effort spaces.*/
>         /** Bit mask for every supported flow type. */
> -       uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
> +       uint64_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
>         uint32_t max_flexpayload; /**< Total flex payload in bytes. */
>         /** Flexible payload unit in bytes. Size and alignments of all flex
>             payload segments should be multiplies of this value. */
> @@ -774,7 +774,7 @@ enum rte_eth_hash_function {
>  };
>  
>  #define RTE_SYM_HASH_MASK_ARRAY_SIZE \
> -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
>  /**
>   * A structure used to set or get global hash function configurations which
>   * include symmetric hash enable per flow type and hash function type.
> @@ -787,9 +787,9 @@ enum rte_eth_hash_function {
>  struct rte_eth_hash_global_conf {
>         enum rte_eth_hash_function hash_func; /**< Hash function type */
>         /** Bit mask for symmetric hash enable per flow type */
> -       uint32_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> +       uint64_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
>         /** Bit mask indicates if the corresponding bit is valid */
> -       uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> +       uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
>  };

This is still changing the ABI.
Am I missing something?
  
Rybalchenko, Kirill Jan. 16, 2018, 9:44 a.m. UTC | #2
Hi Thomas,

> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Monday 15 January 2018 21:27
> To: Rybalchenko, Kirill <kirill.rybalchenko@intel.com>
> Cc: dev@dpdk.org; Chilikin, Andrey <andrey.chilikin@intel.com>; Yigit,
> Ferruh <ferruh.yigit@intel.com>
> Subject: Re: [PATCH v3] ethdev: increase flow type limit from 32 to 64
> 
> 15/01/2018 18:33, Kirill Rybalchenko:
> > --- a/lib/librte_ether/rte_eth_ctrl.h
> > +++ b/lib/librte_ether/rte_eth_ctrl.h
> > @@ -662,9 +662,9 @@ enum rte_fdir_mode {
> >         RTE_FDIR_MODE_PERFECT_TUNNEL,   /**< Enable FDIR filter mode -
> tunnel. */
> >  };
> >
> > -#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
> > +#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
> >  #define RTE_FLOW_MASK_ARRAY_SIZE \
> > -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> > +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
> >
> >  /**
> >   * A structure used to get the information of flow director filter.
> > @@ -681,7 +681,7 @@ struct rte_eth_fdir_info {
> >         uint32_t guarant_spc; /**< Guaranteed spaces.*/
> >         uint32_t best_spc; /**< Best effort spaces.*/
> >         /** Bit mask for every supported flow type. */
> > -       uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
> > +       uint64_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
> >         uint32_t max_flexpayload; /**< Total flex payload in bytes. */
> >         /** Flexible payload unit in bytes. Size and alignments of all flex
> >             payload segments should be multiplies of this value. */ @@
> > -774,7 +774,7 @@ enum rte_eth_hash_function {  };
> >
> >  #define RTE_SYM_HASH_MASK_ARRAY_SIZE \
> > -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> > +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
> >  /**
> >   * A structure used to set or get global hash function configurations which
> >   * include symmetric hash enable per flow type and hash function type.
> > @@ -787,9 +787,9 @@ enum rte_eth_hash_function {  struct
> > rte_eth_hash_global_conf {
> >         enum rte_eth_hash_function hash_func; /**< Hash function type */
> >         /** Bit mask for symmetric hash enable per flow type */
> > -       uint32_t
> sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > +       uint64_t
> sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> >         /** Bit mask indicates if the corresponding bit is valid */
> > -       uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > +       uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> >  };
> 
> This is still changing the ABI.
> Am I missing something?
> 
We change the size of the structures rte_eth_fdir_info and rte_eth_hash_filter_info.
An application can pass these structures to the DPDK library only through the
rte_eth_dev_filter_ctrl() call. In the patch this function is versioned so that
it stays compatible with application binaries compiled against previous
versions of the DPDK library.

Thanks,
Kirill.
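
For readers following the ABI discussion: the backward compatibility relies on the
rte_compat.h symbol versioning macros. A condensed sketch of the pattern, taken
from the rte_ethdev.c hunk further down this page (full conversion logic omitted):

#include <rte_compat.h>
#include <rte_ethdev.h>

/* Old implementation, kept for binaries linked against DPDK <= 17.11; it
 * translates between the legacy 32-bit mask layouts and the new 64-bit ones. */
int rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op, void *arg);
VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);

/* New implementation, the default for binaries built against DPDK 18.02. */
int rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
				  enum rte_filter_type filter_type,
				  enum rte_filter_op filter_op, void *arg);
BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);

/* Static builds carry no versioned symbols, so the public name is mapped
 * directly to the new implementation. */
MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
		  enum rte_filter_type filter_type,
		  enum rte_filter_op filter_op, void *arg),
		  rte_eth_dev_filter_ctrl_v1802);

The rte_ethdev_version.map hunk adds the matching DPDK_18.02 node so the dynamic
linker can pick the right version for each caller.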
  
Thomas Monjalon Jan. 16, 2018, 9:47 a.m. UTC | #3
16/01/2018 10:44, Rybalchenko, Kirill:
> Hi Thomas,
> 
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> > 15/01/2018 18:33, Kirill Rybalchenko:
> > > --- a/lib/librte_ether/rte_eth_ctrl.h
> > > +++ b/lib/librte_ether/rte_eth_ctrl.h
> > > @@ -662,9 +662,9 @@ enum rte_fdir_mode {
> > >         RTE_FDIR_MODE_PERFECT_TUNNEL,   /**< Enable FDIR filter mode -
> > tunnel. */
> > >  };
> > >
> > > -#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
> > > +#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
> > >  #define RTE_FLOW_MASK_ARRAY_SIZE \
> > > -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> > > +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
> > >
> > >  /**
> > >   * A structure used to get the information of flow director filter.
> > > @@ -681,7 +681,7 @@ struct rte_eth_fdir_info {
> > >         uint32_t guarant_spc; /**< Guaranteed spaces.*/
> > >         uint32_t best_spc; /**< Best effort spaces.*/
> > >         /** Bit mask for every supported flow type. */
> > > -       uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
> > > +       uint64_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
> > >         uint32_t max_flexpayload; /**< Total flex payload in bytes. */
> > >         /** Flexible payload unit in bytes. Size and alignments of all flex
> > >             payload segments should be multiplies of this value. */ @@
> > > -774,7 +774,7 @@ enum rte_eth_hash_function {  };
> > >
> > >  #define RTE_SYM_HASH_MASK_ARRAY_SIZE \
> > > -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> > > +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
> > >  /**
> > >   * A structure used to set or get global hash function configurations which
> > >   * include symmetric hash enable per flow type and hash function type.
> > > @@ -787,9 +787,9 @@ enum rte_eth_hash_function {  struct
> > > rte_eth_hash_global_conf {
> > >         enum rte_eth_hash_function hash_func; /**< Hash function type */
> > >         /** Bit mask for symmetric hash enable per flow type */
> > > -       uint32_t
> > sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > +       uint64_t
> > sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > >         /** Bit mask indicates if the corresponding bit is valid */
> > > -       uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > +       uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > >  };
> > 
> > This is still changing the ABI.
> > Am I missing something?
> > 
> We change size of structures rte_eth_fdir_info and rte_eth_hash_filter_info.

Yes, and these structures are allocated and read by the application?
So it is an ABI break.

> Application can use these structures for DPDK library API call only in
> rte_eth_dev_filter_ctrl() function call. In the patch this function is
> modified in the way that it will be compatible with user binary applications
> compiled with previous versions of DPDK library.

Have you tried to use a patched DPDK with a binary compiled with DPDK 17.11?
  
Rybalchenko, Kirill Jan. 16, 2018, 10:31 a.m. UTC | #4
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Tuesday 16 January 2018 09:48
> To: Rybalchenko, Kirill <kirill.rybalchenko@intel.com>
> Cc: dev@dpdk.org; Chilikin, Andrey <andrey.chilikin@intel.com>; Yigit,
> Ferruh <ferruh.yigit@intel.com>
> Subject: Re: [PATCH v3] ethdev: increase flow type limit from 32 to 64
> 
> 16/01/2018 10:44, Rybalchenko, Kirill:
> > Hi Thomas,
> >
> > From: Thomas Monjalon [mailto:thomas@monjalon.net]
> > > 15/01/2018 18:33, Kirill Rybalchenko:
> > > > --- a/lib/librte_ether/rte_eth_ctrl.h
> > > > +++ b/lib/librte_ether/rte_eth_ctrl.h
> > > > @@ -662,9 +662,9 @@ enum rte_fdir_mode {
> > > >         RTE_FDIR_MODE_PERFECT_TUNNEL,   /**< Enable FDIR filter mode
> -
> > > tunnel. */
> > > >  };
> > > >
> > > > -#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
> > > > +#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
> > > >  #define RTE_FLOW_MASK_ARRAY_SIZE \
> > > > -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> > > > +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
> > > >
> > > >  /**
> > > >   * A structure used to get the information of flow director filter.
> > > > @@ -681,7 +681,7 @@ struct rte_eth_fdir_info {
> > > >         uint32_t guarant_spc; /**< Guaranteed spaces.*/
> > > >         uint32_t best_spc; /**< Best effort spaces.*/
> > > >         /** Bit mask for every supported flow type. */
> > > > -       uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
> > > > +       uint64_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
> > > >         uint32_t max_flexpayload; /**< Total flex payload in bytes. */
> > > >         /** Flexible payload unit in bytes. Size and alignments of all flex
> > > >             payload segments should be multiplies of this value.
> > > > */ @@
> > > > -774,7 +774,7 @@ enum rte_eth_hash_function {  };
> > > >
> > > >  #define RTE_SYM_HASH_MASK_ARRAY_SIZE \
> > > > -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> > > > +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
> > > >  /**
> > > >   * A structure used to set or get global hash function configurations
> which
> > > >   * include symmetric hash enable per flow type and hash function type.
> > > > @@ -787,9 +787,9 @@ enum rte_eth_hash_function {  struct
> > > > rte_eth_hash_global_conf {
> > > >         enum rte_eth_hash_function hash_func; /**< Hash function type
> */
> > > >         /** Bit mask for symmetric hash enable per flow type */
> > > > -       uint32_t
> > > sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > > +       uint64_t
> > > sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > >         /** Bit mask indicates if the corresponding bit is valid */
> > > > -       uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > > +       uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > >  };
> > >
> > > This is still changing the ABI.
> > > Am I missing something?
> > >
> > We change size of structures rte_eth_fdir_info and
> rte_eth_hash_filter_info.
> 
> Yes, and these structures are allocated and read by the application?
> So it is an ABI break.
If the application binary was compiled against a previous version of DPDK, it
makes no difference whether these structures were used internally there - it
will still work.
If the application binary is recompiled with the new version of DPDK - again,
it will work.
The only problematic case is an application binary compiled against an old
version of the DPDK library but run with the new DPDK shared library, while
passing these structures into the library. That can only happen through the
rte_eth_dev_filter_ctrl() function, which handles this case properly (see the
small illustration after this message).
 
> 
> > Application can use these structures for DPDK library API call only in
> > rte_eth_dev_filter_ctrl() function call. In the patch this function is
> > modified in the way that it will be compatible with user binary
> > applications compiled with previous versions of DPDK library.
> 
> Have you tried to use a patched DPDK with a binary compiled with DPDK
> 17.11?

Yes, actually, I did run testpmd from 17.08 with the patched DPDK shared library.
It works fine, as described.

Thanks,
Kirill.
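
A small, self-contained illustration of the scenario described above (standalone
example, not DPDK code): when the _v22 compatibility wrapper copies the widened
mask back into the legacy uint32_t field, the cast keeps the legacy bits and only
drops flow types numbered 32 and above, which an old binary could never have used:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* One legacy flow type bit (< 32) and one hypothetical future one (>= 32). */
	uint64_t new_mask = (1ULL << 4) | (1ULL << 35);

	/* What the compatibility wrapper stores into the old
	 * uint32_t flow_types_mask[0] field. */
	uint32_t legacy_mask = (uint32_t)new_mask;

	assert(legacy_mask == (1U << 4)); /* legacy bit preserved, new bit dropped */
	return 0;
}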
  
Thomas Monjalon Jan. 16, 2018, 10:38 a.m. UTC | #5
16/01/2018 11:31, Rybalchenko, Kirill:
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> > 16/01/2018 10:44, Rybalchenko, Kirill:
> > > Hi Thomas,
> > >
> > > From: Thomas Monjalon [mailto:thomas@monjalon.net]
> > > > 15/01/2018 18:33, Kirill Rybalchenko:
> > > > > --- a/lib/librte_ether/rte_eth_ctrl.h
> > > > > +++ b/lib/librte_ether/rte_eth_ctrl.h
> > > > > @@ -662,9 +662,9 @@ enum rte_fdir_mode {
> > > > >         RTE_FDIR_MODE_PERFECT_TUNNEL,   /**< Enable FDIR filter mode
> > -
> > > > tunnel. */
> > > > >  };
> > > > >
> > > > > -#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
> > > > > +#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
> > > > >  #define RTE_FLOW_MASK_ARRAY_SIZE \
> > > > > -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> > > > > +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
> > > > >
> > > > >  /**
> > > > >   * A structure used to get the information of flow director filter.
> > > > > @@ -681,7 +681,7 @@ struct rte_eth_fdir_info {
> > > > >         uint32_t guarant_spc; /**< Guaranteed spaces.*/
> > > > >         uint32_t best_spc; /**< Best effort spaces.*/
> > > > >         /** Bit mask for every supported flow type. */
> > > > > -       uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
> > > > > +       uint64_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
> > > > >         uint32_t max_flexpayload; /**< Total flex payload in bytes. */
> > > > >         /** Flexible payload unit in bytes. Size and alignments of all flex
> > > > >             payload segments should be multiplies of this value.
> > > > > */ @@
> > > > > -774,7 +774,7 @@ enum rte_eth_hash_function {  };
> > > > >
> > > > >  #define RTE_SYM_HASH_MASK_ARRAY_SIZE \
> > > > > -       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
> > > > > +       (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
> > > > >  /**
> > > > >   * A structure used to set or get global hash function configurations
> > which
> > > > >   * include symmetric hash enable per flow type and hash function type.
> > > > > @@ -787,9 +787,9 @@ enum rte_eth_hash_function {  struct
> > > > > rte_eth_hash_global_conf {
> > > > >         enum rte_eth_hash_function hash_func; /**< Hash function type
> > */
> > > > >         /** Bit mask for symmetric hash enable per flow type */
> > > > > -       uint32_t
> > > > sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > > > +       uint64_t
> > > > sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > > >         /** Bit mask indicates if the corresponding bit is valid */
> > > > > -       uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > > > +       uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
> > > > >  };
> > > >
> > > > This is still changing the ABI.
> > > > Am I missing something?
> > > >
> > > We change size of structures rte_eth_fdir_info and
> > rte_eth_hash_filter_info.
> > 
> > Yes, and these structures are allocated and read by the application?
> > So it is an ABI break.
> If application binary was compiled with previous version of DPDK it makes
> no difference if these structures were used internally there - it still will work.
> If application binary was recompiled with new version of DPDK - again,
> It will work. 
> The only issue is if application binary was compiled with old version of DPDK
> library, but used with new version of DPDK shared library and uses those
> structures to call functions from this DPDK library. But it can be done
> only by   rte_eth_dev_filter_ctrl() function, which handles this case properly.

OK got it.
Thanks for your patience :)

> > > Application can use these structures for DPDK library API call only in
> > > rte_eth_dev_filter_ctrl() function call. In the patch this function is
> > > modified in the way that it will be compatible with user binary
> > > applications compiled with previous versions of DPDK library.
> > 
> > Have you tried to use a patched DPDK with a binary compiled with DPDK
> > 17.11?
> 
> Yes, actually, I did run testpmd from 17.08 with patched DPDK shared library. 
> It works fine, as described.
> 
> Thanks,
> Kirill.
  
Ferruh Yigit Jan. 17, 2018, 4:56 p.m. UTC | #6
On 1/15/2018 5:33 PM, Kirill Rybalchenko wrote:
> Increase the internal limit for flow types from 32 to 64
> to support future flow type extensions.
> Change type of variables from uint32_t[] to uint64_t[]:
> rte_eth_fdir_info.flow_types_mask
> rte_eth_hash_global_conf.sym_hash_enable_mask
> rte_eth_hash_global_conf.valid_bit_mask
> 
> This modification affects the following components:
> net/i40e
> net/ixgbe
> app/testpmd
> 
> v2:
> implement versioning of rte_eth_dev_filter_ctrl function
> for ABI backward compatibility with version 17.11 and older
> 
> v3:
> fix code style warnings
> 
> Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>

Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>


I suggest keeping deprecation notice and clean versioning in next release, does
it make sense?
  
Rybalchenko, Kirill Jan. 18, 2018, 9:24 a.m. UTC | #7
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday 17 January 2018 16:57
> To: Rybalchenko, Kirill <kirill.rybalchenko@intel.com>; dev@dpdk.org
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; thomas@monjalon.net
> Subject: Re: [dpdk-dev] [PATCH v3] ethdev: increase flow type limit from 32
> to 64
> 
> On 1/15/2018 5:33 PM, Kirill Rybalchenko wrote:
> > Increase the internal limit for flow types from 32 to 64 to support
> > future flow type extensions.
> > Change type of variables from uint32_t[] to uint64_t[]:
> > rte_eth_fdir_info.flow_types_mask
> > rte_eth_hash_global_conf.sym_hash_enable_mask
> > rte_eth_hash_global_conf.valid_bit_mask
> >
> > This modification affects the following components:
> > net/i40e
> > net/ixgbe
> > app/testpmd
> >
> > v2:
> > implement versioning of rte_eth_dev_filter_ctrl function for ABI
> > backward compatibility with version 17.11 and older
> >
> > v3:
> > fix code style warnings
> >
> > Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
> 
> Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
> 
> 
> I suggest keeping deprecation notice and clean versioning in next release,
> does it make sense?

Yes, I think it should be done that way, just to keep the source code tidy.
  
Ferruh Yigit Jan. 18, 2018, 12:25 p.m. UTC | #8
On 1/17/2018 4:56 PM, Ferruh Yigit wrote:
> On 1/15/2018 5:33 PM, Kirill Rybalchenko wrote:
>> Increase the internal limit for flow types from 32 to 64
>> to support future flow type extensions.
>> Change type of variables from uint32_t[] to uint64_t[]:
>> rte_eth_fdir_info.flow_types_mask
>> rte_eth_hash_global_conf.sym_hash_enable_mask
>> rte_eth_hash_global_conf.valid_bit_mask
>>
>> This modification affects the following components:
>> net/i40e
>> net/ixgbe
>> app/testpmd
>>
>> v2:
>> implement versioning of rte_eth_dev_filter_ctrl function
>> for ABI backward compatibility with version 17.11 and older
>>
>> v3:
>> fix code style warnings
>>
>> Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
> 
> Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>

Applied to dpdk-next-net/master, thanks.
  

Patch

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 6d71a5f..964d4ed 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -10803,7 +10803,7 @@  cmd_flow_director_flex_mask_parsed(void *parsed_result,
 	struct rte_eth_fdir_info fdir_info;
 	struct rte_eth_fdir_flex_mask flex_mask;
 	struct rte_port *port;
-	uint32_t flow_type_mask;
+	uint64_t flow_type_mask;
 	uint16_t i;
 	int ret;
 
@@ -10856,7 +10856,7 @@  cmd_flow_director_flex_mask_parsed(void *parsed_result,
 			return;
 		}
 		for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
-			if (flow_type_mask & (1 << i)) {
+			if (flow_type_mask & (1ULL << i)) {
 				flex_mask.flow_type = i;
 				fdir_set_flex_mask(res->port_id, &flex_mask);
 			}
@@ -10865,7 +10865,7 @@  cmd_flow_director_flex_mask_parsed(void *parsed_result,
 		return;
 	}
 	flex_mask.flow_type = str2flowtype(res->flow_type);
-	if (!(flow_type_mask & (1 << flex_mask.flow_type))) {
+	if (!(flow_type_mask & (1ULL << flex_mask.flow_type))) {
 		printf("Flow type %s not supported on port %d\n",
 				res->flow_type, res->port_id);
 		return;
@@ -11227,10 +11227,10 @@  cmd_get_hash_global_config_parsed(void *parsed_result,
 	}
 
 	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
-		idx = i / UINT32_BIT;
-		offset = i % UINT32_BIT;
+		idx = i / UINT64_BIT;
+		offset = i % UINT64_BIT;
 		if (!(info.info.global_conf.valid_bit_mask[idx] &
-						(1UL << offset)))
+						(1ULL << offset)))
 			continue;
 		str = flowtype_to_str(i);
 		if (!str)
@@ -11238,7 +11238,7 @@  cmd_get_hash_global_config_parsed(void *parsed_result,
 		printf("Symmetric hash is %s globally for flow type %s "
 							"by port %d\n",
 			((info.info.global_conf.sym_hash_enable_mask[idx] &
-			(1UL << offset)) ? "enabled" : "disabled"), str,
+			(1ULL << offset)) ? "enabled" : "disabled"), str,
 							res->port_id);
 	}
 }
@@ -11299,12 +11299,12 @@  cmd_set_hash_global_config_parsed(void *parsed_result,
 			RTE_ETH_HASH_FUNCTION_DEFAULT;
 
 	ftype = str2flowtype(res->flow_type);
-	idx = ftype / (CHAR_BIT * sizeof(uint32_t));
-	offset = ftype % (CHAR_BIT * sizeof(uint32_t));
-	info.info.global_conf.valid_bit_mask[idx] |= (1UL << offset);
+	idx = ftype / UINT64_BIT;
+	offset = ftype % UINT64_BIT;
+	info.info.global_conf.valid_bit_mask[idx] |= (1ULL << offset);
 	if (!strcmp(res->enable, "enable"))
 		info.info.global_conf.sym_hash_enable_mask[idx] |=
-						(1UL << offset);
+						(1ULL << offset);
 	ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
 					RTE_ETH_FILTER_SET, &info);
 	if (ret < 0)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7796e9e..d0473f0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -8134,14 +8134,17 @@  i40e_get_hash_filter_global_config(struct i40e_hw *hw,
 		(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
 
 	/*
-	 * We work only with lowest 32 bits which is not correct, but to work
-	 * properly the valid_bit_mask size should be increased up to 64 bits
-	 * and this will brake ABI. This modification will be done in next
-	 * release
+	 * As i40e supports less than 64 flow types, only first 64 bits need to
+	 * be checked.
 	 */
-	g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask;
+	for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+		g_cfg->valid_bit_mask[i] = 0ULL;
+		g_cfg->sym_hash_enable_mask[i] = 0ULL;
+	}
 
-	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) {
+	g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
+
+	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
 		if (!adapter->pctypes_tbl[i])
 			continue;
 		for (j = I40E_FILTER_PCTYPE_INVALID + 1;
@@ -8150,7 +8153,7 @@  i40e_get_hash_filter_global_config(struct i40e_hw *hw,
 				reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
 				if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
 					g_cfg->sym_hash_enable_mask[0] |=
-								(1UL << i);
+								(1ULL << i);
 				}
 			}
 		}
@@ -8164,7 +8167,7 @@  i40e_hash_global_config_check(const struct i40e_adapter *adapter,
 			      const struct rte_eth_hash_global_conf *g_cfg)
 {
 	uint32_t i;
-	uint32_t mask0, i40e_mask = adapter->flow_types_mask;
+	uint64_t mask0, i40e_mask = adapter->flow_types_mask;
 
 	if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
 		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
@@ -8175,7 +8178,7 @@  i40e_hash_global_config_check(const struct i40e_adapter *adapter,
 	}
 
 	/*
-	 * As i40e supports less than 32 flow types, only first 32 bits need to
+	 * As i40e supports less than 64 flow types, only first 64 bits need to
 	 * be checked.
 	 */
 	mask0 = g_cfg->valid_bit_mask[0];
@@ -8211,23 +8214,20 @@  i40e_set_hash_filter_global_config(struct i40e_hw *hw,
 	int ret;
 	uint16_t i, j;
 	uint32_t reg;
-	/*
-	 * We work only with lowest 32 bits which is not correct, but to work
-	 * properly the valid_bit_mask size should be increased up to 64 bits
-	 * and this will brake ABI. This modification will be done in next
-	 * release
-	 */
-	uint32_t mask0 = g_cfg->valid_bit_mask[0] &
-					(uint32_t)adapter->flow_types_mask;
+	uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
 
 	/* Check the input parameters */
 	ret = i40e_hash_global_config_check(adapter, g_cfg);
 	if (ret < 0)
 		return ret;
 
-	for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) {
+	/*
+	 * As i40e supports less than 64 flow types, only first 64 bits need to
+	 * be configured.
+	 */
+	for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
 		if (mask0 & (1UL << i)) {
-			reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+			reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
 					I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
 
 			for (j = I40E_FILTER_PCTYPE_INVALID + 1;
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 906c204..3a9e656 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -66,17 +66,17 @@ 
 #define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
 
 #define I40E_FDIR_FLOWS ( \
-	(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
-	(1 << RTE_ETH_FLOW_FRAG_IPV6) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
-	(1 << RTE_ETH_FLOW_L2_PAYLOAD))
+	(1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+	(1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+	(1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
 
 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
@@ -1999,6 +1999,7 @@  i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 	uint16_t num_flex_set = 0;
 	uint16_t num_flex_mask = 0;
+	uint16_t i;
 
 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
 		fdir->mode = RTE_FDIR_MODE_PERFECT;
@@ -2011,6 +2012,8 @@  i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
 		(uint32_t)hw->func_caps.fd_filters_best_effort;
 	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
 	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
+	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+		fdir->flow_types_mask[i] = 0ULL;
 	fdir->flex_payload_unit = sizeof(uint16_t);
 	fdir->flex_bitmask_unit = sizeof(uint16_t);
 	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 551580c..236ab8c 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -41,14 +41,14 @@ 
 #define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
 
 #define IXGBE_FDIR_FLOW_TYPES ( \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
 
 #define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
 	uint8_t ipv6_addr[16]; \
@@ -1407,7 +1407,7 @@  ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_fdir_info *info =
 			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
-	uint32_t fdirctrl, max_num;
+	uint32_t fdirctrl, max_num, i;
 	uint8_t offset;
 
 	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
@@ -1439,9 +1439,11 @@  ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 
 	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
 	    fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
-		fdir_info->flow_types_mask[0] = 0;
+		fdir_info->flow_types_mask[0] = 0ULL;
 	else
 		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+		fdir_info->flow_types_mask[i] = 0ULL;
 
 	fdir_info->flex_payload_unit = sizeof(uint16_t);
 	fdir_info->max_flex_payload_segment_num = 1;
diff --git a/lib/librte_ether/rte_eth_ctrl.h b/lib/librte_ether/rte_eth_ctrl.h
index 7991efa..668f59a 100644
--- a/lib/librte_ether/rte_eth_ctrl.h
+++ b/lib/librte_ether/rte_eth_ctrl.h
@@ -662,9 +662,9 @@  enum rte_fdir_mode {
 	RTE_FDIR_MODE_PERFECT_TUNNEL,   /**< Enable FDIR filter mode - tunnel. */
 };
 
-#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
+#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
 #define RTE_FLOW_MASK_ARRAY_SIZE \
-	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
 
 /**
  * A structure used to get the information of flow director filter.
@@ -681,7 +681,7 @@  struct rte_eth_fdir_info {
 	uint32_t guarant_spc; /**< Guaranteed spaces.*/
 	uint32_t best_spc; /**< Best effort spaces.*/
 	/** Bit mask for every supported flow type. */
-	uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
+	uint64_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
 	uint32_t max_flexpayload; /**< Total flex payload in bytes. */
 	/** Flexible payload unit in bytes. Size and alignments of all flex
 	    payload segments should be multiplies of this value. */
@@ -774,7 +774,7 @@  enum rte_eth_hash_function {
 };
 
 #define RTE_SYM_HASH_MASK_ARRAY_SIZE \
-	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
 /**
  * A structure used to set or get global hash function configurations which
  * include symmetric hash enable per flow type and hash function type.
@@ -787,9 +787,9 @@  enum rte_eth_hash_function {
 struct rte_eth_hash_global_conf {
 	enum rte_eth_hash_function hash_func; /**< Hash function type */
 	/** Bit mask for symmetric hash enable per flow type */
-	uint32_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+	uint64_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
 	/** Bit mask indicates if the corresponding bit is valid */
-	uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+	uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
 };
 
 /**
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index b349599..9fb560e 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -34,6 +34,7 @@ 
 #include <rte_errno.h>
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
+#include <rte_compat.h>
 
 #include "rte_ether.h"
 #include "rte_ethdev.h"
@@ -3148,8 +3149,153 @@  rte_eth_dev_filter_supported(uint16_t port_id,
 }
 
 int
-rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
-		       enum rte_filter_op filter_op, void *arg)
+rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
+			    enum rte_filter_type filter_type,
+			    enum rte_filter_op filter_op, void *arg);
+
+int
+rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
+			    enum rte_filter_type filter_type,
+			    enum rte_filter_op filter_op, void *arg)
+{
+	struct rte_eth_fdir_info_v22 {
+		enum rte_fdir_mode mode;
+		struct rte_eth_fdir_masks mask;
+		struct rte_eth_fdir_flex_conf flex_conf;
+		uint32_t guarant_spc;
+		uint32_t best_spc;
+		uint32_t flow_types_mask[1];
+		uint32_t max_flexpayload;
+		uint32_t flex_payload_unit;
+		uint32_t max_flex_payload_segment_num;
+		uint16_t flex_payload_limit;
+		uint32_t flex_bitmask_unit;
+		uint32_t max_flex_bitmask_num;
+	};
+
+	struct rte_eth_hash_global_conf_v22 {
+		enum rte_eth_hash_function hash_func;
+		uint32_t sym_hash_enable_mask[1];
+		uint32_t valid_bit_mask[1];
+	};
+
+	struct rte_eth_hash_filter_info_v22 {
+		enum rte_eth_hash_filter_info_type info_type;
+		union {
+			uint8_t enable;
+			struct rte_eth_hash_global_conf_v22 global_conf;
+			struct rte_eth_input_set_conf input_set_conf;
+		} info;
+	};
+
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+	dev = &rte_eth_devices[port_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+	if (filter_op == RTE_ETH_FILTER_INFO) {
+		int retval;
+		struct rte_eth_fdir_info_v22 *fdir_info_v22;
+		struct rte_eth_fdir_info fdir_info;
+
+		fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
+
+		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+			  filter_op, (void *)&fdir_info);
+		fdir_info_v22->mode = fdir_info.mode;
+		fdir_info_v22->mask = fdir_info.mask;
+		fdir_info_v22->flex_conf = fdir_info.flex_conf;
+		fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
+		fdir_info_v22->best_spc = fdir_info.best_spc;
+		fdir_info_v22->flow_types_mask[0] =
+			(uint32_t)fdir_info.flow_types_mask[0];
+		fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
+		fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
+		fdir_info_v22->max_flex_payload_segment_num =
+			fdir_info.max_flex_payload_segment_num;
+		fdir_info_v22->flex_payload_limit =
+			fdir_info.flex_payload_limit;
+		fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
+		fdir_info_v22->max_flex_bitmask_num =
+			fdir_info.max_flex_bitmask_num;
+		return retval;
+	} else if (filter_op == RTE_ETH_FILTER_GET) {
+		int retval;
+		struct rte_eth_hash_filter_info f_info;
+		struct rte_eth_hash_filter_info_v22 *f_info_v22 =
+			(struct rte_eth_hash_filter_info_v22 *)arg;
+
+		f_info.info_type = f_info_v22->info_type;
+		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+			  filter_op, (void *)&f_info);
+
+		switch (f_info_v22->info_type) {
+		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+			f_info_v22->info.enable = f_info.info.enable;
+			break;
+		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+			f_info_v22->info.global_conf.hash_func =
+				f_info.info.global_conf.hash_func;
+			f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
+				(uint32_t)
+				f_info.info.global_conf.sym_hash_enable_mask[0];
+			f_info_v22->info.global_conf.valid_bit_mask[0] =
+				(uint32_t)
+				f_info.info.global_conf.valid_bit_mask[0];
+			break;
+		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+			f_info_v22->info.input_set_conf =
+				f_info.info.input_set_conf;
+			break;
+		default:
+			break;
+		}
+		return retval;
+	} else if (filter_op == RTE_ETH_FILTER_SET) {
+		struct rte_eth_hash_filter_info f_info;
+		struct rte_eth_hash_filter_info_v22 *f_v22 =
+			(struct rte_eth_hash_filter_info_v22 *)arg;
+
+		f_info.info_type = f_v22->info_type;
+		switch (f_v22->info_type) {
+		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+			f_info.info.enable = f_v22->info.enable;
+			break;
+		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+			f_info.info.global_conf.hash_func =
+				f_v22->info.global_conf.hash_func;
+			f_info.info.global_conf.sym_hash_enable_mask[0] =
+				(uint32_t)
+				f_v22->info.global_conf.sym_hash_enable_mask[0];
+			f_info.info.global_conf.valid_bit_mask[0] =
+				(uint32_t)
+				f_v22->info.global_conf.valid_bit_mask[0];
+			break;
+		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+			f_info.info.input_set_conf =
+				f_v22->info.input_set_conf;
+			break;
+		default:
+			break;
+		}
+		return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
+						    (void *)&f_info);
+	} else
+		return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
+						    arg);
+}
+VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
+
+int
+rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
+			      enum rte_filter_type filter_type,
+			      enum rte_filter_op filter_op, void *arg);
+
+int
+rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
+			      enum rte_filter_type filter_type,
+			      enum rte_filter_op filter_op, void *arg)
 {
 	struct rte_eth_dev *dev;
 
@@ -3159,6 +3305,12 @@  rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
 	return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
 }
+BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
+MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
+		  enum rte_filter_type filter_type,
+		  enum rte_filter_op filter_op, void *arg),
+		  rte_eth_dev_filter_ctrl_v1802);
+
 
 void *
 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
diff --git a/lib/librte_ether/rte_ethdev_version.map b/lib/librte_ether/rte_ethdev_version.map
index e9681ac..2906a18 100644
--- a/lib/librte_ether/rte_ethdev_version.map
+++ b/lib/librte_ether/rte_ethdev_version.map
@@ -198,6 +198,13 @@  DPDK_17.11 {
 
 } DPDK_17.08;
 
+DPDK_18.02 {
+	global:
+
+	rte_eth_dev_filter_ctrl;
+
+} DPDK_17.11;
+
 EXPERIMENTAL {
 	global: