Merge ~paelzer/ubuntu/+source/dpdk:MRE-19.11.6-GROOVY into ubuntu/+source/dpdk:ubuntu/groovy-devel

Proposed by Christian Ehrhardt 
Status: Merged
Merged at revision: 4afebe11294e37366245adf9f448f8a23c3b7a08
Proposed branch: ~paelzer/ubuntu/+source/dpdk:MRE-19.11.6-GROOVY
Merge into: ubuntu/+source/dpdk:ubuntu/groovy-devel
Diff against target: 18933 lines (+5420/-3162)
368 files modified
MAINTAINERS (+11/-9)
VERSION (+1/-1)
app/test-bbdev/ldpc_enc_default.data (+1/-1)
app/test-crypto-perf/meson.build (+3/-0)
app/test-eventdev/evt_options.c (+4/-0)
app/test-pmd/bpf_cmd.c (+2/-2)
app/test-pmd/cmdline.c (+10/-1)
app/test-pmd/cmdline_flow.c (+2/-17)
app/test-pmd/config.c (+182/-50)
app/test-pmd/meson.build (+12/-0)
app/test-pmd/txonly.c (+32/-0)
app/test-sad/main.c (+1/-1)
app/test/meson.build (+2/-1)
app/test/test_cryptodev.c (+1/-3)
app/test/test_distributor.c (+140/-72)
app/test/test_event_crypto_adapter.c (+15/-29)
app/test/test_event_eth_tx_adapter.c (+2/-2)
app/test/test_mbuf.c (+7/-3)
app/test/test_rcu_qsbr.c (+33/-23)
app/test/test_ring.c (+2/-2)
buildtools/pkg-config/meson.build (+55/-0)
buildtools/pkg-config/set-static-linker-flags.py (+38/-0)
buildtools/pmdinfogen/pmdinfogen.h (+1/-1)
config/defconfig_arm64-graviton2-linux-gcc (+1/-0)
config/defconfig_arm64-graviton2-linuxapp-gcc (+13/-0)
config/defconfig_graviton2 (+1/-0)
config/meson.build (+9/-8)
config/rte_config.h (+3/-0)
debian/changelog (+8/-0)
debian/control (+2/-1)
devtools/check-forbidden-tokens.awk (+1/-1)
devtools/test-meson-builds.sh (+6/-7)
doc/build-sdk-meson.txt (+19/-11)
doc/guides/cryptodevs/features/octeontx.ini (+1/-0)
doc/guides/cryptodevs/features/octeontx2.ini (+1/-0)
doc/guides/linux_gsg/build_sample_apps.rst (+2/-2)
doc/guides/linux_gsg/enable_func.rst (+42/-18)
doc/guides/linux_gsg/linux_drivers.rst (+1/-1)
doc/guides/linux_gsg/nic_perf_intel_platform.rst (+3/-0)
doc/guides/linux_gsg/sys_reqs.rst (+48/-26)
doc/guides/nics/dpaa2.rst (+2/-2)
doc/guides/nics/features/iavf.ini (+1/-0)
doc/guides/nics/i40e.rst (+9/-0)
doc/guides/nics/nfp.rst (+27/-10)
doc/guides/nics/pcap_ring.rst (+1/-1)
doc/guides/nics/sfc_efx.rst (+2/-2)
doc/guides/prog_guide/kernel_nic_interface.rst (+1/-1)
doc/guides/prog_guide/multi_proc_support.rst (+1/-1)
doc/guides/prog_guide/packet_classif_access_ctrl.rst (+6/-0)
doc/guides/rel_notes/deprecation.rst (+0/-11)
doc/guides/rel_notes/release_19_11.rst (+561/-0)
doc/guides/sample_app_ug/flow_classify.rst (+1/-1)
doc/guides/sample_app_ug/flow_filtering.rst (+1/-1)
doc/guides/sample_app_ug/ipsec_secgw.rst (+2/-2)
doc/guides/sample_app_ug/l3_forward_access_ctrl.rst (+4/-4)
doc/guides/sample_app_ug/l3_forward_power_man.rst (+1/-1)
doc/guides/testpmd_app_ug/testpmd_funcs.rst (+3/-3)
drivers/baseband/fpga_lte_fec/fpga_lte_fec.c (+2/-2)
drivers/baseband/turbo_sw/bbdev_turbo_software.c (+26/-9)
drivers/bus/dpaa/base/qbman/qman_driver.c (+4/-6)
drivers/bus/fslmc/fslmc_vfio.c (+6/-3)
drivers/bus/fslmc/portal/dpaa2_hw_dpio.c (+7/-2)
drivers/bus/fslmc/qbman/qbman_portal.c (+2/-1)
drivers/bus/pci/bsd/pci.c (+0/-49)
drivers/bus/pci/linux/pci.c (+0/-12)
drivers/bus/pci/linux/pci_vfio.c (+5/-3)
drivers/bus/pci/private.h (+0/-15)
drivers/common/qat/qat_device.c (+1/-0)
drivers/compress/isal/isal_compress_pmd_ops.c (+17/-3)
drivers/crypto/aesni_mb/aesni_mb_pmd_private.h (+2/-2)
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c (+25/-26)
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c (+4/-4)
drivers/crypto/armv8/rte_armv8_pmd.c (+2/-2)
drivers/crypto/caam_jr/caam_jr.c (+9/-1)
drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c (+1/-7)
drivers/crypto/dpaa_sec/dpaa_sec.c (+2/-1)
drivers/crypto/octeontx/otx_cryptodev_ops.c (+1/-0)
drivers/crypto/octeontx2/otx2_cryptodev.c (+33/-25)
drivers/crypto/octeontx2/otx2_cryptodev.h (+2/-0)
drivers/crypto/octeontx2/otx2_cryptodev_ops.c (+17/-5)
drivers/crypto/scheduler/meson.build (+1/-1)
drivers/crypto/scheduler/scheduler_pmd_private.h (+0/-1)
drivers/event/dpaa2/dpaa2_eventdev.c (+4/-4)
drivers/event/dpaa2/dpaa2_eventdev_selftest.c (+1/-12)
drivers/event/octeontx2/otx2_evdev.c (+60/-40)
drivers/event/octeontx2/otx2_evdev.h (+12/-0)
drivers/mempool/octeontx/octeontx_fpavf.c (+3/-3)
drivers/meson.build (+5/-3)
drivers/net/af_xdp/rte_eth_af_xdp.c (+18/-12)
drivers/net/bnx2x/bnx2x_ethdev.c (+4/-0)
drivers/net/bnxt/bnxt.h (+10/-2)
drivers/net/bnxt/bnxt_cpr.c (+13/-1)
drivers/net/bnxt/bnxt_cpr.h (+1/-0)
drivers/net/bnxt/bnxt_ethdev.c (+46/-33)
drivers/net/bnxt/bnxt_filter.c (+9/-6)
drivers/net/bnxt/bnxt_hwrm.c (+31/-8)
drivers/net/bnxt/bnxt_hwrm.h (+1/-0)
drivers/net/bnxt/bnxt_ring.c (+1/-0)
drivers/net/bnxt/bnxt_ring.h (+21/-18)
drivers/net/bnxt/bnxt_rxq.c (+32/-21)
drivers/net/bnxt/bnxt_rxq.h (+4/-3)
drivers/net/bnxt/bnxt_rxr.c (+5/-8)
drivers/net/bnxt/bnxt_rxr.h (+0/-2)
drivers/net/bnxt/bnxt_rxtx_vec_sse.c (+2/-0)
drivers/net/bnxt/bnxt_stats.c (+0/-67)
drivers/net/bnxt/bnxt_txq.c (+21/-15)
drivers/net/bnxt/bnxt_txr.c (+2/-0)
drivers/net/bnxt/bnxt_txr.h (+0/-2)
drivers/net/bnxt/bnxt_vnic.c (+3/-0)
drivers/net/bonding/eth_bond_private.h (+2/-1)
drivers/net/bonding/rte_eth_bond_api.c (+0/-6)
drivers/net/bonding/rte_eth_bond_pmd.c (+9/-9)
drivers/net/cxgbe/base/adapter.h (+1/-0)
drivers/net/cxgbe/cxgbe_ethdev.c (+5/-6)
drivers/net/cxgbe/mps_tcam.c (+1/-0)
drivers/net/cxgbe/sge.c (+99/-76)
drivers/net/dpaa/dpaa_ethdev.c (+1/-1)
drivers/net/dpaa/rte_pmd_dpaa.h (+1/-1)
drivers/net/dpaa2/dpaa2_ethdev.c (+0/-2)
drivers/net/dpaa2/dpaa2_ethdev.h (+0/-4)
drivers/net/dpaa2/dpaa2_ptp.c (+2/-0)
drivers/net/dpaa2/meson.build (+2/-5)
drivers/net/ena/base/ena_com.c (+18/-14)
drivers/net/ena/base/ena_com.h (+2/-0)
drivers/net/ena/base/ena_plat_dpdk.h (+38/-29)
drivers/net/ena/ena_ethdev.c (+11/-9)
drivers/net/enic/enic_fm_flow.c (+35/-9)
drivers/net/failsafe/failsafe_ether.c (+18/-9)
drivers/net/fm10k/fm10k_ethdev.c (+6/-3)
drivers/net/fm10k/fm10k_rxtx_vec.c (+31/-8)
drivers/net/hinic/base/hinic_compat.h (+7/-1)
drivers/net/hinic/base/hinic_pmd_cfg.c (+5/-2)
drivers/net/hinic/base/hinic_pmd_cfg.h (+1/-0)
drivers/net/hinic/base/hinic_pmd_eqs.c (+4/-4)
drivers/net/hinic/base/hinic_pmd_eqs.h (+4/-0)
drivers/net/hinic/base/hinic_pmd_mbox.c (+70/-18)
drivers/net/hinic/base/hinic_pmd_mbox.h (+2/-5)
drivers/net/hinic/base/hinic_pmd_mgmt.c (+15/-8)
drivers/net/hinic/base/hinic_pmd_mgmt.h (+1/-1)
drivers/net/hinic/base/hinic_pmd_niccfg.c (+0/-38)
drivers/net/hinic/base/hinic_pmd_niccfg.h (+0/-2)
drivers/net/hinic/base/hinic_pmd_nicio.c (+0/-5)
drivers/net/hinic/hinic_pmd_ethdev.c (+35/-20)
drivers/net/hinic/hinic_pmd_flow.c (+9/-0)
drivers/net/hns3/hns3_dcb.c (+7/-1)
drivers/net/hns3/hns3_dcb.h (+12/-1)
drivers/net/hns3/hns3_ethdev.c (+101/-61)
drivers/net/hns3/hns3_ethdev.h (+37/-3)
drivers/net/hns3/hns3_ethdev_vf.c (+87/-17)
drivers/net/hns3/hns3_flow.c (+137/-108)
drivers/net/hns3/hns3_rss.c (+6/-7)
drivers/net/hns3/hns3_rss.h (+1/-0)
drivers/net/hns3/hns3_rxtx.c (+218/-149)
drivers/net/hns3/hns3_rxtx.h (+21/-9)
drivers/net/i40e/base/i40e_adminq_cmd.h (+1/-1)
drivers/net/i40e/base/i40e_common.c (+31/-9)
drivers/net/i40e/base/i40e_dcb.c (+2/-1)
drivers/net/i40e/base/i40e_nvm.c (+5/-3)
drivers/net/i40e/base/virtchnl.h (+15/-1)
drivers/net/i40e/i40e_ethdev.c (+41/-25)
drivers/net/i40e/i40e_ethdev.h (+23/-9)
drivers/net/i40e/i40e_ethdev_vf.c (+59/-5)
drivers/net/i40e/i40e_fdir.c (+197/-1)
drivers/net/i40e/i40e_flow.c (+39/-206)
drivers/net/i40e/i40e_rxtx_vec_altivec.c (+42/-17)
drivers/net/i40e/i40e_rxtx_vec_neon.c (+36/-12)
drivers/net/i40e/i40e_rxtx_vec_sse.c (+36/-12)
drivers/net/i40e/rte_pmd_i40e.h (+8/-0)
drivers/net/iavf/iavf.h (+8/-1)
drivers/net/iavf/iavf_ethdev.c (+74/-8)
drivers/net/iavf/iavf_rxtx.c (+3/-1)
drivers/net/iavf/iavf_rxtx_vec_sse.c (+37/-12)
drivers/net/iavf/iavf_vchnl.c (+61/-1)
drivers/net/ice/base/ice_adminq_cmd.h (+2/-2)
drivers/net/ice/base/ice_sched.c (+5/-1)
drivers/net/ice/ice_ethdev.c (+1/-1)
drivers/net/ice/ice_rxtx.h (+2/-2)
drivers/net/ice/ice_rxtx_vec_sse.c (+40/-16)
drivers/net/ice/ice_switch_filter.c (+81/-19)
drivers/net/ifc/base/ifcvf.h (+3/-0)
drivers/net/ixgbe/base/ixgbe_x540.c (+1/-1)
drivers/net/ixgbe/ixgbe_ethdev.c (+16/-3)
drivers/net/ixgbe/ixgbe_ethdev.h (+1/-1)
drivers/net/ixgbe/ixgbe_flow.c (+0/-9)
drivers/net/ixgbe/ixgbe_pf.c (+13/-3)
drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c (+38/-23)
drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c (+32/-15)
drivers/net/memif/rte_eth_memif.c (+21/-7)
drivers/net/mlx4/mlx4.c (+12/-9)
drivers/net/mlx5/mlx5.c (+10/-25)
drivers/net/mlx5/mlx5.h (+4/-6)
drivers/net/mlx5/mlx5_defs.h (+1/-1)
drivers/net/mlx5/mlx5_devx_cmds.c (+1/-1)
drivers/net/mlx5/mlx5_ethdev.c (+26/-14)
drivers/net/mlx5/mlx5_flow.c (+14/-1)
drivers/net/mlx5/mlx5_flow.h (+2/-2)
drivers/net/mlx5/mlx5_flow_dv.c (+2/-2)
drivers/net/mlx5/mlx5_mac.c (+0/-1)
drivers/net/mlx5/mlx5_mr.c (+1/-1)
drivers/net/mlx5/mlx5_rss.c (+3/-1)
drivers/net/mlx5/mlx5_rxq.c (+7/-1)
drivers/net/mlx5/mlx5_rxtx.c (+22/-12)
drivers/net/mlx5/mlx5_stats.c (+12/-4)
drivers/net/mlx5/mlx5_txq.c (+0/-4)
drivers/net/mlx5/mlx5_utils.h (+0/-4)
drivers/net/mvpp2/mrvl_mtr.c (+6/-6)
drivers/net/netvsc/hn_ethdev.c (+1/-1)
drivers/net/netvsc/hn_nvs.c (+9/-4)
drivers/net/netvsc/hn_rndis.c (+1/-1)
drivers/net/netvsc/hn_rxtx.c (+64/-35)
drivers/net/netvsc/hn_var.h (+10/-8)
drivers/net/netvsc/hn_vf.c (+46/-53)
drivers/net/nfp/nfp_net.c (+24/-6)
drivers/net/nfp/nfp_net_pmd.h (+6/-0)
drivers/net/octeontx2/otx2_ethdev.c (+32/-4)
drivers/net/octeontx2/otx2_ethdev.h (+2/-0)
drivers/net/octeontx2/otx2_ethdev_ops.c (+1/-8)
drivers/net/octeontx2/otx2_flow.c (+2/-0)
drivers/net/pcap/rte_eth_pcap.c (+9/-1)
drivers/net/pfe/pfe_ethdev.c (+0/-3)
drivers/net/qede/base/bcm_osal.h (+1/-2)
drivers/net/qede/base/ecore_sriov.c (+1/-1)
drivers/net/qede/qede_main.c (+4/-5)
drivers/net/qede/qede_rxtx.c (+2/-2)
drivers/net/ring/rte_eth_ring.c (+16/-2)
drivers/net/sfc/base/efx_tunnel.c (+9/-3)
drivers/net/sfc/sfc.c (+1/-0)
drivers/net/sfc/sfc.h (+2/-0)
drivers/net/sfc/sfc_ethdev.c (+34/-14)
drivers/net/sfc/sfc_flow.c (+57/-12)
drivers/net/sfc/sfc_flow.h (+2/-0)
drivers/net/sfc/sfc_rx.c (+8/-4)
drivers/net/sfc/sfc_rx.h (+1/-0)
drivers/net/softnic/parser.c (+4/-184)
drivers/net/tap/rte_eth_tap.c (+26/-17)
drivers/net/tap/rte_eth_tap.h (+1/-0)
drivers/net/thunderx/nicvf_ethdev.c (+1/-0)
drivers/net/vdev_netvsc/vdev_netvsc.c (+13/-14)
drivers/net/vhost/rte_eth_vhost.c (+38/-38)
drivers/net/virtio/virtio_ethdev.c (+11/-2)
drivers/net/virtio/virtio_rxtx.c (+3/-2)
drivers/net/virtio/virtio_user/vhost_kernel_tap.c (+1/-1)
drivers/net/virtio/virtqueue.h (+15/-2)
drivers/raw/dpaa2_qdma/dpaa2_qdma.c (+2/-1)
drivers/raw/ifpga/ifpga_rawdev.c (+30/-23)
drivers/raw/ioat/ioat_rawdev.c (+7/-0)
drivers/raw/skeleton/skeleton_rawdev.c (+3/-1)
drivers/raw/skeleton/skeleton_rawdev_test.c (+6/-0)
examples/bbdev_app/Makefile (+1/-1)
examples/bond/Makefile (+1/-1)
examples/cmdline/Makefile (+1/-1)
examples/distributor/Makefile (+1/-1)
examples/eventdev_pipeline/Makefile (+1/-1)
examples/fips_validation/Makefile (+3/-1)
examples/fips_validation/fips_validation.c (+40/-12)
examples/fips_validation/fips_validation.h (+3/-1)
examples/fips_validation/main.c (+4/-2)
examples/flow_classify/Makefile (+1/-1)
examples/flow_filtering/Makefile (+1/-1)
examples/helloworld/Makefile (+1/-1)
examples/ioat/Makefile (+1/-1)
examples/ioat/ioatfwd.c (+3/-3)
examples/ip_fragmentation/Makefile (+1/-1)
examples/ip_pipeline/Makefile (+2/-2)
examples/ip_pipeline/parser.c (+4/-186)
examples/ip_reassembly/Makefile (+1/-1)
examples/ipsec-secgw/Makefile (+1/-1)
examples/ipsec-secgw/parser.c (+5/-168)
examples/ipv4_multicast/Makefile (+1/-1)
examples/kni/Makefile (+3/-1)
examples/l2fwd-cat/Makefile (+1/-1)
examples/l2fwd-crypto/Makefile (+7/-1)
examples/l2fwd-crypto/meson.build (+3/-0)
examples/l2fwd-event/Makefile (+1/-1)
examples/l2fwd-jobstats/Makefile (+1/-1)
examples/l2fwd-keepalive/Makefile (+1/-1)
examples/l2fwd-keepalive/meson.build (+7/-1)
examples/l2fwd/Makefile (+1/-1)
examples/l3fwd-acl/Makefile (+1/-1)
examples/l3fwd-power/Makefile (+1/-1)
examples/l3fwd-power/main.c (+6/-3)
examples/l3fwd/Makefile (+1/-1)
examples/link_status_interrupt/Makefile (+1/-1)
examples/meson.build (+3/-10)
examples/multi_process/client_server_mp/mp_server/main.c (+10/-7)
examples/multi_process/client_server_mp/shared/common.h (+1/-1)
examples/ntb/Makefile (+2/-2)
examples/packet_ordering/Makefile (+1/-1)
examples/performance-thread/l3fwd-thread/main.c (+7/-2)
examples/ptpclient/Makefile (+1/-1)
examples/qos_meter/Makefile (+1/-1)
examples/qos_sched/Makefile (+1/-1)
examples/qos_sched/args.c (+1/-1)
examples/rxtx_callbacks/Makefile (+3/-1)
examples/service_cores/Makefile (+1/-1)
examples/skeleton/Makefile (+1/-1)
examples/tep_termination/Makefile (+1/-1)
examples/timer/Makefile (+1/-1)
examples/vdpa/Makefile (+1/-1)
examples/vhost/Makefile (+1/-1)
examples/vhost_blk/Makefile (+1/-1)
examples/vhost_blk/vhost_blk.c (+11/-1)
examples/vhost_crypto/Makefile (+1/-1)
examples/vhost_crypto/main.c (+1/-1)
examples/vm_power_manager/channel_manager.c (+8/-2)
examples/vm_power_manager/guest_cli/vm_power_cli_guest.c (+4/-2)
examples/vmdq/Makefile (+1/-1)
examples/vmdq_dcb/Makefile (+1/-1)
kernel/linux/kni/compat.h (+7/-1)
kernel/linux/kni/kni_dev.h (+5/-0)
lib/librte_acl/rte_acl.c (+1/-1)
lib/librte_cryptodev/rte_cryptodev_pmd.h (+2/-1)
lib/librte_distributor/distributor_private.h (+3/-0)
lib/librte_distributor/rte_distributor.c (+173/-44)
lib/librte_distributor/rte_distributor.h (+12/-11)
lib/librte_distributor/rte_distributor_single.c (+4/-0)
lib/librte_eal/common/eal_common_dev.c (+1/-0)
lib/librte_eal/common/eal_common_proc.c (+2/-2)
lib/librte_eal/common/include/arch/arm/meson.build (+2/-0)
lib/librte_eal/common/include/arch/arm/rte_atomic_64.h (+8/-0)
lib/librte_eal/common/include/arch/ppc_64/meson.build (+2/-0)
lib/librte_eal/common/include/arch/x86/meson.build (+2/-0)
lib/librte_eal/common/include/arch/x86/rte_memcpy.h (+1/-1)
lib/librte_eal/common/include/generic/rte_mcslock.h (+8/-1)
lib/librte_eal/common/include/generic/rte_memcpy.h (+4/-0)
lib/librte_eal/common/include/rte_eal.h (+3/-2)
lib/librte_eal/common/malloc_elem.c (+4/-4)
lib/librte_eal/freebsd/eal/eal_memory.c (+1/-1)
lib/librte_eal/linux/eal/eal_dev.c (+8/-4)
lib/librte_eal/linux/eal/eal_memalloc.c (+34/-11)
lib/librte_eal/linux/eal/eal_memory.c (+1/-1)
lib/librte_eal/linux/eal/eal_vfio.c (+13/-10)
lib/librte_eal/linux/eal/eal_vfio_mp_sync.c (+2/-2)
lib/librte_efd/rte_efd.c (+1/-0)
lib/librte_ethdev/rte_ethdev.c (+31/-13)
lib/librte_ethdev/rte_ethdev.h (+42/-15)
lib/librte_ethdev/rte_ethdev_pci.h (+0/-26)
lib/librte_ethdev/rte_ethdev_vdev.h (+0/-26)
lib/librte_ethdev/rte_flow.c (+15/-3)
lib/librte_eventdev/rte_event_crypto_adapter.c (+1/-0)
lib/librte_eventdev/rte_event_eth_tx_adapter.c (+2/-0)
lib/librte_gro/rte_gro.c (+10/-7)
lib/librte_gso/gso_udp4.c (+4/-1)
lib/librte_mbuf/rte_mbuf_dyn.c (+11/-19)
lib/librte_mbuf/rte_mbuf_dyn.h (+1/-1)
lib/librte_net/rte_ip.h (+3/-0)
lib/librte_port/rte_port_source_sink.c (+1/-1)
lib/librte_power/power_pstate_cpufreq.c (+59/-0)
lib/librte_rcu/rte_rcu_qsbr.c (+3/-3)
lib/librte_rcu/rte_rcu_qsbr.h (+5/-5)
lib/librte_stack/rte_stack_lf_c11.h (+3/-1)
lib/librte_stack/rte_stack_lf_generic.h (+1/-1)
lib/librte_table/rte_table_hash_key16.c (+17/-0)
lib/librte_table/rte_table_hash_key32.c (+17/-0)
lib/librte_table/rte_table_hash_key8.c (+16/-0)
lib/librte_timer/rte_timer.h (+12/-0)
lib/librte_vhost/iotlb.c (+1/-2)
lib/librte_vhost/vhost.c (+45/-14)
lib/librte_vhost/vhost_user.c (+52/-26)
lib/librte_vhost/virtio_net.c (+8/-22)
lib/meson.build (+5/-3)
license/bsd-2-clause.txt (+20/-0)
license/isc.txt (+11/-0)
license/mit.txt (+18/-0)
meson.build (+5/-23)
mk/machine/graviton2/rte.vars.mk (+34/-0)
usertools/cpu_layout.py (+1/-3)
usertools/dpdk-pmdinfo.py (+1/-1)
Reviewer                  Review Type    Date Requested    Status
Robie Basak                                                Approve
Canonical Server                                           Pending
git-ubuntu developers                                      Pending
Review via email: mp+396552@code.launchpad.net
Revision history for this message
Christian Ehrhardt (paelzer) wrote:

Builds are good; testing is ongoing but blocked by an RT that has to be resolved.
Nevertheless, this is ready for review by the team.

Revision history for this message
Robie Basak (racb):
review: Approve
Revision history for this message
Christian Ehrhardt (paelzer) wrote:

 * [new tag] upload/19.11.6-0ubuntu0.20.10.1 -> upload/19.11.6-0ubuntu0.20.10.1

Uploading to ubuntu (via ftp to upload.ubuntu.com):
  Uploading dpdk_19.11.6-0ubuntu0.20.10.1.dsc: done.
  Uploading dpdk_19.11.6.orig.tar.xz: done.
  Uploading dpdk_19.11.6-0ubuntu0.20.10.1.debian.tar.xz: done.
  Uploading dpdk_19.11.6-0ubuntu0.20.10.1_source.buildinfo: done.
  Uploading dpdk_19.11.6-0ubuntu0.20.10.1_source.changes: done.
Successfully uploaded packages.
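
The log above is the output of a source-only upload to the Ubuntu archive. The exact invocation is not shown; assuming the usual dput workflow it would have been a single command along these lines (the tool and target name are assumptions, only the .changes filename is taken from the log):

  # hypothetical reconstruction of the upload step, not part of the original log
  dput ubuntu dpdk_19.11.6-0ubuntu0.20.10.1_source.changes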

Preview Diff

1diff --git a/MAINTAINERS b/MAINTAINERS
2index c9b7dfa..952ded7 100644
3--- a/MAINTAINERS
4+++ b/MAINTAINERS
5@@ -46,7 +46,7 @@ M: Jerin Jacob <jerinj@marvell.com>
6 T: git://dpdk.org/next/dpdk-next-net-mrvl
7
8 Next-net-mlx Tree
9-M: Raslan Darawsheh <rasland@mellanox.com>
10+M: Raslan Darawsheh <rasland@nvidia.com>
11 T: git://dpdk.org/next/dpdk-next-net-mlx
12
13 Next-virtio Tree
14@@ -131,6 +131,8 @@ F: config/rte_config.h
15 F: buildtools/call-sphinx-build.py
16 F: buildtools/gen-pmdinfo-cfile.sh
17 F: buildtools/map_to_def.py
18+F: buildtools/list-dir-globs.py
19+F: buildtools/pkg-config/
20 F: buildtools/symlink-drivers-solibs.sh
21
22 Public CI
23@@ -371,7 +373,7 @@ F: devtools/test-null.sh
24 F: doc/guides/prog_guide/switch_representation.rst
25
26 Flow API
27-M: Ori Kam <orika@mellanox.com>
28+M: Ori Kam <orika@nvidia.com>
29 T: git://dpdk.org/next/dpdk-next-net
30 F: app/test-pmd/cmdline_flow.c
31 F: doc/guides/prog_guide/rte_flow.rst
32@@ -729,17 +731,17 @@ F: doc/guides/nics/features/octeontx2*.ini
33 F: doc/guides/nics/octeontx2.rst
34
35 Mellanox mlx4
36-M: Matan Azrad <matan@mellanox.com>
37-M: Shahaf Shuler <shahafs@mellanox.com>
38+M: Matan Azrad <matan@nvidia.com>
39+M: Shahaf Shuler <shahafs@nvidia.com>
40 T: git://dpdk.org/next/dpdk-next-net-mlx
41 F: drivers/net/mlx4/
42 F: doc/guides/nics/mlx4.rst
43 F: doc/guides/nics/features/mlx4.ini
44
45 Mellanox mlx5
46-M: Matan Azrad <matan@mellanox.com>
47-M: Shahaf Shuler <shahafs@mellanox.com>
48-M: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
49+M: Matan Azrad <matan@nvidia.com>
50+M: Shahaf Shuler <shahafs@nvidia.com>
51+M: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
52 T: git://dpdk.org/next/dpdk-next-net-mlx
53 F: drivers/net/mlx5/
54 F: buildtools/options-ibverbs-static.sh
55@@ -747,7 +749,7 @@ F: doc/guides/nics/mlx5.rst
56 F: doc/guides/nics/features/mlx5.ini
57
58 Microsoft vdev_netvsc - EXPERIMENTAL
59-M: Matan Azrad <matan@mellanox.com>
60+M: Matan Azrad <matan@nvidia.com>
61 F: drivers/net/vdev_netvsc/
62 F: doc/guides/nics/vdev_netvsc.rst
63 F: doc/guides/nics/features/vdev_netvsc.ini
64@@ -1492,7 +1494,7 @@ M: Marko Kovacevic <marko.kovacevic@intel.com>
65 F: examples/fips_validation/
66 F: doc/guides/sample_app_ug/fips_validation.rst
67
68-M: Ori Kam <orika@mellanox.com>
69+M: Ori Kam <orika@nvidia.com>
70 F: examples/flow_filtering/
71 F: doc/guides/sample_app_ug/flow_filtering.rst
72
73diff --git a/VERSION b/VERSION
74index f4594fa..729b18d 100644
75--- a/VERSION
76+++ b/VERSION
77@@ -1 +1 @@
78-19.11.5
79+19.11.6
80diff --git a/app/test-bbdev/ldpc_enc_default.data b/app/test-bbdev/ldpc_enc_default.data
81index 371cbc6..52d51ae 120000
82--- a/app/test-bbdev/ldpc_enc_default.data
83+++ b/app/test-bbdev/ldpc_enc_default.data
84@@ -1 +1 @@
85-test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data
86\ No newline at end of file
87+test_vectors/ldpc_enc_v2342.data
88\ No newline at end of file
89diff --git a/app/test-crypto-perf/meson.build b/app/test-crypto-perf/meson.build
90index 0674396..c416091 100644
91--- a/app/test-crypto-perf/meson.build
92+++ b/app/test-crypto-perf/meson.build
93@@ -13,3 +13,6 @@ sources = files('cperf_ops.c',
94 'cperf_test_verify.c',
95 'main.c')
96 deps += ['cryptodev', 'security']
97+if dpdk_conf.has('RTE_LIBRTE_PMD_CRYPTO_SCHEDULER')
98+ deps += 'pmd_crypto_scheduler'
99+endif
100diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c
101index c60b61a..4f4800d 100644
102--- a/app/test-eventdev/evt_options.c
103+++ b/app/test-eventdev/evt_options.c
104@@ -197,6 +197,10 @@ evt_parse_nb_timer_adptrs(struct evt_options *opt, const char *arg)
105 int ret;
106
107 ret = parser_read_uint8(&(opt->nb_timer_adptrs), arg);
108+ if (opt->nb_timer_adptrs <= 0) {
109+ evt_err("Number of timer adapters cannot be <= 0");
110+ return -EINVAL;
111+ }
112
113 return ret;
114 }
115diff --git a/app/test-pmd/bpf_cmd.c b/app/test-pmd/bpf_cmd.c
116index 830bfc1..d2deadd 100644
117--- a/app/test-pmd/bpf_cmd.c
118+++ b/app/test-pmd/bpf_cmd.c
119@@ -55,7 +55,7 @@ static const struct rte_bpf_xsym bpf_xsym[] = {
120 struct cmd_bpf_ld_result {
121 cmdline_fixed_string_t bpf;
122 cmdline_fixed_string_t dir;
123- uint8_t port;
124+ uint16_t port;
125 uint16_t queue;
126 cmdline_fixed_string_t op;
127 cmdline_fixed_string_t flags;
128@@ -153,7 +153,7 @@ cmdline_parse_inst_t cmd_operate_bpf_ld_parse = {
129 struct cmd_bpf_unld_result {
130 cmdline_fixed_string_t bpf;
131 cmdline_fixed_string_t dir;
132- uint8_t port;
133+ uint16_t port;
134 uint16_t queue;
135 };
136
137diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
138index 2d18b6c..9287fa3 100644
139--- a/app/test-pmd/cmdline.c
140+++ b/app/test-pmd/cmdline.c
141@@ -614,7 +614,7 @@ static void cmd_help_long_parsed(void *parsed_result,
142 "set bonding mode IEEE802.3AD aggregator policy (port_id) (agg_name)"
143 " Set Aggregation mode for IEEE802.3AD (mode 4)"
144
145- "set bonding xmit_balance_policy (port_id) (l2|l23|l34)\n"
146+ "set bonding balance_xmit_policy (port_id) (l2|l23|l34)\n"
147 " Set the transmit balance policy for bonded device running in balance mode.\n\n"
148
149 "set bonding mon_period (port_id) (value)\n"
150@@ -4168,6 +4168,9 @@ cmd_tx_vlan_set_parsed(void *parsed_result,
151 {
152 struct cmd_tx_vlan_set_result *res = parsed_result;
153
154+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
155+ return;
156+
157 if (!port_is_stopped(res->port_id)) {
158 printf("Please stop port %d first\n", res->port_id);
159 return;
160@@ -4222,6 +4225,9 @@ cmd_tx_vlan_set_qinq_parsed(void *parsed_result,
161 {
162 struct cmd_tx_vlan_set_qinq_result *res = parsed_result;
163
164+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
165+ return;
166+
167 if (!port_is_stopped(res->port_id)) {
168 printf("Please stop port %d first\n", res->port_id);
169 return;
170@@ -4335,6 +4341,9 @@ cmd_tx_vlan_reset_parsed(void *parsed_result,
171 {
172 struct cmd_tx_vlan_reset_result *res = parsed_result;
173
174+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
175+ return;
176+
177 if (!port_is_stopped(res->port_id)) {
178 printf("Please stop port %d first\n", res->port_id);
179 return;
180diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
181index deced65..be39f4b 100644
182--- a/app/test-pmd/cmdline_flow.c
183+++ b/app/test-pmd/cmdline_flow.c
184@@ -3900,30 +3900,15 @@ parse_vc_action_rss(struct context *ctx, const struct token *token,
185 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
186 .level = 0,
187 .types = rss_hf,
188- .key_len = sizeof(action_rss_data->key),
189+ .key_len = 0,
190 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
191- .key = action_rss_data->key,
192+ .key = NULL,
193 .queue = action_rss_data->queue,
194 },
195- .key = "testpmd's default RSS hash key, "
196- "override it for better balancing",
197 .queue = { 0 },
198 };
199 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
200 action_rss_data->queue[i] = i;
201- if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
202- ctx->port != (portid_t)RTE_PORT_ALL) {
203- struct rte_eth_dev_info info;
204- int ret2;
205-
206- ret2 = rte_eth_dev_info_get(ctx->port, &info);
207- if (ret2 != 0)
208- return ret2;
209-
210- action_rss_data->conf.key_len =
211- RTE_MIN(sizeof(action_rss_data->key),
212- info.hash_key_size);
213- }
214 action->conf = &action_rss_data->conf;
215 return ret;
216 }
217diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
218index 726a263..5f8905c 100644
219--- a/app/test-pmd/config.c
220+++ b/app/test-pmd/config.c
221@@ -1568,7 +1568,7 @@ port_flow_query(portid_t port_id, uint32_t rule,
222
223 /** List flow rules. */
224 void
225-port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
226+port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
227 {
228 struct rte_port *port;
229 struct port_flow *pf;
230@@ -1685,22 +1685,102 @@ tx_queue_id_is_invalid(queueid_t txq_id)
231 }
232
233 static int
234-rx_desc_id_is_invalid(uint16_t rxdesc_id)
235+get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
236 {
237- if (rxdesc_id < nb_rxd)
238+ struct rte_port *port = &ports[port_id];
239+ struct rte_eth_rxq_info rx_qinfo;
240+ int ret;
241+
242+ ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
243+ if (ret == 0) {
244+ *ring_size = rx_qinfo.nb_desc;
245+ return ret;
246+ }
247+
248+ if (ret != -ENOTSUP)
249+ return ret;
250+ /*
251+ * If the rte_eth_rx_queue_info_get is not support for this PMD,
252+ * ring_size stored in testpmd will be used for validity verification.
253+ * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc
254+ * being 0, it will use a default value provided by PMDs to setup this
255+ * rxq. If the default value is 0, it will use the
256+ * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq.
257+ */
258+ if (port->nb_rx_desc[rxq_id])
259+ *ring_size = port->nb_rx_desc[rxq_id];
260+ else if (port->dev_info.default_rxportconf.ring_size)
261+ *ring_size = port->dev_info.default_rxportconf.ring_size;
262+ else
263+ *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
264+ return 0;
265+}
266+
267+static int
268+get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
269+{
270+ struct rte_port *port = &ports[port_id];
271+ struct rte_eth_txq_info tx_qinfo;
272+ int ret;
273+
274+ ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
275+ if (ret == 0) {
276+ *ring_size = tx_qinfo.nb_desc;
277+ return ret;
278+ }
279+
280+ if (ret != -ENOTSUP)
281+ return ret;
282+ /*
283+ * If the rte_eth_tx_queue_info_get is not support for this PMD,
284+ * ring_size stored in testpmd will be used for validity verification.
285+ * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc
286+ * being 0, it will use a default value provided by PMDs to setup this
287+ * txq. If the default value is 0, it will use the
288+ * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq.
289+ */
290+ if (port->nb_tx_desc[txq_id])
291+ *ring_size = port->nb_tx_desc[txq_id];
292+ else if (port->dev_info.default_txportconf.ring_size)
293+ *ring_size = port->dev_info.default_txportconf.ring_size;
294+ else
295+ *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
296+ return 0;
297+}
298+
299+static int
300+rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
301+{
302+ uint16_t ring_size;
303+ int ret;
304+
305+ ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
306+ if (ret)
307+ return 1;
308+
309+ if (rxdesc_id < ring_size)
310 return 0;
311- printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
312- rxdesc_id, nb_rxd);
313+
314+ printf("Invalid RX descriptor %u (must be < ring_size=%u)\n",
315+ rxdesc_id, ring_size);
316 return 1;
317 }
318
319 static int
320-tx_desc_id_is_invalid(uint16_t txdesc_id)
321+tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
322 {
323- if (txdesc_id < nb_txd)
324+ uint16_t ring_size;
325+ int ret;
326+
327+ ret = get_tx_ring_size(port_id, txq_id, &ring_size);
328+ if (ret)
329+ return 1;
330+
331+ if (txdesc_id < ring_size)
332 return 0;
333- printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
334- txdesc_id, nb_txd);
335+
336+ printf("Invalid TX descriptor %u (must be < ring_size=%u)\n",
337+ txdesc_id, ring_size);
338 return 1;
339 }
340
341@@ -1821,11 +1901,7 @@ rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
342 {
343 const struct rte_memzone *rx_mz;
344
345- if (port_id_is_invalid(port_id, ENABLED_WARN))
346- return;
347- if (rx_queue_id_is_invalid(rxq_id))
348- return;
349- if (rx_desc_id_is_invalid(rxd_id))
350+ if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
351 return;
352 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
353 if (rx_mz == NULL)
354@@ -1838,11 +1914,7 @@ tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
355 {
356 const struct rte_memzone *tx_mz;
357
358- if (port_id_is_invalid(port_id, ENABLED_WARN))
359- return;
360- if (tx_queue_id_is_invalid(txq_id))
361- return;
362- if (tx_desc_id_is_invalid(txd_id))
363+ if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
364 return;
365 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
366 if (tx_mz == NULL)
367@@ -1883,10 +1955,17 @@ rxtx_config_display(void)
368 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
369 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
370 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
371- uint16_t nb_rx_desc_tmp;
372- uint16_t nb_tx_desc_tmp;
373 struct rte_eth_rxq_info rx_qinfo;
374 struct rte_eth_txq_info tx_qinfo;
375+ uint16_t rx_free_thresh_tmp;
376+ uint16_t tx_free_thresh_tmp;
377+ uint16_t tx_rs_thresh_tmp;
378+ uint16_t nb_rx_desc_tmp;
379+ uint16_t nb_tx_desc_tmp;
380+ uint64_t offloads_tmp;
381+ uint8_t pthresh_tmp;
382+ uint8_t hthresh_tmp;
383+ uint8_t wthresh_tmp;
384 int32_t rc;
385
386 /* per port config */
387@@ -1900,41 +1979,64 @@ rxtx_config_display(void)
388 /* per rx queue config only for first queue to be less verbose */
389 for (qid = 0; qid < 1; qid++) {
390 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
391- if (rc)
392+ if (rc) {
393 nb_rx_desc_tmp = nb_rx_desc[qid];
394- else
395+ rx_free_thresh_tmp =
396+ rx_conf[qid].rx_free_thresh;
397+ pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
398+ hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
399+ wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
400+ offloads_tmp = rx_conf[qid].offloads;
401+ } else {
402 nb_rx_desc_tmp = rx_qinfo.nb_desc;
403+ rx_free_thresh_tmp =
404+ rx_qinfo.conf.rx_free_thresh;
405+ pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
406+ hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
407+ wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
408+ offloads_tmp = rx_qinfo.conf.offloads;
409+ }
410
411 printf(" RX queue: %d\n", qid);
412 printf(" RX desc=%d - RX free threshold=%d\n",
413- nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
414+ nb_rx_desc_tmp, rx_free_thresh_tmp);
415 printf(" RX threshold registers: pthresh=%d hthresh=%d "
416 " wthresh=%d\n",
417- rx_conf[qid].rx_thresh.pthresh,
418- rx_conf[qid].rx_thresh.hthresh,
419- rx_conf[qid].rx_thresh.wthresh);
420- printf(" RX Offloads=0x%"PRIx64"\n",
421- rx_conf[qid].offloads);
422+ pthresh_tmp, hthresh_tmp, wthresh_tmp);
423+ printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp);
424 }
425
426 /* per tx queue config only for first queue to be less verbose */
427 for (qid = 0; qid < 1; qid++) {
428 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
429- if (rc)
430+ if (rc) {
431 nb_tx_desc_tmp = nb_tx_desc[qid];
432- else
433+ tx_free_thresh_tmp =
434+ tx_conf[qid].tx_free_thresh;
435+ pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
436+ hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
437+ wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
438+ offloads_tmp = tx_conf[qid].offloads;
439+ tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
440+ } else {
441 nb_tx_desc_tmp = tx_qinfo.nb_desc;
442+ tx_free_thresh_tmp =
443+ tx_qinfo.conf.tx_free_thresh;
444+ pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
445+ hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
446+ wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
447+ offloads_tmp = tx_qinfo.conf.offloads;
448+ tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
449+ }
450
451 printf(" TX queue: %d\n", qid);
452 printf(" TX desc=%d - TX free threshold=%d\n",
453- nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
454+ nb_tx_desc_tmp, tx_free_thresh_tmp);
455 printf(" TX threshold registers: pthresh=%d hthresh=%d "
456 " wthresh=%d\n",
457- tx_conf[qid].tx_thresh.pthresh,
458- tx_conf[qid].tx_thresh.hthresh,
459- tx_conf[qid].tx_thresh.wthresh);
460+ pthresh_tmp, hthresh_tmp, wthresh_tmp);
461 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
462- tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
463+ offloads_tmp, tx_rs_thresh_tmp);
464 }
465 }
466 }
467@@ -2579,6 +2681,10 @@ set_fwd_lcores_mask(uint64_t lcoremask)
468 void
469 set_fwd_lcores_number(uint16_t nb_lc)
470 {
471+ if (test_done == 0) {
472+ printf("Please stop forwarding first\n");
473+ return;
474+ }
475 if (nb_lc > nb_cfg_lcores) {
476 printf("nb fwd cores %u > %u (max. number of configured "
477 "lcores) - ignored\n",
478@@ -2726,17 +2832,41 @@ show_tx_pkt_segments(void)
479 printf("Split packet: %s\n", split);
480 }
481
482+static bool
483+nb_segs_is_invalid(unsigned int nb_segs)
484+{
485+ uint16_t ring_size;
486+ uint16_t queue_id;
487+ uint16_t port_id;
488+ int ret;
489+
490+ RTE_ETH_FOREACH_DEV(port_id) {
491+ for (queue_id = 0; queue_id < nb_txq; queue_id++) {
492+ ret = get_tx_ring_size(port_id, queue_id, &ring_size);
493+
494+ if (ret)
495+ return true;
496+
497+ if (ring_size < nb_segs) {
498+ printf("nb segments per TX packets=%u >= "
499+ "TX queue(%u) ring_size=%u - ignored\n",
500+ nb_segs, queue_id, ring_size);
501+ return true;
502+ }
503+ }
504+ }
505+
506+ return false;
507+}
508+
509 void
510 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
511 {
512 uint16_t tx_pkt_len;
513 unsigned i;
514
515- if (nb_segs >= (unsigned) nb_txd) {
516- printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
517- nb_segs, (unsigned int) nb_txd);
518+ if (nb_segs_is_invalid(nb_segs))
519 return;
520- }
521
522 /*
523 * Check that each segment length is greater or equal than
524@@ -3080,9 +3210,11 @@ vlan_extend_set(portid_t port_id, int on)
525 }
526
527 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
528- if (diag < 0)
529+ if (diag < 0) {
530 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
531 "diag=%d\n", port_id, on, diag);
532+ return;
533+ }
534 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
535 }
536
537@@ -3107,9 +3239,11 @@ rx_vlan_strip_set(portid_t port_id, int on)
538 }
539
540 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
541- if (diag < 0)
542+ if (diag < 0) {
543 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
544 "diag=%d\n", port_id, on, diag);
545+ return;
546+ }
547 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
548 }
549
550@@ -3148,9 +3282,11 @@ rx_vlan_filter_set(portid_t port_id, int on)
551 }
552
553 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
554- if (diag < 0)
555+ if (diag < 0) {
556 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
557 "diag=%d\n", port_id, on, diag);
558+ return;
559+ }
560 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
561 }
562
563@@ -3175,9 +3311,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on)
564 }
565
566 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
567- if (diag < 0)
568+ if (diag < 0) {
569 printf("%s(port_pi=%d, on=%d) failed "
570 "diag=%d\n", __func__, port_id, on, diag);
571+ return;
572+ }
573 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
574 }
575
576@@ -3235,8 +3373,6 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
577 struct rte_eth_dev_info dev_info;
578 int ret;
579
580- if (port_id_is_invalid(port_id, ENABLED_WARN))
581- return;
582 if (vlan_id_is_invalid(vlan_id))
583 return;
584
585@@ -3267,8 +3403,6 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
586 struct rte_eth_dev_info dev_info;
587 int ret;
588
589- if (port_id_is_invalid(port_id, ENABLED_WARN))
590- return;
591 if (vlan_id_is_invalid(vlan_id))
592 return;
593 if (vlan_id_is_invalid(vlan_id_outer))
594@@ -3294,8 +3428,6 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
595 void
596 tx_vlan_reset(portid_t port_id)
597 {
598- if (port_id_is_invalid(port_id, ENABLED_WARN))
599- return;
600 ports[port_id].dev_conf.txmode.offloads &=
601 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
602 DEV_TX_OFFLOAD_QINQ_INSERT);
603diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
604index 6006c60..b0249bd 100644
605--- a/app/test-pmd/meson.build
606+++ b/app/test-pmd/meson.build
607@@ -28,6 +28,18 @@ deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci']
608 if dpdk_conf.has('RTE_LIBRTE_PDUMP')
609 deps += 'pdump'
610 endif
611+if dpdk_conf.has('RTE_LIBRTE_BITRATESTATS')
612+ deps += 'bitratestats'
613+endif
614+if dpdk_conf.has('RTE_LIBRTE_LATENCYSTATS')
615+ deps += 'latencystats'
616+endif
617+if dpdk_conf.has('RTE_LIBRTE_PMD_CRYPTO_SCHEDULER')
618+ deps += 'pmd_crypto_scheduler'
619+endif
620+if dpdk_conf.has('RTE_LIBRTE_BOND_PMD')
621+ deps += 'pmd_bond'
622+endif
623 if dpdk_conf.has('RTE_LIBRTE_BNXT_PMD')
624 deps += 'pmd_bnxt'
625 endif
626diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
627index 8ed436d..a1822c6 100644
628--- a/app/test-pmd/txonly.c
629+++ b/app/test-pmd/txonly.c
630@@ -147,6 +147,34 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
631 ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
632 }
633
634+static inline void
635+update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
636+{
637+ struct rte_ipv4_hdr *ip_hdr;
638+ struct rte_udp_hdr *udp_hdr;
639+ uint16_t pkt_data_len;
640+ uint16_t pkt_len;
641+
642+ pkt_data_len = (uint16_t) (total_pkt_len - (
643+ sizeof(struct rte_ether_hdr) +
644+ sizeof(struct rte_ipv4_hdr) +
645+ sizeof(struct rte_udp_hdr)));
646+ /* updata udp pkt length */
647+ udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
648+ sizeof(struct rte_ether_hdr) +
649+ sizeof(struct rte_ipv4_hdr));
650+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
651+ udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
652+
653+ /* updata ip pkt length and csum */
654+ ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
655+ sizeof(struct rte_ether_hdr));
656+ ip_hdr->hdr_checksum = 0;
657+ pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
658+ ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
659+ ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
660+}
661+
662 static inline bool
663 pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
664 struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
665@@ -212,6 +240,10 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
666 copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
667 sizeof(struct rte_ether_hdr) +
668 sizeof(struct rte_ipv4_hdr));
669+
670+ if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
671+ update_pkt_header(pkt, pkt_len);
672+
673 /*
674 * Complete first mbuf of packet and append it to the
675 * burst of packets to be transmitted.
676diff --git a/app/test-sad/main.c b/app/test-sad/main.c
677index b01e84c..8380fad 100644
678--- a/app/test-sad/main.c
679+++ b/app/test-sad/main.c
680@@ -617,7 +617,7 @@ main(int argc, char **argv)
681 {
682 int ret;
683 struct rte_ipsec_sad *sad;
684- struct rte_ipsec_sad_conf conf;
685+ struct rte_ipsec_sad_conf conf = {0};
686 unsigned int lcore_id;
687
688 ret = rte_eal_init(argc, argv);
689diff --git a/app/test/meson.build b/app/test/meson.build
690index 6d4039b..1fa9124 100644
691--- a/app/test/meson.build
692+++ b/app/test/meson.build
693@@ -380,6 +380,7 @@ endif
694
695 if dpdk_conf.has('RTE_LIBRTE_PMD_CRYPTO_SCHEDULER')
696 driver_test_names += 'cryptodev_scheduler_autotest'
697+ test_deps += 'pmd_crypto_scheduler'
698 endif
699
700 foreach d:test_deps
701@@ -390,7 +391,7 @@ test_dep_objs += cc.find_library('execinfo', required: false)
702
703 link_libs = []
704 if get_option('default_library') == 'static'
705- link_libs = dpdk_drivers
706+ link_libs = dpdk_static_libraries + dpdk_drivers
707 endif
708
709 dpdk_test = executable('dpdk-test',
710diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
711index fd1056d..a852040 100644
712--- a/app/test/test_cryptodev.c
713+++ b/app/test/test_cryptodev.c
714@@ -643,7 +643,7 @@ test_device_configure_invalid_dev_id(void)
715 "Need at least %d devices for test", 1);
716
717 /* valid dev_id values */
718- dev_id = ts_params->valid_devs[ts_params->valid_dev_count - 1];
719+ dev_id = ts_params->valid_devs[0];
720
721 /* Stop the device in case it's started so it can be configured */
722 rte_cryptodev_stop(dev_id);
723@@ -9425,9 +9425,7 @@ test_stats(void)
724 "rte_cryptodev_stats_get invalid Param failed");
725
726 /* Test expected values */
727- ut_setup();
728 test_AES_CBC_HMAC_SHA1_encrypt_digest();
729- ut_teardown();
730 TEST_ASSERT_SUCCESS(rte_cryptodev_stats_get(ts_params->valid_devs[0],
731 &stats),
732 "rte_cryptodev_stats_get failed");
733diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
734index ba1f81c..3b2a4cb 100644
735--- a/app/test/test_distributor.c
736+++ b/app/test/test_distributor.c
737@@ -27,7 +27,9 @@ struct worker_params worker_params;
738 /* statics - all zero-initialized by default */
739 static volatile int quit; /**< general quit variable for all threads */
740 static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
741+static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
742 static volatile unsigned worker_idx;
743+static volatile unsigned zero_idx;
744
745 struct worker_stats {
746 volatile unsigned handled_packets;
747@@ -42,7 +44,8 @@ total_packet_count(void)
748 {
749 unsigned i, count = 0;
750 for (i = 0; i < worker_idx; i++)
751- count += worker_stats[i].handled_packets;
752+ count += __atomic_load_n(&worker_stats[i].handled_packets,
753+ __ATOMIC_RELAXED);
754 return count;
755 }
756
757@@ -50,7 +53,10 @@ total_packet_count(void)
758 static inline void
759 clear_packet_count(void)
760 {
761- memset(&worker_stats, 0, sizeof(worker_stats));
762+ unsigned int i;
763+ for (i = 0; i < RTE_MAX_LCORE; i++)
764+ __atomic_store_n(&worker_stats[i].handled_packets, 0,
765+ __ATOMIC_RELAXED);
766 }
767
768 /* this is the basic worker function for sanity test
769@@ -62,23 +68,18 @@ handle_work(void *arg)
770 struct rte_mbuf *buf[8] __rte_cache_aligned;
771 struct worker_params *wp = arg;
772 struct rte_distributor *db = wp->dist;
773- unsigned int count = 0, num = 0;
774+ unsigned int num;
775 unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
776- int i;
777
778- for (i = 0; i < 8; i++)
779- buf[i] = NULL;
780- num = rte_distributor_get_pkt(db, id, buf, buf, num);
781+ num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
782 while (!quit) {
783 __atomic_fetch_add(&worker_stats[id].handled_packets, num,
784 __ATOMIC_RELAXED);
785- count += num;
786 num = rte_distributor_get_pkt(db, id,
787 buf, buf, num);
788 }
789 __atomic_fetch_add(&worker_stats[id].handled_packets, num,
790 __ATOMIC_RELAXED);
791- count += num;
792 rte_distributor_return_pkt(db, id, buf, num);
793 return 0;
794 }
795@@ -102,6 +103,7 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
796 struct rte_mbuf *returns[BURST*2];
797 unsigned int i, count;
798 unsigned int retries;
799+ unsigned int processed;
800
801 printf("=== Basic distributor sanity tests ===\n");
802 clear_packet_count();
803@@ -115,7 +117,11 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
804 for (i = 0; i < BURST; i++)
805 bufs[i]->hash.usr = 0;
806
807- rte_distributor_process(db, bufs, BURST);
808+ processed = 0;
809+ while (processed < BURST)
810+ processed += rte_distributor_process(db, &bufs[processed],
811+ BURST - processed);
812+
813 count = 0;
814 do {
815
816@@ -128,12 +134,14 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
817 printf("Line %d: Error, not all packets flushed. "
818 "Expected %u, got %u\n",
819 __LINE__, BURST, total_packet_count());
820+ rte_mempool_put_bulk(p, (void *)bufs, BURST);
821 return -1;
822 }
823
824 for (i = 0; i < rte_lcore_count() - 1; i++)
825 printf("Worker %u handled %u packets\n", i,
826- worker_stats[i].handled_packets);
827+ __atomic_load_n(&worker_stats[i].handled_packets,
828+ __ATOMIC_RELAXED));
829 printf("Sanity test with all zero hashes done.\n");
830
831 /* pick two flows and check they go correctly */
832@@ -153,12 +161,15 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
833 printf("Line %d: Error, not all packets flushed. "
834 "Expected %u, got %u\n",
835 __LINE__, BURST, total_packet_count());
836+ rte_mempool_put_bulk(p, (void *)bufs, BURST);
837 return -1;
838 }
839
840 for (i = 0; i < rte_lcore_count() - 1; i++)
841 printf("Worker %u handled %u packets\n", i,
842- worker_stats[i].handled_packets);
843+ __atomic_load_n(
844+ &worker_stats[i].handled_packets,
845+ __ATOMIC_RELAXED));
846 printf("Sanity test with two hash values done\n");
847 }
848
849@@ -179,12 +190,14 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
850 printf("Line %d: Error, not all packets flushed. "
851 "Expected %u, got %u\n",
852 __LINE__, BURST, total_packet_count());
853+ rte_mempool_put_bulk(p, (void *)bufs, BURST);
854 return -1;
855 }
856
857 for (i = 0; i < rte_lcore_count() - 1; i++)
858 printf("Worker %u handled %u packets\n", i,
859- worker_stats[i].handled_packets);
860+ __atomic_load_n(&worker_stats[i].handled_packets,
861+ __ATOMIC_RELAXED));
862 printf("Sanity test with non-zero hashes done\n");
863
864 rte_mempool_put_bulk(p, (void *)bufs, BURST);
865@@ -233,6 +246,7 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
866 if (num_returned != BIG_BATCH) {
867 printf("line %d: Missing packets, expected %d\n",
868 __LINE__, num_returned);
869+ rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);
870 return -1;
871 }
872
873@@ -247,6 +261,7 @@ sanity_test(struct worker_params *wp, struct rte_mempool *p)
874
875 if (j == BIG_BATCH) {
876 printf("Error: could not find source packet #%u\n", i);
877+ rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);
878 return -1;
879 }
880 }
881@@ -270,24 +285,20 @@ handle_work_with_free_mbufs(void *arg)
882 struct rte_mbuf *buf[8] __rte_cache_aligned;
883 struct worker_params *wp = arg;
884 struct rte_distributor *d = wp->dist;
885- unsigned int count = 0;
886 unsigned int i;
887- unsigned int num = 0;
888+ unsigned int num;
889 unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
890
891- for (i = 0; i < 8; i++)
892- buf[i] = NULL;
893- num = rte_distributor_get_pkt(d, id, buf, buf, num);
894+ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
895 while (!quit) {
896- worker_stats[id].handled_packets += num;
897- count += num;
898+ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
899+ __ATOMIC_RELAXED);
900 for (i = 0; i < num; i++)
901 rte_pktmbuf_free(buf[i]);
902- num = rte_distributor_get_pkt(d,
903- id, buf, buf, num);
904+ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
905 }
906- worker_stats[id].handled_packets += num;
907- count += num;
908+ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
909+ __ATOMIC_RELAXED);
910 rte_distributor_return_pkt(d, id, buf, num);
911 return 0;
912 }
913@@ -303,6 +314,7 @@ sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
914 struct rte_distributor *d = wp->dist;
915 unsigned i;
916 struct rte_mbuf *bufs[BURST];
917+ unsigned int processed;
918
919 printf("=== Sanity test with mbuf alloc/free (%s) ===\n", wp->name);
920
921@@ -313,10 +325,12 @@ sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
922 rte_distributor_process(d, NULL, 0);
923 for (j = 0; j < BURST; j++) {
924 bufs[j]->hash.usr = (i+j) << 1;
925- rte_mbuf_refcnt_set(bufs[j], 1);
926 }
927
928- rte_distributor_process(d, bufs, BURST);
929+ processed = 0;
930+ while (processed < BURST)
931+ processed += rte_distributor_process(d,
932+ &bufs[processed], BURST - processed);
933 }
934
935 rte_distributor_flush(d);
936@@ -337,55 +351,61 @@ sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
937 static int
938 handle_work_for_shutdown_test(void *arg)
939 {
940- struct rte_mbuf *pkt = NULL;
941 struct rte_mbuf *buf[8] __rte_cache_aligned;
942 struct worker_params *wp = arg;
943 struct rte_distributor *d = wp->dist;
944- unsigned int count = 0;
945- unsigned int num = 0;
946- unsigned int total = 0;
947- unsigned int i;
948- unsigned int returned = 0;
949+ unsigned int num;
950+ unsigned int zero_id = 0;
951+ unsigned int zero_unset;
952 const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
953 __ATOMIC_RELAXED);
954
955- num = rte_distributor_get_pkt(d, id, buf, buf, num);
956+ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
957+
958+ if (num > 0) {
959+ zero_unset = RTE_MAX_LCORE;
960+ __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
961+ 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
962+ }
963+ zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
964
965 /* wait for quit single globally, or for worker zero, wait
966 * for zero_quit */
967- while (!quit && !(id == 0 && zero_quit)) {
968- worker_stats[id].handled_packets += num;
969- count += num;
970- for (i = 0; i < num; i++)
971- rte_pktmbuf_free(buf[i]);
972- num = rte_distributor_get_pkt(d,
973- id, buf, buf, num);
974- total += num;
975+ while (!quit && !(id == zero_id && zero_quit)) {
976+ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
977+ __ATOMIC_RELAXED);
978+ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
979+
980+ if (num > 0) {
981+ zero_unset = RTE_MAX_LCORE;
982+ __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
983+ 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
984+ }
985+ zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
986 }
987- worker_stats[id].handled_packets += num;
988- count += num;
989- returned = rte_distributor_return_pkt(d, id, buf, num);
990
991- if (id == 0) {
992+ __atomic_fetch_add(&worker_stats[id].handled_packets, num,
993+ __ATOMIC_RELAXED);
994+ if (id == zero_id) {
995+ rte_distributor_return_pkt(d, id, NULL, 0);
996+
997 /* for worker zero, allow it to restart to pick up last packet
998 * when all workers are shutting down.
999 */
1000+ __atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
1001 while (zero_quit)
1002 usleep(100);
1003+ __atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
1004
1005- num = rte_distributor_get_pkt(d,
1006- id, buf, buf, num);
1007+ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
1008
1009 while (!quit) {
1010- worker_stats[id].handled_packets += num;
1011- count += num;
1012- rte_pktmbuf_free(pkt);
1013- num = rte_distributor_get_pkt(d, id, buf, buf, num);
1014+ __atomic_fetch_add(&worker_stats[id].handled_packets,
1015+ num, __ATOMIC_RELAXED);
1016+ num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
1017 }
1018- returned = rte_distributor_return_pkt(d,
1019- id, buf, num);
1020- printf("Num returned = %d\n", returned);
1021 }
1022+ rte_distributor_return_pkt(d, id, buf, num);
1023 return 0;
1024 }
1025
1026@@ -401,7 +421,10 @@ sanity_test_with_worker_shutdown(struct worker_params *wp,
1027 {
1028 struct rte_distributor *d = wp->dist;
1029 struct rte_mbuf *bufs[BURST];
1030- unsigned i;
1031+ struct rte_mbuf *bufs2[BURST];
1032+ unsigned int i;
1033+ unsigned int failed = 0;
1034+ unsigned int processed = 0;
1035
1036 printf("=== Sanity test of worker shutdown ===\n");
1037
1038@@ -419,7 +442,10 @@ sanity_test_with_worker_shutdown(struct worker_params *wp,
1039 for (i = 0; i < BURST; i++)
1040 bufs[i]->hash.usr = 1;
1041
1042- rte_distributor_process(d, bufs, BURST);
1043+ processed = 0;
1044+ while (processed < BURST)
1045+ processed += rte_distributor_process(d, &bufs[processed],
1046+ BURST - processed);
1047 rte_distributor_flush(d);
1048
1049 /* at this point, we will have processed some packets and have a full
1050@@ -427,32 +453,45 @@ sanity_test_with_worker_shutdown(struct worker_params *wp,
1051 */
1052
1053 /* get more buffers to queue up, again setting them to the same flow */
1054- if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
1055+ if (rte_mempool_get_bulk(p, (void *)bufs2, BURST) != 0) {
1056 printf("line %d: Error getting mbufs from pool\n", __LINE__);
1057+ rte_mempool_put_bulk(p, (void *)bufs, BURST);
1058 return -1;
1059 }
1060 for (i = 0; i < BURST; i++)
1061- bufs[i]->hash.usr = 1;
1062+ bufs2[i]->hash.usr = 1;
1063
1064 /* get worker zero to quit */
1065 zero_quit = 1;
1066- rte_distributor_process(d, bufs, BURST);
1067+ rte_distributor_process(d, bufs2, BURST);
1068
1069 /* flush the distributor */
1070 rte_distributor_flush(d);
1071- rte_delay_us(10000);
1072+ while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
1073+ rte_distributor_flush(d);
1074+
1075+ zero_quit = 0;
1076+ while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
1077+ rte_delay_us(100);
1078
1079 for (i = 0; i < rte_lcore_count() - 1; i++)
1080 printf("Worker %u handled %u packets\n", i,
1081- worker_stats[i].handled_packets);
1082+ __atomic_load_n(&worker_stats[i].handled_packets,
1083+ __ATOMIC_RELAXED));
1084
1085 if (total_packet_count() != BURST * 2) {
1086 printf("Line %d: Error, not all packets flushed. "
1087 "Expected %u, got %u\n",
1088 __LINE__, BURST * 2, total_packet_count());
1089- return -1;
1090+ failed = 1;
1091 }
1092
1093+ rte_mempool_put_bulk(p, (void *)bufs, BURST);
1094+ rte_mempool_put_bulk(p, (void *)bufs2, BURST);
1095+
1096+ if (failed)
1097+ return -1;
1098+
1099 printf("Sanity test with worker shutdown passed\n\n");
1100 return 0;
1101 }
1102@@ -466,7 +505,9 @@ test_flush_with_worker_shutdown(struct worker_params *wp,
1103 {
1104 struct rte_distributor *d = wp->dist;
1105 struct rte_mbuf *bufs[BURST];
1106- unsigned i;
1107+ unsigned int i;
1108+ unsigned int failed = 0;
1109+ unsigned int processed;
1110
1111 printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);
1112
1113@@ -481,7 +522,10 @@ test_flush_with_worker_shutdown(struct worker_params *wp,
1114 for (i = 0; i < BURST; i++)
1115 bufs[i]->hash.usr = 0;
1116
1117- rte_distributor_process(d, bufs, BURST);
1118+ processed = 0;
1119+ while (processed < BURST)
1120+ processed += rte_distributor_process(d, &bufs[processed],
1121+ BURST - processed);
1122 /* at this point, we will have processed some packets and have a full
1123 * backlog for the other ones at worker 0.
1124 */
1125@@ -492,20 +536,31 @@ test_flush_with_worker_shutdown(struct worker_params *wp,
1126 /* flush the distributor */
1127 rte_distributor_flush(d);
1128
1129- rte_delay_us(10000);
1130+ while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
1131+ rte_distributor_flush(d);
1132
1133 zero_quit = 0;
1134+
1135+ while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
1136+ rte_delay_us(100);
1137+
1138 for (i = 0; i < rte_lcore_count() - 1; i++)
1139 printf("Worker %u handled %u packets\n", i,
1140- worker_stats[i].handled_packets);
1141+ __atomic_load_n(&worker_stats[i].handled_packets,
1142+ __ATOMIC_RELAXED));
1143
1144 if (total_packet_count() != BURST) {
1145 printf("Line %d: Error, not all packets flushed. "
1146 "Expected %u, got %u\n",
1147 __LINE__, BURST, total_packet_count());
1148- return -1;
1149+ failed = 1;
1150 }
1151
1152+ rte_mempool_put_bulk(p, (void *)bufs, BURST);
1153+
1154+ if (failed)
1155+ return -1;
1156+
1157 printf("Flush test with worker shutdown passed\n\n");
1158 return 0;
1159 }
1160@@ -571,21 +626,34 @@ quit_workers(struct worker_params *wp, struct rte_mempool *p)
1161 const unsigned num_workers = rte_lcore_count() - 1;
1162 unsigned i;
1163 struct rte_mbuf *bufs[RTE_MAX_LCORE];
1164- rte_mempool_get_bulk(p, (void *)bufs, num_workers);
1165+ struct rte_mbuf *returns[RTE_MAX_LCORE];
1166+ if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
1167+ printf("line %d: Error getting mbufs from pool\n", __LINE__);
1168+ return;
1169+ }
1170
1171 zero_quit = 0;
1172 quit = 1;
1173- for (i = 0; i < num_workers; i++)
1174+ for (i = 0; i < num_workers; i++) {
1175 bufs[i]->hash.usr = i << 1;
1176- rte_distributor_process(d, bufs, num_workers);
1177-
1178- rte_mempool_put_bulk(p, (void *)bufs, num_workers);
1179+ rte_distributor_process(d, &bufs[i], 1);
1180+ }
1181
1182 rte_distributor_process(d, NULL, 0);
1183 rte_distributor_flush(d);
1184 rte_eal_mp_wait_lcore();
1185+
1186+ while (rte_distributor_returned_pkts(d, returns, RTE_MAX_LCORE))
1187+ ;
1188+
1189+ rte_distributor_clear_returns(d);
1190+ rte_mempool_put_bulk(p, (void *)bufs, num_workers);
1191+
1192 quit = 0;
1193 worker_idx = 0;
1194+ zero_idx = RTE_MAX_LCORE;
1195+ zero_quit = 0;
1196+ zero_sleep = 0;
1197 }
1198
1199 static int
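
The distributor test changes above replace single calls to rte_distributor_process() with a retry loop, because the burst API can accept fewer packets than offered while workers are busy or shutting down. A minimal sketch of that pattern, assuming an already created distributor and an mbuf array, could look like this:

    #include <rte_distributor.h>
    #include <rte_mbuf.h>

    /* Hand "count" mbufs to the distributor, retrying until all are accepted,
     * then flush so nothing is left sitting in per-worker backlogs. */
    static void
    distribute_all(struct rte_distributor *d, struct rte_mbuf **bufs,
            unsigned int count)
    {
        unsigned int processed = 0;

        while (processed < count)
            processed += rte_distributor_process(d, &bufs[processed],
                    count - processed);

        rte_distributor_flush(d);
    }
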
1200diff --git a/app/test/test_event_crypto_adapter.c b/app/test/test_event_crypto_adapter.c
1201index 8d42462..1a9aa06 100644
1202--- a/app/test/test_event_crypto_adapter.c
1203+++ b/app/test/test_event_crypto_adapter.c
1204@@ -171,7 +171,6 @@ test_op_forward_mode(uint8_t session_less)
1205 struct rte_event ev;
1206 uint32_t cap;
1207 int ret;
1208- uint8_t cipher_key[17];
1209
1210 memset(&m_data, 0, sizeof(m_data));
1211
1212@@ -183,14 +182,7 @@ test_op_forward_mode(uint8_t session_less)
1213 /* Setup Cipher Parameters */
1214 cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1215 cipher_xform.next = NULL;
1216-
1217- cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
1218- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1219-
1220- cipher_xform.cipher.key.data = cipher_key;
1221- cipher_xform.cipher.key.length = 16;
1222- cipher_xform.cipher.iv.offset = IV_OFFSET;
1223- cipher_xform.cipher.iv.length = 16;
1224+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
1225
1226 op = rte_crypto_op_alloc(params.op_mpool,
1227 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
1228@@ -209,8 +201,8 @@ test_op_forward_mode(uint8_t session_less)
1229 &cipher_xform, params.session_priv_mpool);
1230 TEST_ASSERT_SUCCESS(ret, "Failed to init session\n");
1231
1232- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
1233- evdev, &cap);
1234+ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID,
1235+ &cap);
1236 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
1237
1238 if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
1239@@ -296,7 +288,7 @@ test_sessionless_with_op_forward_mode(void)
1240 uint32_t cap;
1241 int ret;
1242
1243- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
1244+ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
1245 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
1246
1247 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1248@@ -321,7 +313,7 @@ test_session_with_op_forward_mode(void)
1249 uint32_t cap;
1250 int ret;
1251
1252- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
1253+ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
1254 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
1255
1256 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1257@@ -378,7 +370,6 @@ test_op_new_mode(uint8_t session_less)
1258 struct rte_mbuf *m;
1259 uint32_t cap;
1260 int ret;
1261- uint8_t cipher_key[17];
1262
1263 memset(&m_data, 0, sizeof(m_data));
1264
1265@@ -390,14 +381,7 @@ test_op_new_mode(uint8_t session_less)
1266 /* Setup Cipher Parameters */
1267 cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1268 cipher_xform.next = NULL;
1269-
1270- cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
1271- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1272-
1273- cipher_xform.cipher.key.data = cipher_key;
1274- cipher_xform.cipher.key.length = 16;
1275- cipher_xform.cipher.iv.offset = IV_OFFSET;
1276- cipher_xform.cipher.iv.length = 16;
1277+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
1278
1279 op = rte_crypto_op_alloc(params.op_mpool,
1280 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
1281@@ -410,8 +394,8 @@ test_op_new_mode(uint8_t session_less)
1282 params.session_mpool);
1283 TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");
1284
1285- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
1286- evdev, &cap);
1287+ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID,
1288+ &cap);
1289 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
1290
1291 if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
1292@@ -460,7 +444,7 @@ test_sessionless_with_op_new_mode(void)
1293 uint32_t cap;
1294 int ret;
1295
1296- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
1297+ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
1298 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
1299
1300 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1301@@ -486,7 +470,7 @@ test_session_with_op_new_mode(void)
1302 uint32_t cap;
1303 int ret;
1304
1305- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
1306+ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
1307 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
1308
1309 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1310@@ -564,7 +548,9 @@ configure_cryptodev(void)
1311
1312 params.session_mpool = rte_cryptodev_sym_session_pool_create(
1313 "CRYPTO_ADAPTER_SESSION_MP",
1314- MAX_NB_SESSIONS, 0, 0, 0, SOCKET_ID_ANY);
1315+ MAX_NB_SESSIONS, 0, 0,
1316+ sizeof(union rte_event_crypto_metadata),
1317+ SOCKET_ID_ANY);
1318 TEST_ASSERT_NOT_NULL(params.session_mpool,
1319 "session mempool allocation failed\n");
1320
1321@@ -706,7 +692,7 @@ test_crypto_adapter_create(void)
1322
1323 /* Create adapter with default port creation callback */
1324 ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
1325- TEST_CDEV_ID,
1326+ evdev,
1327 &conf, 0);
1328 TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");
1329
1330@@ -719,7 +705,7 @@ test_crypto_adapter_qp_add_del(void)
1331 uint32_t cap;
1332 int ret;
1333
1334- ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
1335+ ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
1336 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
1337
1338 if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
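
The recurring change in this file swaps the first two arguments of rte_event_crypto_adapter_caps_get(): the event device id comes first and the crypto device id second. A hedged sketch of the corrected usage (device ids are placeholders):

    #include <rte_event_crypto_adapter.h>

    /* Return 1 if the device pair supports OP_FORWARD via an internal port,
     * 0 if it does not, or a negative error from the capability query. */
    static int
    supports_internal_port_fwd(uint8_t evdev_id, uint8_t cdev_id)
    {
        uint32_t caps = 0;
        int ret = rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps);

        if (ret < 0)
            return ret;
        return !!(caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD);
    }
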
1339diff --git a/app/test/test_event_eth_tx_adapter.c b/app/test/test_event_eth_tx_adapter.c
1340index 3af7492..7073030 100644
1341--- a/app/test/test_event_eth_tx_adapter.c
1342+++ b/app/test/test_event_eth_tx_adapter.c
1343@@ -45,7 +45,7 @@ static uint64_t eid = ~0ULL;
1344 static uint32_t tid;
1345
1346 static inline int
1347-port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
1348+port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
1349 struct rte_mempool *mp)
1350 {
1351 const uint16_t rx_ring_size = RING_SIZE, tx_ring_size = RING_SIZE;
1352@@ -104,7 +104,7 @@ port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
1353 }
1354
1355 static inline int
1356-port_init(uint8_t port, struct rte_mempool *mp)
1357+port_init(uint16_t port, struct rte_mempool *mp)
1358 {
1359 struct rte_eth_conf conf = { 0 };
1360 return port_init_common(port, &conf, mp);
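
The port_init helpers above now take a uint16_t port id, matching the ethdev API. As a small illustration (not part of the patch), iterating valid ports is normally done with the 16-bit type:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    list_ports(void)
    {
        uint16_t port;

        /* visits every currently valid ethdev port id */
        RTE_ETH_FOREACH_DEV(port)
            printf("port %u on NUMA node %d\n", port,
                    rte_eth_dev_socket_id(port));
    }
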
1361diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
1362index bf04025..a5bd169 100644
1363--- a/app/test/test_mbuf.c
1364+++ b/app/test/test_mbuf.c
1365@@ -2481,9 +2481,13 @@ test_mbuf_dyn(struct rte_mempool *pktmbuf_pool)
1366
1367 offset3 = rte_mbuf_dynfield_register_offset(&dynfield3,
1368 offsetof(struct rte_mbuf, dynfield1[1]));
1369- if (offset3 != offsetof(struct rte_mbuf, dynfield1[1]))
1370- GOTO_FAIL("failed to register dynamic field 3, offset=%d: %s",
1371- offset3, strerror(errno));
1372+ if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) {
1373+ if (rte_errno == EBUSY)
1374+ printf("mbuf test error skipped: dynfield is busy\n");
1375+ else
1376+ GOTO_FAIL("failed to register dynamic field 3, offset="
1377+ "%d: %s", offset3, strerror(errno));
1378+ }
1379
1380 printf("dynfield: offset=%d, offset2=%d, offset3=%d\n",
1381 offset, offset2, offset3);
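
The mbuf change above treats EBUSY from rte_mbuf_dynfield_register_offset() as a skip rather than a failure, since another component may already own that offset. A hedged sketch of the same handling, with an illustrative field name:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <rte_mbuf.h>
    #include <rte_mbuf_dyn.h>
    #include <rte_errno.h>

    static int
    register_example_dynfield(void)
    {
        static const struct rte_mbuf_dynfield desc = {
            .name = "example_dynfield",   /* illustrative name */
            .size = sizeof(uint32_t),
            .align = __alignof__(uint32_t),
        };
        int off = rte_mbuf_dynfield_register_offset(&desc,
                offsetof(struct rte_mbuf, dynfield1[1]));

        if (off < 0 && rte_errno == EBUSY) {
            printf("dynfield offset already in use, skipping\n");
            return 0;
        }
        return off;
    }
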
1382diff --git a/app/test/test_rcu_qsbr.c b/app/test/test_rcu_qsbr.c
1383index b60dc50..5542b3c 100644
1384--- a/app/test/test_rcu_qsbr.c
1385+++ b/app/test/test_rcu_qsbr.c
1386@@ -273,13 +273,13 @@ static int
1387 test_rcu_qsbr_start(void)
1388 {
1389 uint64_t token;
1390- int i;
1391+ unsigned int i;
1392
1393 printf("\nTest rte_rcu_qsbr_start()\n");
1394
1395 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
1396
1397- for (i = 0; i < 3; i++)
1398+ for (i = 0; i < num_cores; i++)
1399 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);
1400
1401 token = rte_rcu_qsbr_start(t[0]);
1402@@ -293,14 +293,18 @@ test_rcu_qsbr_check_reader(void *arg)
1403 {
1404 struct rte_rcu_qsbr *temp;
1405 uint8_t read_type = (uint8_t)((uintptr_t)arg);
1406+ unsigned int i;
1407
1408 temp = t[read_type];
1409
1410 /* Update quiescent state counter */
1411- rte_rcu_qsbr_quiescent(temp, enabled_core_ids[0]);
1412- rte_rcu_qsbr_quiescent(temp, enabled_core_ids[1]);
1413- rte_rcu_qsbr_thread_unregister(temp, enabled_core_ids[2]);
1414- rte_rcu_qsbr_quiescent(temp, enabled_core_ids[3]);
1415+ for (i = 0; i < num_cores; i++) {
1416+ if (i % 2 == 0)
1417+ rte_rcu_qsbr_quiescent(temp, enabled_core_ids[i]);
1418+ else
1419+ rte_rcu_qsbr_thread_unregister(temp,
1420+ enabled_core_ids[i]);
1421+ }
1422 return 0;
1423 }
1424
1425@@ -311,7 +315,8 @@ test_rcu_qsbr_check_reader(void *arg)
1426 static int
1427 test_rcu_qsbr_check(void)
1428 {
1429- int i, ret;
1430+ int ret;
1431+ unsigned int i;
1432 uint64_t token;
1433
1434 printf("\nTest rte_rcu_qsbr_check()\n");
1435@@ -329,7 +334,7 @@ test_rcu_qsbr_check(void)
1436 ret = rte_rcu_qsbr_check(t[0], token, true);
1437 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Blocking QSBR check");
1438
1439- for (i = 0; i < 3; i++)
1440+ for (i = 0; i < num_cores; i++)
1441 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);
1442
1443 ret = rte_rcu_qsbr_check(t[0], token, false);
1444@@ -344,7 +349,7 @@ test_rcu_qsbr_check(void)
1445 /* Threads are offline, hence this should pass */
1446 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Non-blocking QSBR check");
1447
1448- for (i = 0; i < 3; i++)
1449+ for (i = 0; i < num_cores; i++)
1450 rte_rcu_qsbr_thread_unregister(t[0], enabled_core_ids[i]);
1451
1452 ret = rte_rcu_qsbr_check(t[0], token, true);
1453@@ -352,7 +357,7 @@ test_rcu_qsbr_check(void)
1454
1455 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
1456
1457- for (i = 0; i < 4; i++)
1458+ for (i = 0; i < num_cores; i++)
1459 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);
1460
1461 token = rte_rcu_qsbr_start(t[0]);
1462@@ -591,7 +596,7 @@ test_rcu_qsbr_thread_offline(void)
1463 static int
1464 test_rcu_qsbr_dump(void)
1465 {
1466- int i;
1467+ unsigned int i;
1468
1469 printf("\nTest rte_rcu_qsbr_dump()\n");
1470
1471@@ -608,7 +613,7 @@ test_rcu_qsbr_dump(void)
1472
1473 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
1474
1475- for (i = 1; i < 3; i++)
1476+ for (i = 1; i < num_cores; i++)
1477 rte_rcu_qsbr_thread_register(t[1], enabled_core_ids[i]);
1478
1479 rte_rcu_qsbr_dump(stdout, t[0]);
1480@@ -758,7 +763,7 @@ test_rcu_qsbr_sw_sv_3qs(void)
1481 {
1482 uint64_t token[3];
1483 uint32_t c;
1484- int i;
1485+ int i, num_readers;
1486 int32_t pos[3];
1487
1488 writer_done = 0;
1489@@ -781,7 +786,11 @@ test_rcu_qsbr_sw_sv_3qs(void)
1490 thread_info[0].ih = 0;
1491
1492 /* Reader threads are launched */
1493- for (i = 0; i < 4; i++)
1494+ /* Keep the number of reader threads low to reduce
1495+ * the execution time.
1496+ */
1497+ num_readers = num_cores < 4 ? num_cores : 4;
1498+ for (i = 0; i < num_readers; i++)
1499 rte_eal_remote_launch(test_rcu_qsbr_reader, &thread_info[0],
1500 enabled_core_ids[i]);
1501
1502@@ -814,7 +823,7 @@ test_rcu_qsbr_sw_sv_3qs(void)
1503
1504 /* Check the quiescent state status */
1505 rte_rcu_qsbr_check(t[0], token[0], true);
1506- for (i = 0; i < 4; i++) {
1507+ for (i = 0; i < num_readers; i++) {
1508 c = hash_data[0][0][enabled_core_ids[i]];
1509 if (c != COUNTER_VALUE && c != 0) {
1510 printf("Reader lcore %d did not complete #0 = %d\n",
1511@@ -832,7 +841,7 @@ test_rcu_qsbr_sw_sv_3qs(void)
1512
1513 /* Check the quiescent state status */
1514 rte_rcu_qsbr_check(t[0], token[1], true);
1515- for (i = 0; i < 4; i++) {
1516+ for (i = 0; i < num_readers; i++) {
1517 c = hash_data[0][3][enabled_core_ids[i]];
1518 if (c != COUNTER_VALUE && c != 0) {
1519 printf("Reader lcore %d did not complete #3 = %d\n",
1520@@ -850,7 +859,7 @@ test_rcu_qsbr_sw_sv_3qs(void)
1521
1522 /* Check the quiescent state status */
1523 rte_rcu_qsbr_check(t[0], token[2], true);
1524- for (i = 0; i < 4; i++) {
1525+ for (i = 0; i < num_readers; i++) {
1526 c = hash_data[0][6][enabled_core_ids[i]];
1527 if (c != COUNTER_VALUE && c != 0) {
1528 printf("Reader lcore %d did not complete #6 = %d\n",
1529@@ -869,7 +878,7 @@ test_rcu_qsbr_sw_sv_3qs(void)
1530 writer_done = 1;
1531
1532 /* Wait and check return value from reader threads */
1533- for (i = 0; i < 4; i++)
1534+ for (i = 0; i < num_readers; i++)
1535 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
1536 goto error;
1537 rte_hash_free(h[0]);
1538@@ -899,6 +908,12 @@ test_rcu_qsbr_mw_mv_mqs(void)
1539 unsigned int i, j;
1540 unsigned int test_cores;
1541
1542+ if (RTE_MAX_LCORE < 5 || num_cores < 4) {
1543+ printf("Not enough cores for %s, expecting at least 5\n",
1544+ __func__);
1545+ return TEST_SKIPPED;
1546+ }
1547+
1548 writer_done = 0;
1549 test_cores = num_cores / 4;
1550 test_cores = test_cores * 4;
1551@@ -984,11 +999,6 @@ test_rcu_qsbr_main(void)
1552 {
1553 uint16_t core_id;
1554
1555- if (rte_lcore_count() < 5) {
1556- printf("Not enough cores for rcu_qsbr_autotest, expecting at least 5\n");
1557- return TEST_SKIPPED;
1558- }
1559-
1560 num_cores = 0;
1561 RTE_LCORE_FOREACH_SLAVE(core_id) {
1562 enabled_core_ids[num_cores] = core_id;
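
The rcu_qsbr test rework above keys its loops off num_cores instead of hard-coded counts. For orientation, the basic register/start/quiescent/check cycle those tests exercise looks roughly like the sketch below (single reader, reader_lcore is a placeholder, and the QSBR variable must be cache-line aligned, here via rte_zmalloc):

    #include <rte_common.h>
    #include <rte_malloc.h>
    #include <rte_rcu_qsbr.h>

    static int
    qsbr_roundtrip(unsigned int reader_lcore)
    {
        size_t sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
        struct rte_rcu_qsbr *v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
        uint64_t token;

        if (v == NULL || rte_rcu_qsbr_init(v, RTE_MAX_LCORE) != 0)
            return -1;

        rte_rcu_qsbr_thread_register(v, reader_lcore);
        token = rte_rcu_qsbr_start(v);            /* writer side */
        rte_rcu_qsbr_quiescent(v, reader_lcore);  /* reader side */
        rte_rcu_qsbr_check(v, token, true);       /* blocks until quiescent */
        rte_free(v);
        return 0;
    }
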
1563diff --git a/app/test/test_ring.c b/app/test/test_ring.c
1564index aaf1e70..4825c9e 100644
1565--- a/app/test/test_ring.c
1566+++ b/app/test/test_ring.c
1567@@ -696,7 +696,7 @@ test_ring_basic_ex(void)
1568
1569 printf("%u ring entries are now free\n", rte_ring_free_count(rp));
1570
1571- for (i = 0; i < RING_SIZE; i ++) {
1572+ for (i = 0; i < RING_SIZE - 1; i ++) {
1573 rte_ring_enqueue(rp, obj[i]);
1574 }
1575
1576@@ -705,7 +705,7 @@ test_ring_basic_ex(void)
1577 goto fail_test;
1578 }
1579
1580- for (i = 0; i < RING_SIZE; i ++) {
1581+ for (i = 0; i < RING_SIZE - 1; i ++) {
1582 rte_ring_dequeue(rp, &obj[i]);
1583 }
1584
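
The loop bounds above drop to RING_SIZE - 1 because a ring created without RING_F_EXACT_SZ keeps one slot unused, so its usable capacity is count - 1. A hedged sketch for when the full count is really needed (the ring name is illustrative):

    #include <rte_memory.h>
    #include <rte_ring.h>

    /* With RING_F_EXACT_SZ the ring stores exactly "count" objects; without
     * it, a power-of-two ring of size "count" only holds count - 1. */
    static struct rte_ring *
    make_exact_ring(unsigned int count)
    {
        return rte_ring_create("example_ring", count, SOCKET_ID_ANY,
                RING_F_EXACT_SZ);
    }
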
1585diff --git a/buildtools/pkg-config/meson.build b/buildtools/pkg-config/meson.build
1586new file mode 100644
1587index 0000000..5f19304
1588--- /dev/null
1589+++ b/buildtools/pkg-config/meson.build
1590@@ -0,0 +1,55 @@
1591+# SPDX-License-Identifier: BSD-3-Clause
1592+# Copyright(c) 2020 Intel Corporation
1593+
1594+pkg = import('pkgconfig')
1595+pkg_extra_cflags = ['-include', 'rte_config.h'] + machine_args
1596+if is_freebsd
1597+ pkg_extra_cflags += ['-D__BSD_VISIBLE']
1598+endif
1599+
1600+# When calling pkg-config --static --libs, pkg-config will always output the
1601+# regular libs first, and then the extra libs from Libs.private field,
1602+# since the assumption is that those are additional dependencies for building
1603+# statically that the .a files depend upon. The output order of .pc fields is:
1604+# Libs Libs.private Requires Requires.private
1605+# The fields Requires* are for package names.
1606+# The flags of the DPDK libraries must be defined in Libs* fields.
1607+# However, the DPDK drivers are linked only in static builds (Libs.private),
1608+# and those need to come *before* the regular libraries (Libs field).
1609+# This requirement is satisfied by moving the regular libs in a separate file
1610+# included in the field Requires (after Libs.private).
1611+# Another requirement is to allow linking dependencies as shared libraries,
1612+# while linking static DPDK libraries and drivers. It is satisfied by
1613+# listing the static files in Libs.private with the explicit syntax -l:libfoo.a.
1614+# As a consequence, the regular DPDK libraries are already listed as static
1615+in the field Libs.private. The second occurrences of DPDK libraries,
1616+# included from Requires and used for shared library linkage case,
1617+# are skipped in the case of static linkage thanks to the flag --as-needed.
1618+
1619+
1620+pkg.generate(name: 'dpdk-libs',
1621+ filebase: 'libdpdk-libs',
1622+ description: '''Internal-only DPDK pkgconfig file. Not for direct use.
1623+Use libdpdk.pc instead of this file to query DPDK compile/link arguments''',
1624+ version: meson.project_version(),
1625+ subdirs: [get_option('include_subdir_arch'), '.'],
1626+ extra_cflags: pkg_extra_cflags,
1627+ libraries: ['-Wl,--as-needed'] + dpdk_libraries,
1628+ libraries_private: dpdk_extra_ldflags)
1629+
1630+pkg.generate(name: 'DPDK', # main DPDK pkgconfig file
1631+ filebase: 'libdpdk',
1632+ version: meson.project_version(),
1633+ description: '''The Data Plane Development Kit (DPDK).
1634+Note that CFLAGS might contain an -march flag higher than typical baseline.
1635+This is required for a number of static inline functions in the public headers.''',
1636+ requires: ['libdpdk-libs', libbsd], # may need libbsd for string funcs
1637+ # if libbsd is not enabled, then this is blank
1638+ libraries_private: ['-Wl,--whole-archive'] +
1639+ dpdk_drivers + dpdk_static_libraries +
1640+ ['-Wl,--no-whole-archive']
1641+)
1642+
1643+# For static linking with dependencies as shared libraries,
1644+# the internal static libraries must be flagged explicitly.
1645+run_command(py3, 'set-static-linker-flags.py', check: true)
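
The comment block in this new file describes how libdpdk.pc orders Libs, Libs.private and Requires so that both shared and static linkage work. As a rough consumer-side illustration (the file name and build command are examples, not part of the patch), a program like the one below is expected to build with `cc $(pkg-config --cflags libdpdk) hello.c $(pkg-config --libs libdpdk)`, or with `--static --libs` for static linkage:

    #include <stdio.h>
    #include <rte_eal.h>
    #include <rte_lcore.h>

    int
    main(int argc, char **argv)
    {
        /* initialise the EAL; flags such as --no-huge can be passed here */
        if (rte_eal_init(argc, argv) < 0) {
            fprintf(stderr, "EAL initialisation failed\n");
            return 1;
        }
        printf("hello from lcore %u\n", rte_lcore_id());
        rte_eal_cleanup();
        return 0;
    }
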
1646diff --git a/buildtools/pkg-config/set-static-linker-flags.py b/buildtools/pkg-config/set-static-linker-flags.py
1647new file mode 100644
1648index 0000000..2745db3
1649--- /dev/null
1650+++ b/buildtools/pkg-config/set-static-linker-flags.py
1651@@ -0,0 +1,38 @@
1652+#!/usr/bin/env python3
1653+# SPDX-License-Identifier: BSD-3-Clause
1654+# Copyright(c) 2020 Intel Corporation
1655+
1656+# Script to fix flags for static linking in pkgconfig files from meson
1657+# Should be called from meson build itself
1658+import os
1659+import sys
1660+
1661+
1662+def fix_ldflag(f):
1663+ if not f.startswith('-lrte_'):
1664+ return f
1665+ return '-l:lib' + f[2:] + '.a'
1666+
1667+
1668+def fix_libs_private(line):
1669+ if not line.startswith('Libs.private'):
1670+ return line
1671+ ldflags = [fix_ldflag(flag) for flag in line.split()]
1672+ return ' '.join(ldflags) + '\n'
1673+
1674+
1675+def process_pc_file(filepath):
1676+ print('Processing', filepath)
1677+ with open(filepath) as src:
1678+ lines = src.readlines()
1679+ with open(filepath, 'w') as dst:
1680+ dst.writelines([fix_libs_private(line) for line in lines])
1681+
1682+
1683+if 'MESON_BUILD_ROOT' not in os.environ:
1684+ print('This script must be called from a meson build environment')
1685+ sys.exit(1)
1686+for root, dirs, files in os.walk(os.environ['MESON_BUILD_ROOT']):
1687+ pc_files = [f for f in files if f.endswith('.pc')]
1688+ for f in pc_files:
1689+ process_pc_file(os.path.join(root, f))
1690diff --git a/buildtools/pmdinfogen/pmdinfogen.h b/buildtools/pmdinfogen/pmdinfogen.h
1691index c8a9e21..467216d 100644
1692--- a/buildtools/pmdinfogen/pmdinfogen.h
1693+++ b/buildtools/pmdinfogen/pmdinfogen.h
1694@@ -82,7 +82,7 @@ if ((fend) == ELFDATA2LSB) \
1695 ___x = le##width##toh(x); \
1696 else \
1697 ___x = be##width##toh(x); \
1698- ___x; \
1699+___x; \
1700 })
1701
1702 #define TO_NATIVE(fend, width, x) CONVERT_NATIVE(fend, width, x)
1703diff --git a/config/defconfig_arm64-graviton2-linux-gcc b/config/defconfig_arm64-graviton2-linux-gcc
1704new file mode 120000
1705index 0000000..80ac94d
1706--- /dev/null
1707+++ b/config/defconfig_arm64-graviton2-linux-gcc
1708@@ -0,0 +1 @@
1709+defconfig_arm64-graviton2-linuxapp-gcc
1710\ No newline at end of file
1711diff --git a/config/defconfig_arm64-graviton2-linuxapp-gcc b/config/defconfig_arm64-graviton2-linuxapp-gcc
1712new file mode 100644
1713index 0000000..e99fef3
1714--- /dev/null
1715+++ b/config/defconfig_arm64-graviton2-linuxapp-gcc
1716@@ -0,0 +1,13 @@
1717+# SPDX-License-Identifier: BSD-3-Clause
1718+# Copyright(c) Amazon.com, Inc or its affiliates
1719+#
1720+
1721+#include "defconfig_arm64-armv8a-linux-gcc"
1722+
1723+CONFIG_RTE_MACHINE="graviton2"
1724+CONFIG_RTE_MAX_LCORE=64
1725+CONFIG_RTE_CACHE_LINE_SIZE=64
1726+CONFIG_RTE_MAX_MEM_MB=1048576
1727+CONFIG_RTE_MAX_NUMA_NODES=1
1728+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
1729+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
1730diff --git a/config/defconfig_graviton2 b/config/defconfig_graviton2
1731new file mode 120000
1732index 0000000..80ac94d
1733--- /dev/null
1734+++ b/config/defconfig_graviton2
1735@@ -0,0 +1 @@
1736+defconfig_arm64-graviton2-linuxapp-gcc
1737\ No newline at end of file
1738diff --git a/config/meson.build b/config/meson.build
1739index 0a5e5b1..bab00f1 100644
1740--- a/config/meson.build
1741+++ b/config/meson.build
1742@@ -54,9 +54,11 @@ eal_pmd_path = join_paths(get_option('prefix'), driver_install_path)
1743 # driver .so files often depend upon the bus drivers for their connect bus,
1744 # e.g. ixgbe depends on librte_bus_pci. This means that the bus drivers need
1745 # to be in the library path, so symlink the drivers from the main lib directory.
1746-meson.add_install_script('../buildtools/symlink-drivers-solibs.sh',
1747- get_option('libdir'),
1748- pmd_subdir_opt)
1749+if not is_windows
1750+ meson.add_install_script('../buildtools/symlink-drivers-solibs.sh',
1751+ get_option('libdir'),
1752+ pmd_subdir_opt)
1753+endif
1754
1755 # set the machine type and cflags for it
1756 if meson.is_cross_build()
1757@@ -160,11 +162,9 @@ if libbsd.found()
1758 endif
1759
1760 # check for pcap
1761-pcap_dep = dependency('pcap', required: false)
1762-if pcap_dep.found()
1763- # pcap got a pkg-config file only in 1.9.0 and before that meson uses
1764- # an internal pcap-config finder, which is not compatible with
1765- # cross-compilation, so try to fallback to find_library
1766+pcap_dep = dependency('libpcap', required: false, method: 'pkg-config')
1767+if not pcap_dep.found()
1768+ # pcap got a pkg-config file only in 1.9.0
1769 pcap_dep = cc.find_library('pcap', required: false)
1770 endif
1771 if pcap_dep.found() and cc.has_header('pcap.h', dependencies: pcap_dep)
1772@@ -183,6 +183,7 @@ warning_flags = [
1773 # additional warnings in alphabetical order
1774 '-Wcast-qual',
1775 '-Wdeprecated',
1776+ '-Wformat',
1777 '-Wformat-nonliteral',
1778 '-Wformat-security',
1779 '-Wmissing-declarations',
1780diff --git a/config/rte_config.h b/config/rte_config.h
1781index 765251a..0f9dae3 100644
1782--- a/config/rte_config.h
1783+++ b/config/rte_config.h
1784@@ -99,6 +99,9 @@
1785
1786 /****** driver defines ********/
1787
1788+/* Packet prefetching in PMDs */
1789+#define RTE_PMD_PACKET_PREFETCH 1
1790+
1791 /* QuickAssist device */
1792 /* Max. number of QuickAssist devices which can be attached */
1793 #define RTE_PMD_QAT_MAX_PCI_DEVICES 48
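
The restored RTE_PMD_PACKET_PREFETCH define re-enables packet data prefetching in PMD receive paths for meson builds. A hedged sketch of the usual pattern behind that knob (the helper name is illustrative):

    #include <rte_common.h>
    #include <rte_mbuf.h>
    #include <rte_prefetch.h>

    static inline void
    rx_prefetch(struct rte_mbuf *m)
    {
    #ifdef RTE_PMD_PACKET_PREFETCH
        /* warm the cache line holding the packet data before parsing it */
        rte_prefetch0(rte_pktmbuf_mtod(m, void *));
    #else
        RTE_SET_USED(m);
    #endif
    }
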
1794diff --git a/debian/changelog b/debian/changelog
1795index 411529a..f6a0a12 100644
1796--- a/debian/changelog
1797+++ b/debian/changelog
1798@@ -1,3 +1,11 @@
1799+dpdk (19.11.6-0ubuntu0.20.10.1) groovy; urgency=medium
1800+
1801+ * Merge latest upstream stable minor release 19.11.6 (LP: #1912464)
1802+ For a detailed list of changes check out
1803+ https://doc.dpdk.org/guides-19.11/rel_notes/release_19_11.html#id14
1804+
1805+ -- Christian Ehrhardt <christian.ehrhardt@canonical.com> Wed, 20 Jan 2021 09:37:12 +0100
1806+
1807 dpdk (19.11.5-1) unstable; urgency=medium
1808
1809 * New upstream version 19.11.5
1810diff --git a/debian/control b/debian/control
1811index e0439ff..c1a845e 100644
1812--- a/debian/control
1813+++ b/debian/control
1814@@ -1,6 +1,7 @@
1815 Source: dpdk
1816 Priority: optional
1817-Maintainer: Debian DPDK Maintainers <pkg-dpdk-devel@lists.alioth.debian.org>
1818+Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
1819+XSBC-Original-Maintainer: Debian DPDK Maintainers <pkg-dpdk-devel@lists.alioth.debian.org>
1820 Uploaders: Luca Boccassi <bluca@debian.org>,
1821 Christian Ehrhardt <christian.ehrhardt@canonical.com>,
1822 Santiago Ruano Rincón <santiagorr@riseup.net>,
1823diff --git a/devtools/check-forbidden-tokens.awk b/devtools/check-forbidden-tokens.awk
1824index f86cbe8..61ba707 100755
1825--- a/devtools/check-forbidden-tokens.awk
1826+++ b/devtools/check-forbidden-tokens.awk
1827@@ -54,7 +54,7 @@ BEGIN {
1828 }
1829 for (i in deny_folders) {
1830 re = "^\\+\\+\\+ b/" deny_folders[i];
1831- if ($0 ~ deny_folders[i]) {
1832+ if ($0 ~ re) {
1833 in_file = 1
1834 last_file = $0
1835 }
1836diff --git a/devtools/test-meson-builds.sh b/devtools/test-meson-builds.sh
1837index 6885677..8678a3d 100755
1838--- a/devtools/test-meson-builds.sh
1839+++ b/devtools/test-meson-builds.sh
1840@@ -38,20 +38,21 @@ else
1841 fi
1842
1843 default_path=$PATH
1844-default_pkgpath=$PKG_CONFIG_PATH
1845 default_cppflags=$CPPFLAGS
1846 default_cflags=$CFLAGS
1847 default_ldflags=$LDFLAGS
1848+default_meson_options=$DPDK_MESON_OPTIONS
1849
1850 load_env () # <target compiler>
1851 {
1852 targetcc=$1
1853+ # reset variables before target-specific config
1854 export PATH=$default_path
1855- export PKG_CONFIG_PATH=$default_pkgpath
1856+ unset PKG_CONFIG_PATH # global default makes no sense
1857 export CPPFLAGS=$default_cppflags
1858 export CFLAGS=$default_cflags
1859 export LDFLAGS=$default_ldflags
1860- unset DPDK_MESON_OPTIONS
1861+ export DPDK_MESON_OPTIONS=$default_meson_options
1862 command -v $targetcc >/dev/null 2>&1 || return 1
1863 DPDK_TARGET=$($targetcc -v 2>&1 | sed -n 's,^Target: ,,p')
1864 . $srcdir/devtools/load-devel-config
1865@@ -134,19 +135,17 @@ done
1866
1867 # Test installation of the x86-default target, to be used for checking
1868 # the sample apps build using the pkg-config file for cflags and libs
1869+load_env cc
1870 build_path=$(readlink -f $builds_dir/build-x86-default)
1871 export DESTDIR=$build_path/install-root
1872 $ninja_cmd -C $build_path install
1873-
1874-load_env cc
1875 pc_file=$(find $DESTDIR -name libdpdk.pc)
1876 export PKG_CONFIG_PATH=$(dirname $pc_file):$PKG_CONFIG_PATH
1877-
1878 # if pkg-config defines the necessary flags, test building some examples
1879 if pkg-config --define-prefix libdpdk >/dev/null 2>&1; then
1880 export PKGCONF="pkg-config --define-prefix"
1881 for example in cmdline helloworld l2fwd l3fwd skeleton timer; do
1882 echo "## Building $example"
1883- $MAKE -C $DESTDIR/usr/local/share/dpdk/examples/$example clean all
1884+ $MAKE -C $DESTDIR/usr/local/share/dpdk/examples/$example clean shared static
1885 done
1886 fi
1887diff --git a/doc/build-sdk-meson.txt b/doc/build-sdk-meson.txt
1888index fc7fe37..8fb60a7 100644
1889--- a/doc/build-sdk-meson.txt
1890+++ b/doc/build-sdk-meson.txt
1891@@ -1,3 +1,6 @@
1892+.. SPDX-License-Identifier: BSD-3-Clause
1893+ Copyright(c) 2018 Intel Corporation.
1894+
1895 INSTALLING DPDK USING THE MESON BUILD SYSTEM
1896 ---------------------------------------------
1897
1898@@ -94,14 +97,17 @@ Examples of setting the same options using meson configure::
1899
1900 meson configure -Dmax_lcores=8
1901
1902-NOTE: once meson has been run to configure a build in a directory, it
1903-cannot be run again on the same directory. Instead ``meson configure``
1904-should be used to change the build settings within the directory, and when
1905-``ninja`` is called to do the build itself, it will trigger the necessary
1906-re-scan from meson.
1907+.. note::
1908+
1909+ once meson has been run to configure a build in a directory, it
1910+ cannot be run again on the same directory. Instead ``meson configure``
1911+ should be used to change the build settings within the directory, and when
1912+ ``ninja`` is called to do the build itself, it will trigger the necessary
1913+ re-scan from meson.
1914
1915-NOTE: machine=default uses a config that works on all supported architectures
1916-regardless of the capabilities of the machine where the build is happening.
1917+.. note::
1918+ machine=default uses a config that works on all supported architectures
1919+ regardless of the capabilities of the machine where the build is happening.
1920
1921 As well as those settings taken from ``meson configure``, other options
1922 such as the compiler to use can be passed via environment variables. For
1923@@ -109,9 +115,11 @@ example::
1924
1925 CC=clang meson clang-build
1926
1927-NOTE: for more comprehensive overriding of compilers or other environment
1928-settings, the tools for cross-compilation may be considered. However, for
1929-basic overriding of the compiler etc., the above form works as expected.
1930+.. note::
1931+
1932+ for more comprehensive overriding of compilers or other environment
1933+ settings, the tools for cross-compilation may be considered. However, for
1934+ basic overriding of the compiler etc., the above form works as expected.
1935
1936
1937 Performing the Build
1938@@ -182,7 +190,7 @@ From examples/helloworld/Makefile::
1939 PC_FILE := $(shell pkg-config --path libdpdk)
1940 CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
1941 LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
1942- LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
1943+ LDFLAGS_STATIC = $(shell pkg-config --static --libs libdpdk)
1944
1945 build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
1946 $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
1947diff --git a/doc/guides/cryptodevs/features/octeontx.ini b/doc/guides/cryptodevs/features/octeontx.ini
1948index 1c036c5..629e6bf 100644
1949--- a/doc/guides/cryptodevs/features/octeontx.ini
1950+++ b/doc/guides/cryptodevs/features/octeontx.ini
1951@@ -11,6 +11,7 @@ HW Accelerated = Y
1952 In Place SGL = Y
1953 OOP SGL In LB Out = Y
1954 OOP SGL In SGL Out = Y
1955+OOP LB In LB Out = Y
1956 RSA PRIV OP KEY QT = Y
1957
1958 ;
1959diff --git a/doc/guides/cryptodevs/features/octeontx2.ini b/doc/guides/cryptodevs/features/octeontx2.ini
1960index 7d07053..6a20587 100644
1961--- a/doc/guides/cryptodevs/features/octeontx2.ini
1962+++ b/doc/guides/cryptodevs/features/octeontx2.ini
1963@@ -11,6 +11,7 @@ HW Accelerated = Y
1964 In Place SGL = Y
1965 OOP SGL In LB Out = Y
1966 OOP SGL In SGL Out = Y
1967+OOP LB In LB Out = Y
1968 RSA PRIV OP KEY QT = Y
1969
1970 ;
1971diff --git a/doc/guides/linux_gsg/build_sample_apps.rst b/doc/guides/linux_gsg/build_sample_apps.rst
1972index 2f60653..2c2f5fa 100644
1973--- a/doc/guides/linux_gsg/build_sample_apps.rst
1974+++ b/doc/guides/linux_gsg/build_sample_apps.rst
1975@@ -4,7 +4,7 @@
1976 Compiling and Running Sample Applications
1977 =========================================
1978
1979-The chapter describes how to compile and run applications in an DPDK environment.
1980+The chapter describes how to compile and run applications in a DPDK environment.
1981 It also provides a pointer to where sample applications are stored.
1982
1983 .. note::
1984@@ -185,7 +185,7 @@ Each bit of the mask corresponds to the equivalent logical core number as report
1985 Since these logical core numbers, and their mapping to specific cores on specific NUMA sockets, can vary from platform to platform,
1986 it is recommended that the core layout for each platform be considered when choosing the coremask/corelist to use in each case.
1987
1988-On initialization of the EAL layer by an DPDK application, the logical cores to be used and their socket location are displayed.
1989+On initialization of the EAL layer by a DPDK application, the logical cores to be used and their socket location are displayed.
1990 This information can also be determined for all cores on the system by examining the ``/proc/cpuinfo`` file, for example, by running cat ``/proc/cpuinfo``.
1991 The physical id attribute listed for each processor indicates the CPU socket to which it belongs.
1992 This can be useful when using other processors to understand the mapping of the logical cores to the sockets.
1993diff --git a/doc/guides/linux_gsg/enable_func.rst b/doc/guides/linux_gsg/enable_func.rst
1994index b2bda80..459a952 100644
1995--- a/doc/guides/linux_gsg/enable_func.rst
1996+++ b/doc/guides/linux_gsg/enable_func.rst
1997@@ -58,22 +58,51 @@ The application can then determine what action to take, if any, if the HPET is n
1998 if any, and on what is available on the system at runtime.
1999
2000 Running DPDK Applications Without Root Privileges
2001---------------------------------------------------------
2002+-------------------------------------------------
2003
2004-.. note::
2005+In order to run DPDK as non-root, the following Linux filesystem objects'
2006+permissions should be adjusted to ensure that the Linux account being used to
2007+run the DPDK application has access to them:
2008+
2009+* All directories which serve as hugepage mount points, for example, ``/dev/hugepages``
2010+
2011+* If the HPET is to be used, ``/dev/hpet``
2012+
2013+When running as non-root user, there may be some additional resource limits
2014+that are imposed by the system. Specifically, the following resource limits may
2015+need to be adjusted in order to ensure normal DPDK operation:
2016+
2017+* RLIMIT_LOCKS (number of file locks that can be held by a process)
2018+
2019+* RLIMIT_NOFILE (number of open file descriptors that can be held open by a process)
2020+
2021+* RLIMIT_MEMLOCK (amount of pinned pages the process is allowed to have)
2022+
2023+The above limits can usually be adjusted by editing
2024+``/etc/security/limits.conf`` file, and rebooting.
2025
2026- The instructions below will allow running DPDK as non-root with older
2027- Linux kernel versions. However, since version 4.0, the kernel does not allow
2028- unprivileged processes to read the physical address information from
2029- the pagemaps file, making it impossible for those processes to use HW
2030- devices which require physical addresses
2031+Additionally, depending on which kernel driver is in use, the relevant
2032+resources also should be accessible by the user running the DPDK application.
2033
2034-Although applications using the DPDK use network ports and other hardware resources directly,
2035-with a number of small permission adjustments it is possible to run these applications as a user other than "root".
2036-To do so, the ownership, or permissions, on the following Linux file system objects should be adjusted to ensure that
2037-the Linux user account being used to run the DPDK application has access to them:
2038+For ``vfio-pci`` kernel driver, the following Linux file system objects'
2039+permissions should be adjusted:
2040
2041-* All directories which serve as hugepage mount points, for example, ``/mnt/huge``
2042+* The VFIO device file, ``/dev/vfio/vfio``
2043+
2044+* The directories under ``/dev/vfio`` that correspond to IOMMU group numbers of
2045+ devices intended to be used by DPDK, for example, ``/dev/vfio/50``
2046+
2047+.. note::
2048+
2049+ The instructions below will allow running DPDK with ``igb_uio`` or
2050+ ``uio_pci_generic`` drivers as non-root with older Linux kernel versions.
2051+ However, since version 4.0, the kernel does not allow unprivileged processes
2052+ to read the physical address information from the pagemaps file, making it
2053+ impossible for those processes to be used by non-privileged users. In such
2054+ cases, using the VFIO driver is recommended.
2055+
2056+For ``igb_uio`` or ``uio_pci_generic`` kernel drivers, the following Linux file
2057+system objects' permissions should be adjusted:
2058
2059 * The userspace-io device files in ``/dev``, for example, ``/dev/uio0``, ``/dev/uio1``, and so on
2060
2061@@ -82,11 +111,6 @@ the Linux user account being used to run the DPDK application has access to them
2062 /sys/class/uio/uio0/device/config
2063 /sys/class/uio/uio0/device/resource*
2064
2065-* If the HPET is to be used, ``/dev/hpet``
2066-
2067-.. note::
2068-
2069- On some Linux installations, ``/dev/hugepages`` is also a hugepage mount point created by default.
2070
2071 Power Management and Power Saving Functionality
2072 -----------------------------------------------
2073@@ -112,7 +136,7 @@ In addition, C3 and C6 should be enabled as well for power management. The path
2074 Using Linux Core Isolation to Reduce Context Switches
2075 -----------------------------------------------------
2076
2077-While the threads used by an DPDK application are pinned to logical cores on the system,
2078+While the threads used by a DPDK application are pinned to logical cores on the system,
2079 it is possible for the Linux scheduler to run other tasks on those cores also.
2080 To help prevent additional workloads from running on those cores,
2081 it is possible to use the ``isolcpus`` Linux kernel parameter to isolate them from the general Linux scheduler.
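
The reworked non-root section above lists RLIMIT_LOCKS, RLIMIT_NOFILE and RLIMIT_MEMLOCK as limits that may need raising. As a plain-POSIX illustration (not part of the patch), an application can report the memlock ceiling it was started with before handing control to the EAL:

    #include <stdio.h>
    #include <sys/resource.h>

    static void
    report_memlock_limit(void)
    {
        struct rlimit rl;

        if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
            printf("RLIMIT_MEMLOCK soft=%llu hard=%llu\n",
                    (unsigned long long)rl.rlim_cur,
                    (unsigned long long)rl.rlim_max);
    }
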
2082diff --git a/doc/guides/linux_gsg/linux_drivers.rst b/doc/guides/linux_gsg/linux_drivers.rst
2083index e0816f9..96817e7 100644
2084--- a/doc/guides/linux_gsg/linux_drivers.rst
2085+++ b/doc/guides/linux_gsg/linux_drivers.rst
2086@@ -120,7 +120,7 @@ Binding and Unbinding Network Ports to/from the Kernel Modules
2087 PMDs Which use the bifurcated driver should not be unbind from their kernel drivers. this section is for PMDs which use the UIO or VFIO drivers.
2088
2089 As of release 1.4, DPDK applications no longer automatically unbind all supported network ports from the kernel driver in use.
2090-Instead, in case the PMD being used use the UIO or VFIO drivers, all ports that are to be used by an DPDK application must be bound to the
2091+Instead, in case the PMD being used uses the UIO or VFIO drivers, all ports that are to be used by a DPDK application must be bound to the
2092 ``uio_pci_generic``, ``igb_uio`` or ``vfio-pci`` module before the application is run.
2093 For such PMDs, any network ports under Linux* control will be ignored and cannot be used by the application.
2094
2095diff --git a/doc/guides/linux_gsg/nic_perf_intel_platform.rst b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
2096index 1dabbce..b70a151 100644
2097--- a/doc/guides/linux_gsg/nic_perf_intel_platform.rst
2098+++ b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
2099@@ -1,3 +1,6 @@
2100+.. SPDX-License-Identifier: BSD-3-Clause
2101+ Copyright(c) 2015 Intel Corporation.
2102+
2103 How to get best performance with NICs on Intel platforms
2104 ========================================================
2105
2106diff --git a/doc/guides/linux_gsg/sys_reqs.rst b/doc/guides/linux_gsg/sys_reqs.rst
2107index 7c47ec0..0af0b22 100644
2108--- a/doc/guides/linux_gsg/sys_reqs.rst
2109+++ b/doc/guides/linux_gsg/sys_reqs.rst
2110@@ -107,7 +107,7 @@ e.g. :doc:`../nics/index`
2111 Running DPDK Applications
2112 -------------------------
2113
2114-To run an DPDK application, some customization may be required on the target machine.
2115+To run a DPDK application, some customization may be required on the target machine.
2116
2117 System Software
2118 ~~~~~~~~~~~~~~~
2119@@ -157,8 +157,36 @@ Without hugepages, high TLB miss rates would occur with the standard 4k page siz
2120 Reserving Hugepages for DPDK Use
2121 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2122
2123-The allocation of hugepages should be done at boot time or as soon as possible after system boot
2124-to prevent memory from being fragmented in physical memory.
2125+The reservation of hugepages can be performed at run time.
2126+This is done by echoing the number of hugepages required
2127+to a ``nr_hugepages`` file in the ``/sys/kernel/`` directory
2128+corresponding to a specific page size (in Kilobytes).
2129+For a single-node system, the command to use is as follows
2130+(assuming that 1024 of 2MB pages are required)::
2131+
2132+ echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
2133+
2134+On a NUMA machine, the above command will usually divide the number of hugepages
2135+equally across all NUMA nodes (assuming there is enough memory on all NUMA nodes).
2136+However, pages can also be reserved explicitly on individual NUMA nodes
2137+using a ``nr_hugepages`` file in the ``/sys/devices/`` directory::
2138+
2139+ echo 1024 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
2140+ echo 1024 > /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages
2141+
2142+.. note::
2143+
2144+ Some kernel versions may not allow reserving 1 GB hugepages at run time,
2145+ so reserving them at boot time may be the only option.
2146+ Please see below for instructions.
2147+
2148+**Alternative:**
2149+
2150+In the general case, reserving hugepages at run time is perfectly fine,
2151+but in use cases where having lots of physically contiguous memory is required,
2152+it is preferable to reserve hugepages at boot time,
2153+as that will help in preventing physical memory from becoming heavily fragmented.
2154+
2155 To reserve hugepages at boot time, a parameter is passed to the Linux kernel on the kernel command line.
2156
2157 For 2 MB pages, just pass the hugepages option to the kernel. For example, to reserve 1024 pages of 2 MB, use::
2158@@ -187,35 +215,29 @@ the number of hugepages reserved at boot time is generally divided equally betwe
2159
2160 See the Documentation/admin-guide/kernel-parameters.txt file in your Linux source tree for further details of these and other kernel options.
2161
2162-**Alternative:**
2163-
2164-For 2 MB pages, there is also the option of allocating hugepages after the system has booted.
2165-This is done by echoing the number of hugepages required to a nr_hugepages file in the ``/sys/devices/`` directory.
2166-For a single-node system, the command to use is as follows (assuming that 1024 pages are required)::
2167-
2168- echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
2169-
2170-On a NUMA machine, pages should be allocated explicitly on separate nodes::
2171-
2172- echo 1024 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
2173- echo 1024 > /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages
2174+Using Hugepages with the DPDK
2175+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2176
2177-.. note::
2178+If secondary process support is not required, DPDK is able to use hugepages
2179+without any configuration by using "in-memory" mode.
2180+Please see :doc:`linux_eal_parameters` for more details.
2181
2182- For 1G pages, it is not possible to reserve the hugepage memory after the system has booted.
2183+If secondary process support is required,
2184+mount points for hugepages need to be created.
2185+On modern Linux distributions, a default mount point for hugepages
2186+is provided by the system and is located at ``/dev/hugepages``.
2187+This mount point will use the default hugepage size
2188+set by the kernel parameters as described above.
2189
2190-Using Hugepages with the DPDK
2191-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2192+However, in order to use hugepage sizes other than the default, it is necessary
2193+to manually create mount points for those hugepage sizes (e.g. 1GB pages).
2194
2195-Once the hugepage memory is reserved, to make the memory available for DPDK use, perform the following steps::
2196+To make the hugepages of size 1GB available for DPDK use,
2197+the following steps must be performed::
2198
2199 mkdir /mnt/huge
2200- mount -t hugetlbfs nodev /mnt/huge
2201+ mount -t hugetlbfs pagesize=1GB /mnt/huge
2202
2203 The mount point can be made permanent across reboots, by adding the following line to the ``/etc/fstab`` file::
2204
2205- nodev /mnt/huge hugetlbfs defaults 0 0
2206-
2207-For 1GB pages, the page size must be specified as a mount option::
2208-
2209- nodev /mnt/huge_1GB hugetlbfs pagesize=1GB 0 0
2210+ nodev /mnt/huge hugetlbfs pagesize=1GB 0 0
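
A note on the run-time reservation described above: after echoing a value into nr_hugepages the kernel may grant fewer pages than requested, so reading the file back is a cheap sanity check. The helper below is an illustrative plain-C sketch assuming the default 2 MB page size. When mounting by hand, the page size is normally passed as a mount option, e.g. `mount -t hugetlbfs -o pagesize=1GB nodev /mnt/huge`.

    #include <stdio.h>

    /* Return how many 2 MB hugepages the kernel has actually reserved,
     * or -1 if the sysfs file cannot be read. */
    static long
    reserved_2mb_hugepages(void)
    {
        const char *path =
            "/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages";
        FILE *f = fopen(path, "r");
        long n = -1;

        if (f != NULL) {
            if (fscanf(f, "%ld", &n) != 1)
                n = -1;
            fclose(f);
        }
        return n;
    }
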
2211diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
2212index fdfa6fd..e54a5ff 100644
2213--- a/doc/guides/nics/dpaa2.rst
2214+++ b/doc/guides/nics/dpaa2.rst
2215@@ -1,5 +1,5 @@
2216 .. SPDX-License-Identifier: BSD-3-Clause
2217- Copyright 2016 NXP
2218+ Copyright 2016,2020 NXP
2219
2220
2221 DPAA2 Poll Mode Driver
2222@@ -300,7 +300,7 @@ The diagram below shows the dpaa2 drivers involved in a networking
2223 scenario and the objects bound to each driver. A brief description
2224 of each driver follows.
2225
2226-.. code-block: console
2227+.. code-block:: console
2228
2229
2230 +------------+
2231diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
2232index f08392a..8014305 100644
2233--- a/doc/guides/nics/features/iavf.ini
2234+++ b/doc/guides/nics/features/iavf.ini
2235@@ -15,6 +15,7 @@ TSO = Y
2236 Promiscuous mode = Y
2237 Allmulticast mode = Y
2238 Unicast MAC filter = Y
2239+Multicast MAC filter = Y
2240 RSS hash = Y
2241 RSS key update = Y
2242 RSS reta update = Y
2243diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
2244index 61d72c2..cfeb156 100644
2245--- a/doc/guides/nics/i40e.rst
2246+++ b/doc/guides/nics/i40e.rst
2247@@ -582,6 +582,15 @@ When a packet is over maximum frame size, the packet is dropped.
2248 However, the Rx statistics, when calling `rte_eth_stats_get` incorrectly
2249 shows it as received.
2250
2251+RX/TX statistics may be incorrect when register overflowed
2252+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2253+
2254+The rx_bytes/tx_bytes statistics registers are 48 bits wide.
2255+Although the software extends these counters to 64 bits,
2256+there is no way to detect whether a register overflowed more than once.
2257+The rx_bytes/tx_bytes statistics are therefore only correct if they are
2258+updated at least once between two overflows.
2259+
2260 VF & TC max bandwidth setting
2261 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2262
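
Given the i40e note above about 48-bit byte counters, the practical mitigation is to read the port statistics often enough that a register cannot wrap twice between reads. A hedged sketch of one such poll (the port id is a placeholder):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    poll_stats_once(uint16_t port_id)
    {
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) == 0)
            printf("port %u: rx_bytes=%" PRIu64 " tx_bytes=%" PRIu64 "\n",
                    port_id, stats.ibytes, stats.obytes);
    }
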
2263diff --git a/doc/guides/nics/nfp.rst b/doc/guides/nics/nfp.rst
2264index 5f2a069..020e37d 100644
2265--- a/doc/guides/nics/nfp.rst
2266+++ b/doc/guides/nics/nfp.rst
2267@@ -102,22 +102,39 @@ directory per firmware application. Options 1 and 2 for firmware filenames allow
2268 more than one SmartNIC, same type of SmartNIC or different ones, and to upload a
2269 different firmware to each SmartNIC.
2270
2271+ .. Note::
2272+ Currently the NFP PMD supports using the PF with Agilio Basic Firmware. See
2273+ https://help.netronome.com/support/solutions for more information on the
2274+ various firmwares supported by the Netronome Agilio CX smartNIC.
2275
2276 PF multiport support
2277 --------------------
2278
2279-Some NFP cards support several physical ports with just one single PCI device.
2280-The DPDK core is designed with a 1:1 relationship between PCI devices and DPDK
2281-ports, so NFP PMD PF support requires handling the multiport case specifically.
2282-During NFP PF initialization, the PMD will extract the information about the
2283-number of PF ports from the firmware and will create as many DPDK ports as
2284-needed.
2285+The NFP PMD can work with up to 8 ports on the same PF device. The number of
2286+available ports is firmware and hardware dependent, and the driver looks for a
2287+firmware symbol during initialization to know how many can be used.
2288
2289-Because the unusual relationship between a single PCI device and several DPDK
2290-ports, there are some limitations when using more than one PF DPDK port: there
2291-is no support for RX interrupts and it is not possible either to use those PF
2292-ports with the device hotplug functionality.
2293+DPDK apps work with ports, and a port is usually a PF or a VF PCI device.
2294+However, with the NFP PF multiport there is just one PF PCI device. Supporting
2295+this particular configuration requires the PMD to create ports in a special way,
2296+although once they are created, DPDK apps should be able to use them as normal
2297+PCI ports.
2298
2299+NFP ports belonging to the same PF can be seen inside PMD initialization with a
2300+suffix added to the PCI ID: wwww:xx:yy.z_port_n. For example, a PF with PCI ID
2301+0000:03:00.0 and four ports is seen by the PMD code as:
2302+
2303+ .. code-block:: console
2304+
2305+ 0000:03:00.0_port_0
2306+ 0000:03:00.0_port_1
2307+ 0000:03:00.0_port_2
2308+ 0000:03:00.0_port_3
2309+
2310+ .. Note::
2311+
2312+ There are some limitations with multiport support: RX interrupts and
2313+ device hot-plugging are not supported.
2314
2315 PF multiprocess support
2316 -----------------------
2317diff --git a/doc/guides/nics/pcap_ring.rst b/doc/guides/nics/pcap_ring.rst
2318index cf230ae..8fdb491 100644
2319--- a/doc/guides/nics/pcap_ring.rst
2320+++ b/doc/guides/nics/pcap_ring.rst
2321@@ -166,7 +166,7 @@ Forward packets through two network interfaces:
2322 .. code-block:: console
2323
2324 $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
2325- --vdev 'net_pcap0,iface=eth0' --vdev='net_pcap1;iface=eth1'
2326+ --vdev 'net_pcap0,iface=eth0' --vdev='net_pcap1,iface=eth1'
2327
2328 Enable 2 tx queues on a network interface:
2329
2330diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
2331index 67d9b05..f79ebf5 100644
2332--- a/doc/guides/nics/sfc_efx.rst
2333+++ b/doc/guides/nics/sfc_efx.rst
2334@@ -295,7 +295,7 @@ whitelist option like "-w 02:00.0,arg1=value1,...".
2335 Case-insensitive 1/y/yes/on or 0/n/no/off may be used to specify
2336 boolean parameters value.
2337
2338-- ``rx_datapath`` [auto|efx|ef10|ef10_esps] (default **auto**)
2339+- ``rx_datapath`` [auto|efx|ef10|ef10_essb] (default **auto**)
2340
2341 Choose receive datapath implementation.
2342 **auto** allows the driver itself to make a choice based on firmware
2343@@ -304,7 +304,7 @@ boolean parameters value.
2344 **ef10** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which is
2345 more efficient than libefx-based and provides richer packet type
2346 classification.
2347- **ef10_esps** chooses SFNX2xxx equal stride packed stream datapath
2348+ **ef10_essb** chooses SFNX2xxx equal stride super-buffer datapath
2349 which may be used on DPDK firmware variant only
2350 (see notes about its limitations above).
2351
2352diff --git a/doc/guides/prog_guide/kernel_nic_interface.rst b/doc/guides/prog_guide/kernel_nic_interface.rst
2353index 32d09cc..f5904f4 100644
2354--- a/doc/guides/prog_guide/kernel_nic_interface.rst
2355+++ b/doc/guides/prog_guide/kernel_nic_interface.rst
2356@@ -178,7 +178,7 @@ KNI Creation and Deletion
2357 -------------------------
2358
2359 Before any KNI interfaces can be created, the ``rte_kni`` kernel module must
2360-be loaded into the kernel and configured withe ``rte_kni_init()`` function.
2361+be loaded into the kernel and configured with the ``rte_kni_init()`` function.
2362
2363 The KNI interfaces are created by a DPDK application dynamically via the
2364 ``rte_kni_alloc()`` function.
2365diff --git a/doc/guides/prog_guide/multi_proc_support.rst b/doc/guides/prog_guide/multi_proc_support.rst
2366index a84083b..1a4a9e2 100644
2367--- a/doc/guides/prog_guide/multi_proc_support.rst
2368+++ b/doc/guides/prog_guide/multi_proc_support.rst
2369@@ -75,7 +75,7 @@ and point to the same objects, in both processes.
2370
2371
2372 The EAL also supports an auto-detection mode (set by EAL ``--proc-type=auto`` flag ),
2373-whereby an DPDK process is started as a secondary instance if a primary instance is already running.
2374+whereby a DPDK process is started as a secondary instance if a primary instance is already running.
2375
2376 Deployment Models
2377 -----------------
2378diff --git a/doc/guides/prog_guide/packet_classif_access_ctrl.rst b/doc/guides/prog_guide/packet_classif_access_ctrl.rst
2379index 2945eac..f99302a 100644
2380--- a/doc/guides/prog_guide/packet_classif_access_ctrl.rst
2381+++ b/doc/guides/prog_guide/packet_classif_access_ctrl.rst
2382@@ -373,6 +373,12 @@ There are several implementations of classify algorithm:
2383
2384 * **RTE_ACL_CLASSIFY_AVX2**: vector implementation, can process up to 16 flows in parallel. Requires AVX2 support.
2385
2386+* **RTE_ACL_CLASSIFY_NEON**: vector implementation, can process up to 8 flows
2387+ in parallel. Requires NEON support.
2388+
2389+* **RTE_ACL_CLASSIFY_ALTIVEC**: vector implementation, can process up to 8
2390+ flows in parallel. Requires ALTIVEC support.
2391+
2392 It is purely a runtime decision which method to choose, there is no build-time difference.
2393 All implementations operates over the same internal RT structures and use similar principles. The main difference is that vector implementations can manually exploit IA SIMD instructions and process several input data flows in parallel.
2394 At startup ACL library determines the highest available classify method for the given platform and sets it as default one. Though the user has an ability to override the default classifier function for a given ACL context or perform particular search using non-default classify method. In that case it is user responsibility to make sure that given platform supports selected classify implementation.
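
Since the guide above now lists the NEON and ALTIVEC classify methods, here is a hedged sketch of overriding the default method for an existing ACL context ("ctx" is assumed to have been built already):

    #include <rte_acl.h>

    static int
    force_scalar_classify(struct rte_acl_ctx *ctx)
    {
        /* returns non-zero if the requested method is not available */
        return rte_acl_set_ctx_classify(ctx, RTE_ACL_CLASSIFY_SCALAR);
    }
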
2395diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
2396index afa94b4..5810959 100644
2397--- a/doc/guides/rel_notes/deprecation.rst
2398+++ b/doc/guides/rel_notes/deprecation.rst
2399@@ -67,12 +67,6 @@ Deprecation Notices
2400 In 19.11 PMDs will still update the field even when the offload is not
2401 enabled.
2402
2403-* cryptodev: support for using IV with all sizes is added, J0 still can
2404- be used but only when IV length in following structs ``rte_crypto_auth_xform``,
2405- ``rte_crypto_aead_xform`` is set to zero. When IV length is greater or equal
2406- to one it means it represents IV, when is set to zero it means J0 is used
2407- directly, in this case 16 bytes of J0 need to be passed.
2408-
2409 * sched: To allow more traffic classes, flexible mapping of pipe queues to
2410 traffic classes, and subport level configuration of pipes and queues
2411 changes will be made to macros, data structures and API functions defined
2412@@ -81,8 +75,3 @@ Deprecation Notices
2413
2414 * metrics: The function ``rte_metrics_init`` will have a non-void return
2415 in order to notify errors instead of calling ``rte_exit``.
2416-
2417-* power: ``rte_power_set_env`` function will no longer return 0 on attempt
2418- to set new power environment if power environment was already initialized.
2419- In this case the function will return -1 unless the environment is unset first
2420- (using ``rte_power_unset_env``). Other function usage scenarios will not change.
2421diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
2422index acd18a3..d17a05f 100644
2423--- a/doc/guides/rel_notes/release_19_11.rst
2424+++ b/doc/guides/rel_notes/release_19_11.rst
2425@@ -2358,3 +2358,564 @@ Tested Platforms
2426 * Basic cryptodev testing
2427
2428 * vhost_crypto Unit test and Function/Performance test
2429+
2430+19.11.6 Release Notes
2431+---------------------
2432+
2433+19.11.6 Fixes
2434+~~~~~~~~~~~~~
2435+
2436+* acl: fix x86 build for compiler without AVX2
2437+* app/bbdev: fix test vector symlink
2438+* app/eventdev: check timer adadpters number
2439+* app: fix ethdev port id size
2440+* app: fix missing dependencies
2441+* app/testpmd: do not allow dynamic change of core number
2442+* app/testpmd: fix bonding xmit balance policy command
2443+* app/testpmd: fix build with gcc 11
2444+* app/testpmd: fix descriptor id check
2445+* app/testpmd: fix displaying Rx/Tx queues information
2446+* app/testpmd: fix max Rx packet length for VLAN packet
2447+* app/testpmd: fix MTU after device configure
2448+* app/testpmd: fix name of bitrate library in meson build
2449+* app/testpmd: fix packet header in txonly mode
2450+* app/testpmd: fix port id check in Tx VLAN command
2451+* app/testpmd: fix RSS key for flow API RSS rule
2452+* app/testpmd: fix VLAN configuration on failure
2453+* app/testpmd: remove restriction on Tx segments set
2454+* app/testpmd: revert max Rx packet length adjustment
2455+* app/testpmd: revert setting MTU explicitly after configure
2456+* app/test-sad: fix uninitialized variable
2457+* baseband/fpga_lte_fec: fix crash with debug
2458+* baseband/turbo_sw: fix memory leak in error path
2459+* build: fix gcc warning requiring Wformat
2460+* build: fix install on Windows
2461+* build: fix MS linker flag with meson 0.54
2462+* build: skip detecting libpcap via pcap-config
2463+* bus/dpaa: fix fd check before close
2464+* bus/dpaa: remove logically dead code
2465+* bus/fslmc: fix atomic queues on NXP LX2 platform
2466+* bus/fslmc: fix dpio close
2467+* bus/fslmc: fix VFIO group descriptor check
2468+* bus/pci: fix leak on VFIO mapping error
2469+* bus/pci: fix memory leak when unmapping VFIO resource
2470+* bus/pci: remove duplicate declaration
2471+* bus/pci: remove unused scan by address
2472+* common/mlx5: fix DevX SQ object creation
2473+* common/mlx5: fix name for ConnectX VF device ID
2474+* common/mlx5: fix PCI address lookup
2475+* common/qat: add missing kmod dependency info
2476+* compress/isal: check allocation in queue setup
2477+* config: add Graviton2(arm64) defconfig
2478+* config: enable packet prefetching with Meson
2479+* crypto/aesni_mb: fix CCM digest size check
2480+* crypto/aesni_mb: fix GCM digest size check
2481+* crypto/armv8: fix mempool object returning
2482+* crypto/caam_jr: fix device tree parsing for SEC_ERA
2483+* cryptodev: fix parameter parsing
2484+* crypto/dpaa2_sec: fix stats query without queue pair
2485+* crypto/dpaa2_sec: remove dead code
2486+* crypto/dpaa_sec: fix a null pointer dereference
2487+* crypto/octeontx2: fix multi-process
2488+* crypto/octeontx2: fix out-of-place support
2489+* crypto/octeontx2: fix session-less mode
2490+* crypto/octeontx: fix out-of-place support
2491+* crypto/scheduler: fix header install with meson
2492+* crypto/scheduler: remove unused internal seqn
2493+* devtools: fix build test config inheritance from env
2494+* devtools: fix directory filter in forbidden token check
2495+* devtools: fix x86-default build test install env
2496+* distributor: fix API documentation
2497+* distributor: fix buffer use after free
2498+* distributor: fix clearing returns buffer
2499+* distributor: fix flushing in flight packets
2500+* distributor: fix handshake deadlock
2501+* distributor: fix handshake synchronization
2502+* distributor: fix return pkt calls in single mode
2503+* distributor: fix scalar matching
2504+* distributor: handle worker shutdown in burst mode
2505+* doc: add SPDX license tag header to Intel performance guide
2506+* doc: add SPDX license tag header to meson guide
2507+* doc: clarify instructions on running as non-root
2508+* doc: fix diagram in dpaa2 guide
2509+* doc: fix EF10 Rx mode name in sfc guide
2510+* doc: fix ethdev port id size
2511+* doc: fix formatting of notes in meson guide
2512+* doc: fix grammar
2513+* doc: fix missing classify methods in ACL guide
2514+* doc: fix rule file parameters in l3fwd-acl guide
2515+* doc: fix typo in ipsec-secgw guide
2516+* doc: fix typo in KNI guide
2517+* doc: fix typo in pcap guide
2518+* doc: improve multiport PF in nfp guide
2519+* doc: remove notice about AES-GCM IV and J0
2520+* doc: remove obsolete deprecation notice for power library
2521+* doc: update information on using hugepages
2522+* drivers/net: fix port id size
2523+* eal/arm: fix build with gcc optimization level 0
2524+* eal/arm: fix clang build of native target
2525+* eal: fix doxygen for EAL cleanup
2526+* eal: fix leak on device event callback unregister
2527+* eal: fix MCS lock and ticketlock headers install
2528+* eal/linux: change udev debug message
2529+* eal/linux: fix memory leak in uevent handling
2530+* eal/x86: fix memcpy AVX-512 enablement
2531+* efd: fix tailq entry leak in error path
2532+* ethdev: fix data type for port id
2533+* ethdev: fix memory ordering for callback functions
2534+* ethdev: fix RSS flow expansion in case of mismatch
2535+* ethdev: move non-offload capabilities
2536+* ethdev: remove redundant license text
2537+* eventdev: check allocation in Tx adapter
2538+* eventdev: fix adapter leak in error path
2539+* event/dpaa2: fix dereference before null check
2540+* event/dpaa2: remove dead code from self test
2541+* event/octeontx2: unlink queues during port release
2542+* examples/fips_validation: fix buffer overflow
2543+* examples/fips_validation: fix build with pkg-config
2544+* examples/fips_validation: fix missed version line
2545+* examples/fips_validation: fix version compatibility
2546+* examples: fix flattening directory layout on install
2547+* examples/ioat: fix stats print
2548+* examples/ip_pipeline: fix external build
2549+* examples/ip_pipeline: use POSIX network address conversion
2550+* examples/ipsec-secgw: use POSIX network address conversion
2551+* examples/kni: fix build with pkg-config
2552+* examples/l2fwd-crypto: fix build with pkg-config
2553+* examples/l2fwd-crypto: fix missing dependency
2554+* examples/l2fwd-keepalive: skip meson build if no librt
2555+* examples/l3fwd-power: check packet types after start
2556+* examples/multi_process: fix build on Ubuntu 20.04
2557+* examples/ntb: fix clean target
2558+* examples/performance-thread: fix build with low core count
2559+* examples/performance-thread: fix build with pkg-config
2560+* examples/qos_sched: fix usage string
2561+* examples/rxtx_callbacks: fix build with pkg-config
2562+* examples/vhost_blk: check driver start failure
2563+* examples/vhost_blk: fix build with pkg-config
2564+* examples/vhost_crypto: add new line character in usage
2565+* examples/vm_power: fix 32-bit build
2566+* examples/vm_power: fix build on Ubuntu 20.04
2567+* fix spellings that Lintian complains about
2568+* gro: fix packet type detection with IPv6 tunnel
2569+* gso: fix payload unit size for UDP
2570+* ipc: fix spelling in log and comment
2571+* kni: fix build on RHEL 8.3
2572+* kni: fix build with Linux 5.9
2573+* license: add licenses for exception cases
2574+* maintainers: update Mellanox emails
2575+* malloc: fix style in free list index computation
2576+* mbuf: fix dynamic fields and flags with multiprocess
2577+* mbuf: fix typo in dynamic field convention note
2578+* mcslock: fix hang in weak memory model
2579+* mem: fix allocation failure on non-NUMA kernel
2580+* mem: fix allocation in container with SELinux
2581+* mem: fix config name in error logs
2582+* mempool/octeontx: fix aura to pool mapping
2583+* net/af_xdp: avoid deadlock due to empty fill queue
2584+* net/af_xdp: change return value from Rx to unsigned
2585+* net/af_xdp: fix pointer storage size
2586+* net/af_xdp: fix umem size
2587+* net/af_xdp: use strlcpy instead of strncpy
2588+* net/bnx2x: add QLogic vendor id for BCM57840
2589+* net/bnxt: add memory allocation check in VF info init
2590+* net/bnxt: add separate mutex for FW health check
2591+* net/bnxt: fix boolean operator usage
2592+* net/bnxt: fix checking VNIC in shutdown path
2593+* net/bnxt: fix crash in vector mode Tx
2594+* net/bnxt: fix doorbell barrier location
2595+* net/bnxt: fix drop enable in get Rx queue info
2596+* net/bnxt: fix endianness while setting L4 destination port
2597+* net/bnxt: fix L2 filter allocation
2598+* net/bnxt: fix link status during device recovery
2599+* net/bnxt: fix link update
2600+* net/bnxt: fix LRO configuration
2601+* net/bnxt: fix memory leak when freeing VF info
2602+* net/bnxt: fix queue get info
2603+* net/bnxt: fix queue release
2604+* net/bnxt: fix resetting mbuf data offset
2605+* net/bnxt: fix Rx performance by removing spinlock
2606+* net/bnxt: fix shift operation
2607+* net/bnxt: fix structure variable initialization
2608+* net/bnxt: fix vnic Rx queue cnt updation
2609+* net/bnxt: fix xstats by id
2610+* net/bnxt: increase size of Rx CQ
2611+* net/bnxt: remove useless prefetches
2612+* net/bonding: fix possible unbalanced packet receiving
2613+* net/bonding: fix Rx queue conversion
2614+* net: check segment pointer in raw checksum processing
2615+* net/cxgbe: fix crash when accessing empty Tx mbuf list
2616+* net/cxgbe: fix duplicate MAC addresses in MPS TCAM
2617+* net/cxgbe: fix queue DMA ring leaks during port close
2618+* net/dpaa2: fix build with timesync functions
2619+* net/dpaa2: fix misuse of interface index
2620+* net/dpaa: fix port ID type in API
2621+* net/ena/base: align IO CQ allocation to 4K
2622+* net/ena/base: fix release of wait event
2623+* net/ena/base: specify delay operations
2624+* net/ena/base: use min/max macros with type conversion
2625+* net/ena: fix getting xstats global stats offset
2626+* net/ena: fix setting Rx checksum flags in mbuf
2627+* net/ena: remove unused macro
2628+* net/enic: fix header sizes when copying flow patterns
2629+* net/enic: generate VXLAN src port if it is zero in template
2630+* net/enic: ignore VLAN inner type when it is zero
2631+* net/failsafe: fix double space in warning log
2632+* net/failsafe: fix state synchro cleanup
2633+* net/fm10k: fix memory leak when thresh check fails
2634+* net/fm10k: fix memory leak when Tx thresh check fails
2635+* net/fm10k: fix vector Rx
2636+* net/hinic/base: add message check for command channel
2637+* net/hinic/base: fix clock definition with glibc version
2638+* net/hinic/base: fix log info for PF command channel
2639+* net/hinic/base: get default cos from chip
2640+* net/hinic/base: remove queue number limitation
2641+* net/hinic/base: support two or more AEQS for chip
2642+* net/hinic: fix filters on memory allocation failure
2643+* net/hinic: fix negative array index read
2644+* net/hinic: fix Rx nombuf stats
2645+* net/hinic: remove optical module operation
2646+* net/hns3: check PCI config space reads
2647+* net/hns3: check PCI config space write
2648+* net/hns3: check setting VF PCI bus return value
2649+* net/hns3: decrease non-nearby memory access in Rx
2650+* net/hns3: fix configurations of port-level scheduling rate
2651+* net/hns3: fix configuring device with RSS enabled
2652+* net/hns3: fix config when creating RSS rule after flush
2653+* net/hns3: fix crash with multi-TC
2654+* net/hns3: fix data type to store queue number
2655+* net/hns3: fix default MAC address from firmware
2656+* net/hns3: fix deleting default VLAN from PF
2657+* net/hns3: fix error type when validating RSS flow action
2658+* net/hns3: fix flow error type
2659+* net/hns3: fix flow RSS queue number 0
2660+* net/hns3: fix flushing RSS rule
2661+* net/hns3: fix out of bounds access
2662+* net/hns3: fix queue offload capability
2663+* net/hns3: fix reassembling multiple segment packets in Tx
2664+* net/hns3: fix RSS max queue id allowed in multi-TC
2665+* net/hns3: fix some incomplete command structures
2666+* net/hns3: fix storing RSS info when creating flow action
2667+* net/hns3: fix TX checksum with fix header length
2668+* net/hns3: reduce address calculation in Rx
2669+* net/hns3: report Rx drop packets enable configuration
2670+* net/hns3: report Rx free threshold
2671+* net/hns3: skip VF register access when PF in FLR
2672+* net/i40e: add C++ include guard
2673+* net/i40e/base: fix function header arguments
2674+* net/i40e/base: fix Rx only for unicast promisc on VLAN
2675+* net/i40e: fix build for log format specifier
2676+* net/i40e: fix byte counters
2677+* net/i40e: fix flow director for eth + VLAN pattern
2678+* net/i40e: fix incorrect FDIR flex configuration
2679+* net/i40e: fix link status
2680+* net/i40e: fix QinQ flow pattern to allow non full mask
2681+* net/i40e: fix recreating flexible flow director rule
2682+* net/i40e: fix vector Rx
2683+* net/i40e: fix virtual channel conflict
2684+* net/iavf: downgrade error log
2685+* net/iavf: enable port reset
2686+* net/iavf: fix command after PF reset
2687+* net/iavf: fix flow flush after PF reset
2688+* net/iavf: fix iterator for RSS LUT
2689+* net/iavf: fix performance drop after port reset
2690+* net/iavf: fix port start during configuration restore
2691+* net/iavf: fix releasing mbufs
2692+* net/iavf: fix scattered Rx enabling
2693+* net/iavf: fix setting of MAC address
2694+* net/iavf: fix unchecked Tx cleanup error
2695+* net/iavf: fix vector Rx
2696+* net/iavf: support multicast configuration
2697+* net/ice/base: fix issues around move nodes
2698+* net/ice/base: fix parameter name in comment
2699+* net/ice: fix flow validation for unsupported patterns
2700+* net/ice: fix ptype parsing
2701+* net/ice: fix Rx offload flags in SSE path
2702+* net/ice: fix vector Rx
2703+* net/ice: update writeback policy to reduce latency
2704+* net/ixgbe: check switch domain allocation result
2705+* net/ixgbe: fix vector Rx
2706+* net/ixgbe: fix VF reset HW error handling
2707+* net/ixgbe: remove redundant MAC flag check
2708+* net/memif: do not update local copy of tail in Tx
2709+* net/memif: relax load of ring head for M2S ring
2710+* net/memif: relax load of ring head for S2M ring
2711+* net/memif: relax load of ring tail for M2S ring
2712+* net/mlx5: fix debug configuration build issue
2713+* net/mlx5: fix hairpin dependency on destination DevX TIR
2714+* net/mlx5: fix meter table definitions
2715+* net/mlx5: fix missing meter packet
2716+* net/mlx5: fix port shared data reference count
2717+* net/mlx5: fix raw encap/decap limit
2718+* net/mlx5: fix representor interrupts handler
2719+* net/mlx5: fix RSS queue type validation
2720+* net/mlx5: fix RSS RETA reset on start
2721+* net/mlx5: fix Rx descriptor status
2722+* net/mlx5: fix Rx packet padding config via DevX
2723+* net/mlx5: fix Rx queue completion index consistency
2724+* net/mlx5: fix Rx queue count calculation
2725+* net/mlx5: fix Rx queue count calculation
2726+* net/mlx5: fix switch port id when representor in bonding
2727+* net/mlx5: fix xstats reset reinitialization
2728+* net/mlx5: free MR resource on device DMA unmap
2729+* net/mlx5: remove unused includes
2730+* net/mlx5: remove unused log macros
2731+* net/mlx5: remove unused variable in Tx queue creation
2732+* net/mlx5: validate MPLSoGRE with GRE key
2733+* net/mlx: do not enforce RSS hash offload
2734+* net/mvpp2: fix memory leak in error path
2735+* net/netvsc: allocate contiguous physical memory for RNDIS
2736+* net/netvsc: check for overflow on packet info from host
2737+* net/netvsc: disable external mbuf on Rx by default
2738+* net/netvsc: fix multiple channel Rx
2739+* net/netvsc: fix rndis packet addresses
2740+* net/netvsc: fix stale value after free
2741+* net/netvsc: fix Tx queue leak in error path
2742+* net/netvsc: manage VF port under read/write lock
2743+* net/netvsc: replace compiler builtin overflow check
2744+* net/nfp: expand device info get
2745+* net/octeontx2: fix multi segment mode for jumbo packets
2746+* net/octeontx2: fix RSS flow create
2747+* net/octeontx2: remove useless check before free
2748+* net/pcap: fix crash on exit for infinite Rx
2749+* net/pcap: fix input only Rx
2750+* net/pfe: fix misuse of interface index
2751+* net/qede: fix dereference before null check
2752+* net/qede: fix getting link details
2753+* net/qede: fix milliseconds sleep macro
2754+* net/ring: check internal arguments
2755+* net/ring: fix typo in log message
2756+* net/sfc/base: fix tunnel configuration
2757+* net/sfc: fix RSS hash flag when offload is disabled
2758+* net/sfc: fix RSS hash offload if queue action is used
2759+* net/softnic: use POSIX network address conversion
2760+* net/tap: free mempool when closing
2761+* net/thunderx: fix memory leak on rbdr desc ring failure
2762+* net/vdev_netvsc: fix device probing error flow
2763+* net/vhost: fix xstats after clearing stats
2764+* net/virtio: check raw checksum failure
2765+* net/virtio: fix packed ring indirect descriptors setup
2766+* pmdinfogen: fix build with gcc 11
2767+* port: remove useless assignment
2768+* power: fix current frequency index
2769+* raw/dpaa2_qdma: fix reset
2770+* raw/ifpga/base: fix interrupt handler instance usage
2771+* raw/ifpga/base: fix return of IRQ unregister
2772+* raw/ifpga/base: handle unsupported interrupt type
2773+* raw/ifpga: terminate string filled by readlink with null
2774+* raw/ifpga: use trusted buffer to free
2775+* raw/ioat: fix missing close function
2776+* raw/skeleton: allow closing already closed device
2777+* raw/skeleton: reset test statistics
2778+* rcu: avoid literal suffix warning in C++ mode
2779+* Revert "app/testpmd: fix name of bitrate library in meson build"
2780+* Revert "Revert "build: always link whole DPDK static libraries""
2781+* Revert "Revert "build/pkg-config: improve static linking flags""
2782+* Revert "Revert "build/pkg-config: move pkg-config file creation""
2783+* Revert "Revert "build/pkg-config: output drivers first for static build""
2784+* Revert "Revert "build/pkg-config: prevent overlinking""
2785+* Revert "Revert "devtools: test static linkage with pkg-config""
2786+* stack: fix uninitialized variable
2787+* stack: reload head when pop fails
2788+* table: fix hash for 32-bit
2789+* test/crypto: fix device number
2790+* test/crypto: fix stats test
2791+* test/distributor: collect return mbufs
2792+* test/distributor: ensure all packets are delivered
2793+* test/distributor: fix freeing mbufs
2794+* test/distributor: fix lcores statistics
2795+* test/distributor: fix mbuf leak on failure
2796+* test/distributor: fix quitting workers in burst mode
2797+* test/distributor: fix race conditions on shutdown
2798+* test/distributor: fix shutdown of busy worker
2799+* test/event_crypto_adapter: fix configuration
2800+* test/event: fix function arguments for crypto adapter
2801+* test/mbuf: skip field registration at busy offset
2802+* test/rcu: fix build with low core count
2803+* test/ring: fix number of single element enqueue/dequeue
2804+* timer: add limitation note for sync stop and reset
2805+* usertools: fix CPU layout script to be PEP8 compliant
2806+* usertools: fix pmdinfo parsing
2807+* vdpa/ifc: fix build with recent kernels
2808+* version: 19.11.6-rc1
2809+* vfio: fix group descriptor check
2810+* vhost: fix error path when setting memory tables
2811+* vhost: fix external mbuf creation
2812+* vhost: fix fd leak in dirty logging setup
2813+* vhost: fix fd leak in kick setup
2814+* vhost: fix IOTLB mempool single-consumer flag
2815+* vhost: fix virtio-net header length with packed ring
2816+* vhost: fix virtqueue initialization
2817+* vhost: fix virtqueues metadata allocation
2818+* vhost: validate index in available entries API
2819+* vhost: validate index in guest notification API
2820+* vhost: validate index in inflight API
2821+* vhost: validate index in live-migration API
2822+
2823+19.11.6 Validation
2824+~~~~~~~~~~~~~~~~~~
2825+
2826+* Intel(R) Testing
2827+
2828+ * Basic Intel(R) NIC(ixgbe, i40e and ice) testing
2829+ * PF (i40e)
2830+ * PF (ixgbe)
2831+ * PF (ice)
2832+ * VF (i40e)
2833+ * VF (ixgbe)
2834+ * VF (ice)
2835+ * Compile Testing
2836+ * Intel NIC single core/NIC performance
2837+
2838+ * Basic cryptodev and virtio testing
2839+
2840+ * vhost/virtio basic loopback, PVP and performance test
2841+ * cryptodev Function/Performance
2842+
2843+
2844+* Microsoft(R) Testing
2845+
2846+ * Platform
2847+
2848+ * Azure
2849+ * Ubuntu 16.04-LTS
2850+ * Ubuntu 18.04-DAILY-LTS
2851+ * RHEL 7.5
2852+ * Openlogic CentOS 7.5
2853+ * SLES-15-sp1 gen1
2854+ * Mellanox(R) ConnectX-4
2855+ * LISAv2 test framework
2856+
2857+ * Functionality
2858+
2859+ * VERIFY-DPDK-COMPLIANCE * verifies kernel is supported and that the build is successful
2860+ * VERIFY-DPDK-BUILD-AND-TESTPMD-TEST * verifies using testpmd that packets can be sent from a VM to another VM
2861+ * VERIFY-SRIOV-FAILSAFE-FOR-DPDK * disables/enables Accelerated Networking for the NICs under test and makes sure DPDK works in both scenarios
2862+ * VERIFY-DPDK-FAILSAFE-DURING-TRAFFIC * disables/enables Accelerated Networking for the NICs while generating traffic using testpmd
2863+ * PERF-DPDK-FWD-PPS-DS15 * verifies DPDK forwarding performance using testpmd on 2, 4, 8 cores, rx and io mode on size Standard_DS15_v2
2864+ * PERF-DPDK-SINGLE-CORE-PPS-DS4 * verifies DPDK performance using testpmd on 1 core, rx and io mode on size Standard_DS4_v2
2865+ * PERF-DPDK-SINGLE-CORE-PPS-DS15 * verifies DPDK performance using testpmd on 1 core, rx and io mode on size Standard_DS15_v2
2866+ * PERF-DPDK-MULTICORE-PPS-DS15 * verifies DPDK performance using testpmd on 2, 4, 8 cores, rx and io mode on size Standard_DS15_v2
2867+ * PERF-DPDK-MULTICORE-PPS-F32 * verifies DPDK performance using testpmd on 2, 4, 8, 16 cores, rx and io mode on size Standard_F32s_v2
2868+ * DPDK-RING-LATENCY * verifies DPDK CPU latency using dpdk-ring-ping
2869+ * VERIFY-DPDK-OVS * builds OVS with DPDK support and tests if the OVS DPDK ports can be created. Runs only on Ubuntu distro.
2870+ * VERIFY-DPDK-BUILD-AND-NETVSCPMD-TEST * verifies using testpmd with netvsc pmd that packets can be sent from a VM to another VM.
2871+ * VERIFY-SRIOV-FAILSAFE-FOR-DPDK-NETVSCPMD * disables/enables Accelerated Networking for the NICs under test and makes sure DPDK with netvsc pmd works in both scenarios.
2872+ * VERIFY-DPDK-FAILSAFE-DURING-TRAFFIC-NETVSCPMD * Verify Accelerated Networking (VF) removed and readded for the NICs while generating traffic using testpmd with netvsc pmd.
2873+
2874+
2875+* Red Hat(R) Testing
2876+
2877+ * Platform
2878+
2879+ * RHEL 8
2880+ * Kernel 4.18
2881+ * Qemu 5.2
2882+ * X540-AT2 NIC(ixgbe, 10G)
2883+
2884+ * Functionality
2885+
2886+ * Guest with device assignment(PF) throughput testing(1G hugepage size)
2887+ * Guest with device assignment(PF) throughput testing(2M hugepage size)
2888+ * Guest with device assignment(VF) throughput testing
2889+ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing
2890+ * PVP vhost-user 2Q throughput testing
2891+ * PVP vhost-user 1Q * cross numa node throughput testing
2892+ * Guest with vhost-user 2 queues throughput testing
2893+ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect
2894+ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect: PASS
2895+ * PVP 1Q live migration testing
2896+ * PVP 1Q cross numa node live migration testing
2897+ * Guest with ovs+dpdk+vhost-user 1Q live migration testing
2898+ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M)
2899+ * Guest with ovs+dpdk+vhost-user 2Q live migration testing
2900+ * Allocate memory from the NUMA node which Virtio device locates
2901+ * Host PF + DPDK testing
2902+ * Host VF + DPDK testing
2903+
2904+
2905+* Intel(R) Testing with Open vSwitch
2906+
2907+ * OVS testing with OVS 2.14.1
2908+
2909+ * Performance
2910+
2911+ * ICE Device
2912+
2913+ * Basic performance tests (RFC2544 P2P, PVP_CONT, RFC2544 PVP_TPUT, RFC2544 PVVP_TPUT, PVPV) Jumbo frames RSS
2914+
2915+ * i40e Device
2916+
2917+ * Basic performance (RFC2544 P2P, PVP_CONT, RFC2544 PVP_TPUT, RFC2544 PVVP_TPUT, PVPV) Jumbo frames RSS Flow control
2918+
2919+ * ixgbe Device
2920+
2921+ * Basic performance tests (RFC2544 P2P, PVP_CONT, RFC2544 PVP_TPUT, RFC2544 PVVP_TPUT, PVPV) Jumbo frames RSS
2922+
2923+ * Functionality
2924+
2925+ * vhostuserclient device
2926+ * jumbo frames
2927+ * dpdkvhostuserclient re-connect
2928+ * dpdkvhostuserclient NUMA node
2929+
2930+
2931+* Nvidia(R) Testing
2932+
2933+ * Basic functionality with testpmd
2934+
2935+ * Tx/Rx
2936+ * xstats
2937+ * Timestamps
2938+ * Link status
2939+ * RTE flow and flow_director
2940+ * RSS
2941+ * VLAN stripping and insertion
2942+ * Checksum/TSO
2943+ * ptype
2944+ * l3fwd-power example application
2945+ * Multi-process example applications
2946+
2947+ * Build tests
2948+
2949+ * Ubuntu 20.04 with MLNX_OFED_LINUX-5.1-2.5.8.0.
2950+ * Ubuntu 20.04 with rdma-core master (6a5c1b7).
2951+ * Ubuntu 20.04 with rdma-core v28.0.
2952+ * Ubuntu 18.04 with rdma-core v17.1.
2953+ * Ubuntu 18.04 with rdma-core master (6a5c1b7) (i386).
2954+ * Ubuntu 16.04 with rdma-core v22.7.
2955+ * Fedora 32 with rdma-core v32.0.
2956+ * CentOS 7 7.9.2009 with rdma-core master (6a5c1b7).
2957+ * CentOS 7 7.9.2009 with MLNX_OFED_LINUX-5.1-2.5.8.0.
2958+ * CentOS 8 8.3.2011 with rdma-core master (6a5c1b7).
2959+ * openSUSE Leap 15.2 with rdma-core v27.1.
2960+
2961+ * ConnectX-5
2962+
2963+ * RHEL 7.4
2964+ * Driver MLNX_OFED_LINUX-5.1-2.5.8.0
2965+ * fw 14.28.2006
2966+
2967+ * ConnectX-4 Lx
2968+
2969+ * RHEL 7.4
2970+ * Driver MLNX_OFED_LINUX-5.1-2.5.8.0
2971+ * fw 16.28.2006
2972+
2973+19.11.6 Known Issues
2974+~~~~~~~~~~~~~~~~~~~~
2975+
2976+* i40e
2977+
2978+ * rss_to_rte_flow/set_key_keylen: create rule can fail.
2979+ https://bugs.dpdk.org/show_bug.cgi?id=573
2980+ * inconsistency with expected queue after creating a flow rule - firmware issue.
2981+
2982+* vhost/virtio
2983+
2984+ * udp-fragmentation-offload cannot be setup on Ubuntu 19.10 VMs.
2985+ https://bugzilla.kernel.org/show_bug.cgi?id=207075
2986+
2987+* vdev_netvsc
2988+
2989+ * hot-removal of VF driver can fail
2990diff --git a/doc/guides/sample_app_ug/flow_classify.rst b/doc/guides/sample_app_ug/flow_classify.rst
2991index bc234b5..451a0db 100644
2992--- a/doc/guides/sample_app_ug/flow_classify.rst
2993+++ b/doc/guides/sample_app_ug/flow_classify.rst
2994@@ -271,7 +271,7 @@ Forwarding application is shown below:
2995 .. code-block:: c
2996
2997 static inline int
2998- port_init(uint8_t port, struct rte_mempool *mbuf_pool)
2999+ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
3000 {
3001 struct rte_eth_conf port_conf = port_conf_default;
3002 const uint16_t rx_rings = 1, tx_rings = 1;
3003diff --git a/doc/guides/sample_app_ug/flow_filtering.rst b/doc/guides/sample_app_ug/flow_filtering.rst
3004index 5e5a6cd..d3653e5 100644
3005--- a/doc/guides/sample_app_ug/flow_filtering.rst
3006+++ b/doc/guides/sample_app_ug/flow_filtering.rst
3007@@ -384,7 +384,7 @@ This function is located in the ``flow_blocks.c`` file.
3008 .. code-block:: c
3009
3010 static struct rte_flow *
3011- generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
3012+ generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
3013 uint32_t src_ip, uint32_t src_mask,
3014 uint32_t dest_ip, uint32_t dest_mask,
3015 struct rte_flow_error *error)
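Both sample-guide snippets above switch the port argument from uint8_t to uint16_t, matching the ethdev port id type. A minimal sketch (assuming a port_init() helper like the one in the guides; it is not code from this patch) of iterating all ports with a 16-bit id:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    /* Helper as documented in the sample guides (16-bit port id). */
    extern int port_init(uint16_t port, struct rte_mempool *mbuf_pool);

    static int
    init_all_ports(struct rte_mempool *mbuf_pool)
    {
        uint16_t port_id;    /* ethdev port ids are 16-bit values */

        RTE_ETH_FOREACH_DEV(port_id) {
            if (port_init(port_id, mbuf_pool) != 0) {
                printf("Cannot init port %" PRIu16 "\n", port_id);
                return -1;
            }
        }
        return 0;
    }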
3016diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
3017index d6d8d44..eb1a57a 100644
3018--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
3019+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
3020@@ -92,7 +92,7 @@ The application has a number of command line options::
3021
3022 ./build/ipsec-secgw [EAL options] --
3023 -p PORTMASK -P -u PORTMASK -j FRAMESIZE
3024- -l -w REPLAY_WINOW_SIZE -e -a
3025+ -l -w REPLAY_WINDOW_SIZE -e -a
3026 --config (port,queue,lcore)[,(port,queue,lcore]
3027 --single-sa SAIDX
3028 --rxoffload MASK
3029@@ -122,7 +122,7 @@ Where:
3030
3031 * ``-l``: enables code-path that uses librte_ipsec.
3032
3033-* ``-w REPLAY_WINOW_SIZE``: specifies the IPsec sequence number replay window
3034+* ``-w REPLAY_WINDOW_SIZE``: specifies the IPsec sequence number replay window
3035 size for each Security Association (available only with librte_ipsec
3036 code path).
3037
3038diff --git a/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst b/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
3039index a44fbcd..4e58c6c 100644
3040--- a/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
3041+++ b/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
3042@@ -236,7 +236,7 @@ The application has a number of command line options:
3043
3044 .. code-block:: console
3045
3046- ./build/l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME rule_ipv6 FILENAME [--scalar] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa]
3047+ ./build/l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME --rule_ipv6 FILENAME [--scalar] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa]
3048
3049
3050 where,
3051@@ -268,7 +268,7 @@ To enable L3 forwarding between two ports, assuming that both ports are in the s
3052
3053 .. code-block:: console
3054
3055- ./build/l3fwd-acl -l 1,2 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)" --rule_ipv4="./rule_ipv4.db" -- rule_ipv6="./rule_ipv6.db" --scalar
3056+ ./build/l3fwd-acl -l 1,2 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)" --rule_ipv4="./rule_ipv4.db" --rule_ipv6="./rule_ipv6.db" --scalar
3057
3058 In this command:
3059
3060@@ -290,9 +290,9 @@ In this command:
3061 | | | | |
3062 +----------+------------+-----------+-------------------------------------+
3063
3064-* The --rule_ipv4 option specifies the reading of IPv4 rules sets from the ./ rule_ipv4.db file.
3065+* The --rule_ipv4 option specifies the reading of IPv4 rules sets from the rule_ipv4.db file.
3066
3067-* The --rule_ipv6 option specifies the reading of IPv6 rules sets from the ./ rule_ipv6.db file.
3068+* The --rule_ipv6 option specifies the reading of IPv6 rules sets from the rule_ipv6.db file.
3069
3070 * The --scalar option specifies the performing of rule lookup with a scalar function.
3071
3072diff --git a/doc/guides/sample_app_ug/l3_forward_power_man.rst b/doc/guides/sample_app_ug/l3_forward_power_man.rst
3073index 6ec24f4..dbdaa74 100644
3074--- a/doc/guides/sample_app_ug/l3_forward_power_man.rst
3075+++ b/doc/guides/sample_app_ug/l3_forward_power_man.rst
3076@@ -49,7 +49,7 @@ to set the CPUFreq governor and set the frequency of specific cores.
3077
3078 This application includes a P-state power management algorithm to generate a frequency hint to be sent to CPUFreq.
3079 The algorithm uses the number of received and available Rx packets on recent polls to make a heuristic decision to scale frequency up/down.
3080-Specifically, some thresholds are checked to see whether a specific core running an DPDK polling thread needs to increase frequency
3081+Specifically, some thresholds are checked to see whether a specific core running a DPDK polling thread needs to increase frequency
3082 a step up based on the near to full trend of polled Rx queues.
3083 Also, it decreases frequency a step if packet processed per loop is far less than the expected threshold
3084 or the thread's sleeping time exceeds a threshold.
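The corrected paragraph above describes the scale-up/scale-down heuristic only in prose. A minimal sketch of such a decision function (hypothetical thresholds and names, not the l3fwd-power implementation):

    #include <stdint.h>
    #include <stdbool.h>

    /* Made-up thresholds, for illustration only. */
    #define QUEUE_NEARLY_FULL_PCT  75   /* Rx fill level that triggers scale-up */
    #define IDLE_PKTS_PER_LOOP      4   /* below this, consider scaling down    */

    enum freq_hint { FREQ_LOWER = -1, FREQ_KEEP = 0, FREQ_HIGHER = 1 };

    static enum freq_hint
    frequency_hint(uint32_t rx_last_poll, uint32_t rx_ring_size,
                   uint32_t pkts_per_loop, bool slept_too_long)
    {
        /* Polled queue trending toward full: request a higher frequency. */
        if (rx_last_poll * 100 >= rx_ring_size * QUEUE_NEARLY_FULL_PCT)
            return FREQ_HIGHER;

        /* Little work per loop, or the thread slept too long: step down. */
        if (pkts_per_loop < IDLE_PKTS_PER_LOOP || slept_too_long)
            return FREQ_LOWER;

        return FREQ_KEEP;
    }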
3085diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
3086index 78bdf60..4dd29ee 100644
3087--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
3088+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
3089@@ -2437,16 +2437,16 @@ For example, to set the MAC address of a Link Bonding device (port 10) to 00:00:
3090
3091 testpmd> set bonding mac 10 00:00:00:00:00:01
3092
3093-set bonding xmit_balance_policy
3094+set bonding balance_xmit_policy
3095 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3096
3097 Set the transmission policy for a Link Bonding device when it is in Balance XOR mode::
3098
3099- testpmd> set bonding xmit_balance_policy (port_id) (l2|l23|l34)
3100+ testpmd> set bonding balance_xmit_policy (port_id) (l2|l23|l34)
3101
3102 For example, set a Link Bonding device (port 10) to use a balance policy of layer 3+4 (IP addresses & UDP ports)::
3103
3104- testpmd> set bonding xmit_balance_policy 10 l34
3105+ testpmd> set bonding balance_xmit_policy 10 l34
3106
3107
3108 set bonding mon_period
3109diff --git a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
3110index 8bd10b4..0a75a0f 100644
3111--- a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
3112+++ b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
3113@@ -2325,7 +2325,7 @@ fpga_lte_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
3114
3115 rte_bbdev_log_debug(
3116 "Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
3117- dev->device->driver->name, dev->data->name,
3118+ drv->driver.name, dev->data->name,
3119 (void *)pci_dev->mem_resource[0].addr,
3120 pci_dev->mem_resource[0].phys_addr);
3121 }
3122@@ -2380,7 +2380,7 @@ fpga_lte_fec_probe(struct rte_pci_driver *pci_drv,
3123 ((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
3124
3125 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3126- if (!strcmp(bbdev->device->driver->name,
3127+ if (!strcmp(pci_drv->driver.name,
3128 RTE_STR(FPGA_LTE_FEC_PF_DRIVER_NAME)))
3129 print_static_reg_debug_info(d->mmio_base);
3130 #endif
3131diff --git a/drivers/baseband/turbo_sw/bbdev_turbo_software.c b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
3132index e6d9501..18c4649 100644
3133--- a/drivers/baseband/turbo_sw/bbdev_turbo_software.c
3134+++ b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
3135@@ -10,6 +10,7 @@
3136 #include <rte_ring.h>
3137 #include <rte_kvargs.h>
3138 #include <rte_cycles.h>
3139+#include <rte_errno.h>
3140
3141 #include <rte_bbdev.h>
3142 #include <rte_bbdev_pmd.h>
3143@@ -303,7 +304,8 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3144 rte_bbdev_log(ERR,
3145 "Creating queue name for device %u queue %u failed",
3146 dev->data->dev_id, q_id);
3147- return -ENAMETOOLONG;
3148+ ret = -ENAMETOOLONG;
3149+ goto free_q;
3150 }
3151 q->enc_out = rte_zmalloc_socket(name,
3152 ((RTE_BBDEV_TURBO_MAX_TB_SIZE >> 3) + 3) *
3153@@ -312,6 +314,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3154 if (q->enc_out == NULL) {
3155 rte_bbdev_log(ERR,
3156 "Failed to allocate queue memory for %s", name);
3157+ ret = -ENOMEM;
3158 goto free_q;
3159 }
3160
3161@@ -323,7 +326,8 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3162 rte_bbdev_log(ERR,
3163 "Creating queue name for device %u queue %u failed",
3164 dev->data->dev_id, q_id);
3165- return -ENAMETOOLONG;
3166+ ret = -ENAMETOOLONG;
3167+ goto free_q;
3168 }
3169 q->enc_in = rte_zmalloc_socket(name,
3170 (RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
3171@@ -331,6 +335,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3172 if (q->enc_in == NULL) {
3173 rte_bbdev_log(ERR,
3174 "Failed to allocate queue memory for %s", name);
3175+ ret = -ENOMEM;
3176 goto free_q;
3177 }
3178
3179@@ -341,7 +346,8 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3180 rte_bbdev_log(ERR,
3181 "Creating queue name for device %u queue %u failed",
3182 dev->data->dev_id, q_id);
3183- return -ENAMETOOLONG;
3184+ ret = -ENAMETOOLONG;
3185+ goto free_q;
3186 }
3187 q->ag = rte_zmalloc_socket(name,
3188 RTE_BBDEV_TURBO_MAX_CB_SIZE * 10 * sizeof(*q->ag),
3189@@ -349,6 +355,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3190 if (q->ag == NULL) {
3191 rte_bbdev_log(ERR,
3192 "Failed to allocate queue memory for %s", name);
3193+ ret = -ENOMEM;
3194 goto free_q;
3195 }
3196
3197@@ -359,7 +366,8 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3198 rte_bbdev_log(ERR,
3199 "Creating queue name for device %u queue %u failed",
3200 dev->data->dev_id, q_id);
3201- return -ENAMETOOLONG;
3202+ ret = -ENAMETOOLONG;
3203+ goto free_q;
3204 }
3205 q->code_block = rte_zmalloc_socket(name,
3206 RTE_BBDEV_TURBO_MAX_CB_SIZE * sizeof(*q->code_block),
3207@@ -367,6 +375,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3208 if (q->code_block == NULL) {
3209 rte_bbdev_log(ERR,
3210 "Failed to allocate queue memory for %s", name);
3211+ ret = -ENOMEM;
3212 goto free_q;
3213 }
3214
3215@@ -378,7 +387,8 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3216 rte_bbdev_log(ERR,
3217 "Creating queue name for device %u queue %u failed",
3218 dev->data->dev_id, q_id);
3219- return -ENAMETOOLONG;
3220+ ret = -ENAMETOOLONG;
3221+ goto free_q;
3222 }
3223 q->deint_input = rte_zmalloc_socket(name,
3224 DEINT_INPUT_BUF_SIZE * sizeof(*q->deint_input),
3225@@ -386,6 +396,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3226 if (q->deint_input == NULL) {
3227 rte_bbdev_log(ERR,
3228 "Failed to allocate queue memory for %s", name);
3229+ ret = -ENOMEM;
3230 goto free_q;
3231 }
3232
3233@@ -397,7 +408,8 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3234 rte_bbdev_log(ERR,
3235 "Creating queue name for device %u queue %u failed",
3236 dev->data->dev_id, q_id);
3237- return -ENAMETOOLONG;
3238+ ret = -ENAMETOOLONG;
3239+ goto free_q;
3240 }
3241 q->deint_output = rte_zmalloc_socket(NULL,
3242 DEINT_OUTPUT_BUF_SIZE * sizeof(*q->deint_output),
3243@@ -405,6 +417,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3244 if (q->deint_output == NULL) {
3245 rte_bbdev_log(ERR,
3246 "Failed to allocate queue memory for %s", name);
3247+ ret = -ENOMEM;
3248 goto free_q;
3249 }
3250
3251@@ -416,7 +429,8 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3252 rte_bbdev_log(ERR,
3253 "Creating queue name for device %u queue %u failed",
3254 dev->data->dev_id, q_id);
3255- return -ENAMETOOLONG;
3256+ ret = -ENAMETOOLONG;
3257+ goto free_q;
3258 }
3259 q->adapter_output = rte_zmalloc_socket(NULL,
3260 ADAPTER_OUTPUT_BUF_SIZE * sizeof(*q->adapter_output),
3261@@ -424,6 +438,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3262 if (q->adapter_output == NULL) {
3263 rte_bbdev_log(ERR,
3264 "Failed to allocate queue memory for %s", name);
3265+ ret = -ENOMEM;
3266 goto free_q;
3267 }
3268
3269@@ -434,12 +449,14 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
3270 rte_bbdev_log(ERR,
3271 "Creating queue name for device %u queue %u failed",
3272 dev->data->dev_id, q_id);
3273- return -ENAMETOOLONG;
3274+ ret = -ENAMETOOLONG;
3275+ goto free_q;
3276 }
3277 q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
3278 queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
3279 if (q->processed_pkts == NULL) {
3280 rte_bbdev_log(ERR, "Failed to create ring for %s", name);
3281+ ret = -rte_errno;
3282 goto free_q;
3283 }
3284
3285@@ -459,7 +476,7 @@ free_q:
3286 rte_free(q->deint_output);
3287 rte_free(q->adapter_output);
3288 rte_free(q);
3289- return -EFAULT;
3290+ return ret;
3291 }
3292
3293 static const struct rte_bbdev_ops pmd_ops = {
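The hunks above replace each early "return -ENAMETOOLONG" with an error-code assignment and a jump to the shared free_q label, so earlier allocations are released and the real cause is returned instead of a blanket -EFAULT. A generic sketch of that pattern (hypothetical names, not the turbo_sw queue structure):

    #include <errno.h>
    #include <stdlib.h>

    struct queue {
        void *enc_out;
        void *enc_in;
    };

    static int
    queue_setup(struct queue **out, size_t buf_sz)
    {
        struct queue *q = calloc(1, sizeof(*q));
        int ret;

        if (q == NULL)
            return -ENOMEM;

        q->enc_out = calloc(1, buf_sz);
        if (q->enc_out == NULL) {
            ret = -ENOMEM;      /* remember the cause ...             */
            goto free_q;        /* ... and release what exists so far */
        }

        q->enc_in = calloc(1, buf_sz);
        if (q->enc_in == NULL) {
            ret = -ENOMEM;
            goto free_q;
        }

        *out = q;
        return 0;

    free_q:
        free(q->enc_out);       /* free(NULL) is a no-op */
        free(q);
        return ret;             /* propagate the specific error code */
    }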
3294diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c
3295index 69244ef..e1dee17 100644
3296--- a/drivers/bus/dpaa/base/qbman/qman_driver.c
3297+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c
3298@@ -132,7 +132,7 @@ struct qman_portal *fsl_qman_fq_portal_create(int *fd)
3299 struct qm_portal_config *q_pcfg;
3300 struct dpaa_ioctl_irq_map irq_map;
3301 struct dpaa_ioctl_portal_map q_map = {0};
3302- int q_fd = 0, ret;
3303+ int q_fd, ret;
3304
3305 q_pcfg = kzalloc((sizeof(struct qm_portal_config)), 0);
3306 if (!q_pcfg) {
3307@@ -169,7 +169,7 @@ struct qman_portal *fsl_qman_fq_portal_create(int *fd)
3308 if (!portal) {
3309 pr_err("Qman portal initialisation failed (%d)\n",
3310 q_pcfg->cpu);
3311- goto err;
3312+ goto err_alloc;
3313 }
3314
3315 irq_map.type = dpaa_portal_qman;
3316@@ -178,11 +178,9 @@ struct qman_portal *fsl_qman_fq_portal_create(int *fd)
3317
3318 *fd = q_fd;
3319 return portal;
3320+err_alloc:
3321+ close(q_fd);
3322 err:
3323- if (portal)
3324- qman_free_global_portal(portal);
3325- if (q_fd)
3326- close(q_fd);
3327 process_portal_unmap(&q_map.addr);
3328 kfree(q_pcfg);
3329 return NULL;
3330diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
3331index abf2338..bf9e3e4 100644
3332--- a/drivers/bus/fslmc/fslmc_vfio.c
3333+++ b/drivers/bus/fslmc/fslmc_vfio.c
3334@@ -448,11 +448,14 @@ fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
3335
3336 /* get the actual group fd */
3337 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_no);
3338- if (vfio_group_fd < 0)
3339+ if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
3340 return -1;
3341
3342- /* if group_fd == 0, that means the device isn't managed by VFIO */
3343- if (vfio_group_fd == 0) {
3344+ /*
3345+ * if vfio_group_fd == -ENOENT, that means the device
3346+ * isn't managed by VFIO
3347+ */
3348+ if (vfio_group_fd == -ENOENT) {
3349 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
3350 dev_addr);
3351 return 1;
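The change above distinguishes a real failure from -ENOENT, which rte_vfio_get_group_fd() uses to signal that the device simply is not bound to VFIO. A short sketch of that handling (standalone illustration, not the fslmc bus code):

    #include <errno.h>
    #include <rte_vfio.h>

    /* Returns 1 to skip a non-VFIO device, -1 on error, 0 on success. */
    static int
    check_vfio_group(int iommu_group_num)
    {
        int vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);

        if (vfio_group_fd == -ENOENT)
            return 1;       /* not managed by VFIO: skip, not an error */
        if (vfio_group_fd < 0)
            return -1;      /* genuine failure */

        /* ... vfio_group_fd is a valid group descriptor here ... */
        return 0;
    }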
3352diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
3353index 3ca3ae4..9c09c69 100644
3354--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
3355+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
3356@@ -546,8 +546,13 @@ dpaa2_create_dpio_device(int vdev_fd,
3357
3358 err:
3359 if (dpio_dev->dpio) {
3360- dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
3361- dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
3362+ if (dpio_dev->token) {
3363+ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW,
3364+ dpio_dev->token);
3365+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW,
3366+ dpio_dev->token);
3367+ }
3368+
3369 rte_free(dpio_dev->dpio);
3370 }
3371
3372diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
3373index d4223bd..54bea97 100644
3374--- a/drivers/bus/fslmc/qbman/qbman_portal.c
3375+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
3376@@ -999,6 +999,8 @@ static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
3377 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
3378 memcpy(&p[1], &cl[1], 28);
3379 memcpy(&p[8], &fd[i], sizeof(*fd));
3380+ p[0] = cl[0] | s->eqcr.pi_vb;
3381+
3382 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
3383 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
3384
3385@@ -1006,7 +1008,6 @@ static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
3386 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
3387 }
3388 eqcr_pi++;
3389- p[0] = cl[0] | s->eqcr.pi_vb;
3390
3391 if (!(eqcr_pi & half_mask))
3392 s->eqcr.pi_vb ^= QB_VALID_BIT;
3393diff --git a/drivers/bus/pci/bsd/pci.c b/drivers/bus/pci/bsd/pci.c
3394index ebbfeb1..081c62a 100644
3395--- a/drivers/bus/pci/bsd/pci.c
3396+++ b/drivers/bus/pci/bsd/pci.c
3397@@ -392,55 +392,6 @@ pci_device_iova_mode(const struct rte_pci_driver *pdrv __rte_unused,
3398 return RTE_IOVA_PA;
3399 }
3400
3401-int
3402-pci_update_device(const struct rte_pci_addr *addr)
3403-{
3404- int fd;
3405- struct pci_conf matches[2];
3406- struct pci_match_conf match = {
3407- .pc_sel = {
3408- .pc_domain = addr->domain,
3409- .pc_bus = addr->bus,
3410- .pc_dev = addr->devid,
3411- .pc_func = addr->function,
3412- },
3413- };
3414- struct pci_conf_io conf_io = {
3415- .pat_buf_len = 0,
3416- .num_patterns = 1,
3417- .patterns = &match,
3418- .match_buf_len = sizeof(matches),
3419- .matches = &matches[0],
3420- };
3421-
3422- fd = open("/dev/pci", O_RDONLY);
3423- if (fd < 0) {
3424- RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
3425- goto error;
3426- }
3427-
3428- if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
3429- RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
3430- __func__, strerror(errno));
3431- goto error;
3432- }
3433-
3434- if (conf_io.num_matches != 1)
3435- goto error;
3436-
3437- if (pci_scan_one(fd, &matches[0]) < 0)
3438- goto error;
3439-
3440- close(fd);
3441-
3442- return 0;
3443-
3444-error:
3445- if (fd >= 0)
3446- close(fd);
3447- return -1;
3448-}
3449-
3450 /* Read PCI config space. */
3451 int rte_pci_read_config(const struct rte_pci_device *dev,
3452 void *buf, size_t len, off_t offset)
3453diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
3454index 71b0a30..dba87f1 100644
3455--- a/drivers/bus/pci/linux/pci.c
3456+++ b/drivers/bus/pci/linux/pci.c
3457@@ -394,18 +394,6 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
3458 return 0;
3459 }
3460
3461-int
3462-pci_update_device(const struct rte_pci_addr *addr)
3463-{
3464- char filename[PATH_MAX];
3465-
3466- snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
3467- rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
3468- addr->function);
3469-
3470- return pci_scan_one(filename, addr);
3471-}
3472-
3473 /*
3474 * split up a pci address into its constituent parts.
3475 */
3476diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
3477index ba60e7c..a0bb1f5 100644
3478--- a/drivers/bus/pci/linux/pci_vfio.c
3479+++ b/drivers/bus/pci/linux/pci_vfio.c
3480@@ -826,7 +826,8 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
3481 err_vfio_res:
3482 rte_free(vfio_res);
3483 err_vfio_dev_fd:
3484- close(vfio_dev_fd);
3485+ rte_vfio_release_device(rte_pci_get_sysfs_path(),
3486+ pci_addr, vfio_dev_fd);
3487 return -1;
3488 }
3489
3490@@ -894,7 +895,8 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
3491
3492 return 0;
3493 err_vfio_dev_fd:
3494- close(vfio_dev_fd);
3495+ rte_vfio_release_device(rte_pci_get_sysfs_path(),
3496+ pci_addr, vfio_dev_fd);
3497 return -1;
3498 }
3499
3500@@ -1003,7 +1005,7 @@ pci_vfio_unmap_resource_primary(struct rte_pci_device *dev)
3501 }
3502
3503 TAILQ_REMOVE(vfio_res_list, vfio_res, next);
3504-
3505+ rte_free(vfio_res);
3506 return 0;
3507 }
3508
3509diff --git a/drivers/bus/pci/private.h b/drivers/bus/pci/private.h
3510index af1c7ae..81735e4 100644
3511--- a/drivers/bus/pci/private.h
3512+++ b/drivers/bus/pci/private.h
3513@@ -15,8 +15,6 @@ extern struct rte_pci_bus rte_pci_bus;
3514 struct rte_pci_driver;
3515 struct rte_pci_device;
3516
3517-extern struct rte_pci_bus rte_pci_bus;
3518-
3519 /**
3520 * Scan the content of the PCI bus, and the devices in the devices
3521 * list
3522@@ -58,19 +56,6 @@ void rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
3523 struct rte_pci_device *new_pci_dev);
3524
3525 /**
3526- * Update a pci device object by asking the kernel for the latest information.
3527- *
3528- * This function is private to EAL.
3529- *
3530- * @param addr
3531- * The PCI Bus-Device-Function address to look for
3532- * @return
3533- * - 0 on success.
3534- * - negative on error.
3535- */
3536-int pci_update_device(const struct rte_pci_addr *addr);
3537-
3538-/**
3539 * Map the PCI resource of a PCI device in virtual memory
3540 *
3541 * This function is private to EAL.
3542diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
3543index 03f5fd1..e3d2b9c 100644
3544--- a/drivers/common/qat/qat_device.c
3545+++ b/drivers/common/qat/qat_device.c
3546@@ -421,3 +421,4 @@ qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
3547
3548 RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
3549 RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
3550+RTE_PMD_REGISTER_KMOD_DEP(QAT_PCI_NAME, "* igb_uio | uio_pci_generic | vfio-pci");
3551diff --git a/drivers/compress/isal/isal_compress_pmd_ops.c b/drivers/compress/isal/isal_compress_pmd_ops.c
3552index 31c4559..7d03749 100644
3553--- a/drivers/compress/isal/isal_compress_pmd_ops.c
3554+++ b/drivers/compress/isal/isal_compress_pmd_ops.c
3555@@ -249,16 +249,27 @@ isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
3556 qp->stream = rte_zmalloc_socket("Isa-l compression stream ",
3557 sizeof(struct isal_zstream), RTE_CACHE_LINE_SIZE,
3558 socket_id);
3559-
3560+ if (qp->stream == NULL) {
3561+ ISAL_PMD_LOG(ERR, "Failed to allocate compression stream memory");
3562+ goto qp_setup_cleanup;
3563+ }
3564 /* Initialize memory for compression level buffer */
3565 qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf",
3566 ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE,
3567 socket_id);
3568+ if (qp->stream->level_buf == NULL) {
3569+ ISAL_PMD_LOG(ERR, "Failed to allocate compression level_buf memory");
3570+ goto qp_setup_cleanup;
3571+ }
3572
3573 /* Initialize memory for decompression state structure */
3574 qp->state = rte_zmalloc_socket("Isa-l decompression state",
3575 sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE,
3576 socket_id);
3577+ if (qp->state == NULL) {
3578+ ISAL_PMD_LOG(ERR, "Failed to allocate decompression state memory");
3579+ goto qp_setup_cleanup;
3580+ }
3581
3582 qp->id = qp_id;
3583 dev->data->queue_pairs[qp_id] = qp;
3584@@ -284,8 +295,11 @@ isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
3585 return 0;
3586
3587 qp_setup_cleanup:
3588- if (qp)
3589- rte_free(qp);
3590+ if (qp->stream)
3591+ rte_free(qp->stream->level_buf);
3592+ rte_free(qp->stream);
3593+ rte_free(qp->state);
3594+ rte_free(qp);
3595
3596 return -1;
3597 }
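The hunk above makes the queue-pair setup check every rte_zmalloc_socket() result and free whatever was already allocated on failure. A minimal sketch of the same idea (assumed structure and sizes; rte_free() accepts NULL, so one cleanup path can free members unconditionally):

    #include <stddef.h>
    #include <rte_malloc.h>

    struct example_qp {
        void *stream;
        void *state;
    };

    static struct example_qp *
    example_qp_alloc(int socket_id)
    {
        struct example_qp *qp;

        /* align = 0 requests the default (cache line) alignment */
        qp = rte_zmalloc_socket("example qp", sizeof(*qp), 0, socket_id);
        if (qp == NULL)
            return NULL;

        qp->stream = rte_zmalloc_socket("example stream", 4096, 0, socket_id);
        if (qp->stream == NULL)
            goto cleanup;

        qp->state = rte_zmalloc_socket("example state", 4096, 0, socket_id);
        if (qp->state == NULL)
            goto cleanup;

        return qp;

    cleanup:
        rte_free(qp->stream);   /* rte_free(NULL) does nothing */
        rte_free(qp);
        return NULL;
    }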
3598diff --git a/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
3599index b3cb2f1..03da3dc 100644
3600--- a/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
3601+++ b/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
3602@@ -74,7 +74,7 @@ static const unsigned auth_truncated_digest_byte_lengths[] = {
3603 [AES_CMAC] = 12,
3604 [AES_CCM] = 8,
3605 [NULL_HASH] = 0,
3606- [AES_GMAC] = 16,
3607+ [AES_GMAC] = 12,
3608 [PLAIN_SHA1] = 20,
3609 [PLAIN_SHA_224] = 28,
3610 [PLAIN_SHA_256] = 32,
3611@@ -105,7 +105,7 @@ static const unsigned auth_digest_byte_lengths[] = {
3612 [AES_XCBC] = 16,
3613 [AES_CMAC] = 16,
3614 [AES_CCM] = 16,
3615- [AES_GMAC] = 12,
3616+ [AES_GMAC] = 16,
3617 [NULL_HASH] = 0,
3618 [PLAIN_SHA1] = 20,
3619 [PLAIN_SHA_224] = 28,
3620diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
3621index 40feae3..d2fa066 100644
3622--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
3623+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
3624@@ -203,19 +203,11 @@ aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
3625 sess->cipher.direction = DECRYPT;
3626
3627 sess->auth.algo = AES_GMAC;
3628- /*
3629- * Multi-buffer lib supports 8, 12 and 16 bytes of digest.
3630- * If size requested is different, generate the full digest
3631- * (16 bytes) in a temporary location and then memcpy
3632- * the requested number of bytes.
3633- */
3634- if (sess->auth.req_digest_len != 16 &&
3635- sess->auth.req_digest_len != 12 &&
3636- sess->auth.req_digest_len != 8) {
3637- sess->auth.gen_digest_len = 16;
3638- } else {
3639- sess->auth.gen_digest_len = sess->auth.req_digest_len;
3640+ if (sess->auth.req_digest_len > get_digest_byte_length(AES_GMAC)) {
3641+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
3642+ return -EINVAL;
3643 }
3644+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
3645 sess->iv.length = xform->auth.iv.length;
3646 sess->iv.offset = xform->auth.iv.offset;
3647
3648@@ -537,6 +529,14 @@ aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
3649 return -EINVAL;
3650 }
3651
3652+ /* Set IV parameters */
3653+ sess->iv.offset = xform->aead.iv.offset;
3654+ sess->iv.length = xform->aead.iv.length;
3655+
3656+ /* Set digest sizes */
3657+ sess->auth.req_digest_len = xform->aead.digest_length;
3658+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
3659+
3660 switch (xform->aead.algo) {
3661 case RTE_CRYPTO_AEAD_AES_CCM:
3662 sess->cipher.mode = CCM;
3663@@ -555,6 +555,13 @@ aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
3664 return -EINVAL;
3665 }
3666
3667+ /* CCM digests must be between 4 and 16 and an even number */
3668+ if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
3669+ sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
3670+ (sess->auth.req_digest_len & 1) == 1) {
3671+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
3672+ return -EINVAL;
3673+ }
3674 break;
3675
3676 case RTE_CRYPTO_AEAD_AES_GCM:
3677@@ -582,6 +589,12 @@ aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
3678 return -EINVAL;
3679 }
3680
3681+ /* GCM digest size must be between 1 and 16 */
3682+ if (sess->auth.req_digest_len == 0 ||
3683+ sess->auth.req_digest_len > 16) {
3684+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
3685+ return -EINVAL;
3686+ }
3687 break;
3688
3689 default:
3690@@ -589,20 +602,6 @@ aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
3691 return -ENOTSUP;
3692 }
3693
3694- /* Set IV parameters */
3695- sess->iv.offset = xform->aead.iv.offset;
3696- sess->iv.length = xform->aead.iv.length;
3697-
3698- sess->auth.req_digest_len = xform->aead.digest_length;
3699- /* CCM digests must be between 4 and 16 and an even number */
3700- if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
3701- sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
3702- (sess->auth.req_digest_len & 1) == 1) {
3703- AESNI_MB_LOG(ERR, "Invalid digest size\n");
3704- return -EINVAL;
3705- }
3706- sess->auth.gen_digest_len = sess->auth.req_digest_len;
3707-
3708 return 0;
3709 }
3710
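The reordered checks above validate the requested AEAD digest length before the session is accepted: CCM tags must be 4 to 16 bytes and even, GCM tags 1 to 16 bytes (matching the relaxed capability table in the next hunk). A standalone sketch of that validation, not the PMD code itself:

    #include <stdint.h>
    #include <stdbool.h>

    static bool
    aead_digest_len_ok(bool is_ccm, uint16_t digest_len)
    {
        if (is_ccm)
            return digest_len >= 4 && digest_len <= 16 &&
                   (digest_len & 1) == 0;

        /* GCM */
        return digest_len >= 1 && digest_len <= 16;
    }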
3711diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
3712index d8609ad..da61476 100644
3713--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
3714+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
3715@@ -449,9 +449,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
3716 .increment = 8
3717 },
3718 .digest_size = {
3719- .min = 8,
3720+ .min = 1,
3721 .max = 16,
3722- .increment = 4
3723+ .increment = 1
3724 },
3725 .aad_size = {
3726 .min = 0,
3727@@ -479,9 +479,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
3728 .increment = 8
3729 },
3730 .digest_size = {
3731- .min = 8,
3732+ .min = 1,
3733 .max = 16,
3734- .increment = 4
3735+ .increment = 1
3736 },
3737 .iv_size = {
3738 .min = 12,
3739diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
3740index bc897ac..3c5af17 100644
3741--- a/drivers/crypto/armv8/rte_armv8_pmd.c
3742+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
3743@@ -666,8 +666,8 @@ process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
3744 memset(op->sym->session, 0,
3745 rte_cryptodev_sym_get_existing_header_session_size(
3746 op->sym->session));
3747- rte_mempool_put(qp->sess_mp, sess);
3748- rte_mempool_put(qp->sess_mp_priv, op->sym->session);
3749+ rte_mempool_put(qp->sess_mp_priv, sess);
3750+ rte_mempool_put(qp->sess_mp, op->sym->session);
3751 op->sym->session = NULL;
3752 }
3753
3754diff --git a/drivers/crypto/caam_jr/caam_jr.c b/drivers/crypto/caam_jr/caam_jr.c
3755index 86aa9a1..d6fa8cf 100644
3756--- a/drivers/crypto/caam_jr/caam_jr.c
3757+++ b/drivers/crypto/caam_jr/caam_jr.c
3758@@ -2398,6 +2398,8 @@ init_error:
3759 static int
3760 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
3761 {
3762+ int ret;
3763+
3764 struct rte_cryptodev_pmd_init_params init_params = {
3765 "",
3766 sizeof(struct sec_job_ring_t),
3767@@ -2414,6 +2416,12 @@ cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
3768 input_args = rte_vdev_device_args(vdev);
3769 rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
3770
3771+ ret = of_init();
3772+ if (ret) {
3773+ RTE_LOG(ERR, PMD,
3774+ "of_init failed\n");
3775+ return -EINVAL;
3776+ }
3777 /* if sec device version is not configured */
3778 if (!rta_get_sec_era()) {
3779 const struct device_node *caam_node;
3780@@ -2424,7 +2432,7 @@ cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
3781 NULL);
3782 if (prop) {
3783 rta_set_sec_era(
3784- INTL_SEC_ERA(cpu_to_caam32(*prop)));
3785+ INTL_SEC_ERA(rte_be_to_cpu_32(*prop)));
3786 break;
3787 }
3788 }
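The probe fix above initialises the device-tree layer with of_init() and converts the SEC_ERA property with rte_be_to_cpu_32() instead of the caam-specific macro. Device-tree property cells are stored big-endian, so a host-order value is obtained as in this small sketch (hypothetical property pointer):

    #include <stdint.h>
    #include <rte_byteorder.h>

    static uint32_t
    dt_prop_to_cpu(const uint32_t *prop)
    {
        /* device-tree property cells are stored big-endian */
        return rte_be_to_cpu_32(*prop);
    }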
3789diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
3790index 0cc98b3..5eb78ec 100644
3791--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
3792+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
3793@@ -2746,12 +2746,6 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
3794 return 0;
3795 }
3796
3797-#ifdef RTE_LIBRTE_SECURITY_TEST
3798-static uint8_t aes_cbc_iv[] = {
3799- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
3800- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
3801-#endif
3802-
3803 static int
3804 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
3805 struct rte_security_session_conf *conf,
3806@@ -3492,7 +3486,7 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3807 return;
3808 }
3809 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3810- if (qp[i] == NULL) {
3811+ if (qp == NULL || qp[i] == NULL) {
3812 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3813 continue;
3814 }
3815diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
3816index 35f1fea..a650313 100644
3817--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
3818+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
3819@@ -2959,7 +2959,8 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
3820 session->pdcp.hfn = pdcp_xform->hfn;
3821 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3822 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3823- session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3824+ if (cipher_xform)
3825+ session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3826
3827 rte_spinlock_lock(&dev_priv->lock);
3828 for (i = 0; i < MAX_DPAA_CORES; i++) {
3829diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
3830index ba56b21..4eadb77 100644
3831--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
3832+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
3833@@ -905,6 +905,7 @@ otx_cpt_dev_create(struct rte_cryptodev *c_dev)
3834 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3835 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3836 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3837+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
3838 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3839 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
3840 break;
3841diff --git a/drivers/crypto/octeontx2/otx2_cryptodev.c b/drivers/crypto/octeontx2/otx2_cryptodev.c
3842index 417eda6..8523817 100644
3843--- a/drivers/crypto/octeontx2/otx2_cryptodev.c
3844+++ b/drivers/crypto/octeontx2/otx2_cryptodev.c
3845@@ -70,45 +70,53 @@ otx2_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3846
3847 otx2_dev = &vf->otx2_dev;
3848
3849- /* Initialize the base otx2_dev object */
3850- ret = otx2_dev_init(pci_dev, otx2_dev);
3851- if (ret) {
3852- CPT_LOG_ERR("Could not initialize otx2_dev");
3853- goto pmd_destroy;
3854+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3855+ /* Initialize the base otx2_dev object */
3856+ ret = otx2_dev_init(pci_dev, otx2_dev);
3857+ if (ret) {
3858+ CPT_LOG_ERR("Could not initialize otx2_dev");
3859+ goto pmd_destroy;
3860+ }
3861+
3862+ /* Get number of queues available on the device */
3863+ ret = otx2_cpt_available_queues_get(dev, &nb_queues);
3864+ if (ret) {
3865+ CPT_LOG_ERR("Could not determine the number of queues available");
3866+ goto otx2_dev_fini;
3867+ }
3868+
3869+ /* Don't exceed the limits set per VF */
3870+ nb_queues = RTE_MIN(nb_queues, OTX2_CPT_MAX_QUEUES_PER_VF);
3871+
3872+ if (nb_queues == 0) {
3873+ CPT_LOG_ERR("No free queues available on the device");
3874+ goto otx2_dev_fini;
3875+ }
3876+
3877+ vf->max_queues = nb_queues;
3878+
3879+ CPT_LOG_INFO("Max queues supported by device: %d",
3880+ vf->max_queues);
3881 }
3882
3883- /* Get number of queues available on the device */
3884- ret = otx2_cpt_available_queues_get(dev, &nb_queues);
3885- if (ret) {
3886- CPT_LOG_ERR("Could not determine the number of queues available");
3887- goto otx2_dev_fini;
3888- }
3889-
3890- /* Don't exceed the limits set per VF */
3891- nb_queues = RTE_MIN(nb_queues, OTX2_CPT_MAX_QUEUES_PER_VF);
3892-
3893- if (nb_queues == 0) {
3894- CPT_LOG_ERR("No free queues available on the device");
3895- goto otx2_dev_fini;
3896- }
3897-
3898- vf->max_queues = nb_queues;
3899-
3900- CPT_LOG_INFO("Max queues supported by device: %d", vf->max_queues);
3901-
3902 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3903 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3904 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3905 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3906+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
3907 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3908 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3909 RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
3910 RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT;
3911
3912+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3913+ otx2_cpt_set_enqdeq_fns(dev);
3914+
3915 return 0;
3916
3917 otx2_dev_fini:
3918- otx2_dev_fini(pci_dev, otx2_dev);
3919+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3920+ otx2_dev_fini(pci_dev, otx2_dev);
3921 pmd_destroy:
3922 rte_cryptodev_pmd_destroy(dev);
3923 exit:
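
The otx2_cryptodev probe rework above gates all mailbox and queue discovery on the EAL process type, so only the primary process initialises the device while secondary processes merely install the enqueue/dequeue function pointers (which live in per-process memory). A minimal sketch of that pattern, with hypothetical my_hw_init()/my_set_enqdeq_fns() helpers standing in for the PMD's own routines:

	#include <rte_eal.h>

	/* hypothetical helpers standing in for the PMD's own code */
	int my_hw_init(void *dev);
	void my_set_enqdeq_fns(void *dev);

	static int
	my_pmd_probe(void *dev)
	{
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Only the primary process may configure shared HW state. */
			if (my_hw_init(dev) != 0)
				return -1;
		}

		/* Burst function pointers are per-process, so every process
		 * that wants a datapath must set them for itself. */
		my_set_enqdeq_fns(dev);
		return 0;
	}
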
3924diff --git a/drivers/crypto/octeontx2/otx2_cryptodev.h b/drivers/crypto/octeontx2/otx2_cryptodev.h
3925index c0aa661..17c0bee 100644
3926--- a/drivers/crypto/octeontx2/otx2_cryptodev.h
3927+++ b/drivers/crypto/octeontx2/otx2_cryptodev.h
3928@@ -40,4 +40,6 @@ extern int otx2_cpt_logtype;
3929 */
3930 extern uint8_t otx2_cryptodev_driver_id;
3931
3932+void otx2_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
3933+
3934 #endif /* _OTX2_CRYPTODEV_H_ */
3935diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
3936index 65101b0..292aff1 100644
3937--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
3938+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
3939@@ -518,8 +518,8 @@ otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
3940 int ret;
3941
3942 /* Create temporary session */
3943-
3944- if (rte_mempool_get(qp->sess_mp, (void **)&sess))
3945+ sess = rte_cryptodev_sym_session_create(qp->sess_mp);
3946+ if (sess == NULL)
3947 return -ENOMEM;
3948
3949 ret = sym_session_configure(driver_id, sym_op->xform, sess,
3950@@ -671,6 +671,8 @@ static inline void
3951 otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
3952 uintptr_t *rsp, uint8_t cc)
3953 {
3954+ unsigned int sz;
3955+
3956 if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
3957 if (likely(cc == NO_ERR)) {
3958 /* Verify authentication data if required */
3959@@ -689,6 +691,9 @@ otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
3960 if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
3961 sym_session_clear(otx2_cryptodev_driver_id,
3962 cop->sym->session);
3963+ sz = rte_cryptodev_sym_get_existing_header_session_size(
3964+ cop->sym->session);
3965+ memset(cop->sym->session, 0, sz);
3966 rte_mempool_put(qp->sess_mp, cop->sym->session);
3967 cop->sym->session = NULL;
3968 }
3969@@ -808,6 +813,15 @@ otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
3970 return nb_completed;
3971 }
3972
3973+void
3974+otx2_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
3975+{
3976+ dev->enqueue_burst = otx2_cpt_enqueue_burst;
3977+ dev->dequeue_burst = otx2_cpt_dequeue_burst;
3978+
3979+ rte_mb();
3980+}
3981+
3982 /* PMD ops */
3983
3984 static int
3985@@ -857,10 +871,8 @@ otx2_cpt_dev_config(struct rte_cryptodev *dev,
3986 goto queues_detach;
3987 }
3988
3989- dev->enqueue_burst = otx2_cpt_enqueue_burst;
3990- dev->dequeue_burst = otx2_cpt_dequeue_burst;
3991+ otx2_cpt_set_enqdeq_fns(dev);
3992
3993- rte_mb();
3994 return 0;
3995
3996 queues_detach:
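
The sessionless path above now obtains its temporary session through rte_cryptodev_sym_session_create() rather than pulling a raw object out of the mempool, and wipes the session header before handing the object back. A rough sketch of that get/put pairing (the helper names are illustrative, not part of the PMD):

	#include <string.h>
	#include <rte_cryptodev.h>
	#include <rte_mempool.h>

	static struct rte_cryptodev_sym_session *
	temp_session_get(struct rte_mempool *sess_mp)
	{
		/* Initialises the session header instead of handing out an
		 * uninitialised mempool object. */
		return rte_cryptodev_sym_session_create(sess_mp);
	}

	static void
	temp_session_put(struct rte_mempool *sess_mp,
			 struct rte_cryptodev_sym_session *sess)
	{
		unsigned int sz =
			rte_cryptodev_sym_get_existing_header_session_size(sess);

		memset(sess, 0, sz);	/* leave a clean header for the next user */
		rte_mempool_put(sess_mp, sess);
	}
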
3997diff --git a/drivers/crypto/scheduler/meson.build b/drivers/crypto/scheduler/meson.build
3998index c5ba2d6..cb0f3a8 100644
3999--- a/drivers/crypto/scheduler/meson.build
4000+++ b/drivers/crypto/scheduler/meson.build
4001@@ -13,7 +13,7 @@ sources = files(
4002 'scheduler_roundrobin.c',
4003 )
4004
4005-headers = files(
4006+install_headers(
4007 'rte_cryptodev_scheduler.h',
4008 'rte_cryptodev_scheduler_operations.h',
4009 )
4010diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
4011index 3ed480c..d687017 100644
4012--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
4013+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
4014@@ -59,7 +59,6 @@ struct scheduler_qp_ctx {
4015 uint32_t max_nb_objs;
4016
4017 struct rte_ring *order_ring;
4018- uint32_t seqn;
4019 } __rte_cache_aligned;
4020
4021
4022diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
4023index ede7b01..5ae76d4 100644
4024--- a/drivers/event/dpaa2/dpaa2_eventdev.c
4025+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
4026@@ -567,14 +567,14 @@ dpaa2_eventdev_port_release(void *port)
4027
4028 EVENTDEV_INIT_FUNC_TRACE();
4029
4030+ if (portal == NULL)
4031+ return;
4032+
4033 /* TODO: Cleanup is required when ports are in linked state. */
4034 if (portal->is_port_linked)
4035 DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");
4036
4037- if (portal)
4038- rte_free(portal);
4039-
4040- portal = NULL;
4041+ rte_free(portal);
4042 }
4043
4044 static int
4045diff --git a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
4046index ba4f4bd..1f35807 100644
4047--- a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
4048+++ b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
4049@@ -47,17 +47,6 @@ struct event_attr {
4050 uint8_t seq;
4051 };
4052
4053-static uint32_t seqn_list_index;
4054-static int seqn_list[NUM_PACKETS];
4055-
4056-static void
4057-seqn_list_init(void)
4058-{
4059- RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
4060- memset(seqn_list, 0, sizeof(seqn_list));
4061- seqn_list_index = 0;
4062-}
4063-
4064 struct test_core_param {
4065 rte_atomic32_t *total_events;
4066 uint64_t dequeue_tmo_ticks;
4067@@ -516,7 +505,7 @@ launch_workers_and_wait(int (*master_worker)(void *),
4068 return 0;
4069
4070 rte_atomic32_set(&atomic_total_events, total_events);
4071- seqn_list_init();
4072+ RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
4073
4074 param = malloc(sizeof(struct test_core_param) * nb_workers);
4075 if (!param)
4076diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
4077index f776681..5bdf23c 100644
4078--- a/drivers/event/octeontx2/otx2_evdev.c
4079+++ b/drivers/event/octeontx2/otx2_evdev.c
4080@@ -648,7 +648,36 @@ sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
4081 static void
4082 otx2_sso_port_release(void *port)
4083 {
4084- rte_free(port);
4085+ struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
4086+ struct otx2_sso_evdev *dev;
4087+ int i;
4088+
4089+ if (!gws_cookie->configured)
4090+ goto free;
4091+
4092+ dev = sso_pmd_priv(gws_cookie->event_dev);
4093+ if (dev->dual_ws) {
4094+ struct otx2_ssogws_dual *ws = port;
4095+
4096+ for (i = 0; i < dev->nb_event_queues; i++) {
4097+ sso_port_link_modify((struct otx2_ssogws *)
4098+ &ws->ws_state[0], i, false);
4099+ sso_port_link_modify((struct otx2_ssogws *)
4100+ &ws->ws_state[1], i, false);
4101+ }
4102+ memset(ws, 0, sizeof(*ws));
4103+ } else {
4104+ struct otx2_ssogws *ws = port;
4105+
4106+ for (i = 0; i < dev->nb_event_queues; i++)
4107+ sso_port_link_modify(ws, i, false);
4108+ memset(ws, 0, sizeof(*ws));
4109+ }
4110+
4111+ memset(gws_cookie, 0, sizeof(*gws_cookie));
4112+
4113+free:
4114+ rte_free(gws_cookie);
4115 }
4116
4117 static void
4118@@ -659,33 +688,6 @@ otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
4119 }
4120
4121 static void
4122-sso_clr_links(const struct rte_eventdev *event_dev)
4123-{
4124- struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
4125- int i, j;
4126-
4127- for (i = 0; i < dev->nb_event_ports; i++) {
4128- if (dev->dual_ws) {
4129- struct otx2_ssogws_dual *ws;
4130-
4131- ws = event_dev->data->ports[i];
4132- for (j = 0; j < dev->nb_event_queues; j++) {
4133- sso_port_link_modify((struct otx2_ssogws *)
4134- &ws->ws_state[0], j, false);
4135- sso_port_link_modify((struct otx2_ssogws *)
4136- &ws->ws_state[1], j, false);
4137- }
4138- } else {
4139- struct otx2_ssogws *ws;
4140-
4141- ws = event_dev->data->ports[i];
4142- for (j = 0; j < dev->nb_event_queues; j++)
4143- sso_port_link_modify(ws, j, false);
4144- }
4145- }
4146-}
4147-
4148-static void
4149 sso_restore_links(const struct rte_eventdev *event_dev)
4150 {
4151 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
4152@@ -762,6 +764,7 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
4153 }
4154
4155 for (i = 0; i < dev->nb_event_ports; i++) {
4156+ struct otx2_ssogws_cookie *gws_cookie;
4157 struct otx2_ssogws_dual *ws;
4158 uintptr_t base;
4159
4160@@ -770,14 +773,20 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
4161 } else {
4162 /* Allocate event port memory */
4163 ws = rte_zmalloc_socket("otx2_sso_ws",
4164- sizeof(struct otx2_ssogws_dual),
4165+ sizeof(struct otx2_ssogws_dual) +
4166+ RTE_CACHE_LINE_SIZE,
4167 RTE_CACHE_LINE_SIZE,
4168 event_dev->data->socket_id);
4169- }
4170- if (ws == NULL) {
4171- otx2_err("Failed to alloc memory for port=%d", i);
4172- rc = -ENOMEM;
4173- break;
4174+ if (ws == NULL) {
4175+ otx2_err("Failed to alloc memory for port=%d",
4176+ i);
4177+ rc = -ENOMEM;
4178+ break;
4179+ }
4180+
4181+ /* First cache line is reserved for cookie */
4182+ ws = (struct otx2_ssogws_dual *)
4183+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
4184 }
4185
4186 ws->port = i;
4187@@ -789,6 +798,10 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
4188 sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
4189 vws++;
4190
4191+ gws_cookie = ssogws_get_cookie(ws);
4192+ gws_cookie->event_dev = event_dev;
4193+ gws_cookie->configured = 1;
4194+
4195 event_dev->data->ports[i] = ws;
4196 }
4197
4198@@ -825,19 +838,21 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
4199 }
4200
4201 for (i = 0; i < nb_lf; i++) {
4202+ struct otx2_ssogws_cookie *gws_cookie;
4203 struct otx2_ssogws *ws;
4204 uintptr_t base;
4205
4206 /* Free memory prior to re-allocation if needed */
4207 if (event_dev->data->ports[i] != NULL) {
4208 ws = event_dev->data->ports[i];
4209- rte_free(ws);
4210+ rte_free(ssogws_get_cookie(ws));
4211 ws = NULL;
4212 }
4213
4214 /* Allocate event port memory */
4215 ws = rte_zmalloc_socket("otx2_sso_ws",
4216- sizeof(struct otx2_ssogws),
4217+ sizeof(struct otx2_ssogws) +
4218+ RTE_CACHE_LINE_SIZE,
4219 RTE_CACHE_LINE_SIZE,
4220 event_dev->data->socket_id);
4221 if (ws == NULL) {
4222@@ -846,10 +861,18 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
4223 break;
4224 }
4225
4226+ /* First cache line is reserved for cookie */
4227+ ws = (struct otx2_ssogws *)
4228+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
4229+
4230 ws->port = i;
4231 base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
4232 sso_set_port_ops(ws, base);
4233
4234+ gws_cookie = ssogws_get_cookie(ws);
4235+ gws_cookie->event_dev = event_dev;
4236+ gws_cookie->configured = 1;
4237+
4238 event_dev->data->ports[i] = ws;
4239 }
4240
4241@@ -1058,11 +1081,8 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
4242 return -EINVAL;
4243 }
4244
4245- if (dev->configured) {
4246+ if (dev->configured)
4247 sso_unregister_irqs(event_dev);
4248- /* Clear any prior port-queue mapping. */
4249- sso_clr_links(event_dev);
4250- }
4251
4252 if (dev->nb_event_queues) {
4253 /* Finit any previous queues. */
4254diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
4255index ef523dc..e134ea5 100644
4256--- a/drivers/event/octeontx2/otx2_evdev.h
4257+++ b/drivers/event/octeontx2/otx2_evdev.h
4258@@ -212,6 +212,18 @@ sso_pmd_priv(const struct rte_eventdev *event_dev)
4259 return event_dev->data->dev_private;
4260 }
4261
4262+struct otx2_ssogws_cookie {
4263+ const struct rte_eventdev *event_dev;
4264+ bool configured;
4265+};
4266+
4267+static inline struct otx2_ssogws_cookie *
4268+ssogws_get_cookie(void *ws)
4269+{
4270+ return (struct otx2_ssogws_cookie *)
4271+ ((uint8_t *)ws - RTE_CACHE_LINE_SIZE);
4272+}
4273+
4274 static const union mbuf_initializer mbuf_init = {
4275 .fields = {
4276 .data_off = RTE_PKTMBUF_HEADROOM,
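
The SSO port allocations above over-allocate by one cache line and hide an otx2_ssogws_cookie in front of the object, so that otx2_sso_port_release() can recover the owning eventdev from nothing but the port pointer. A generic sketch of that layout, assuming hypothetical obj_* helper names:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stddef.h>
	#include <rte_common.h>
	#include <rte_malloc.h>

	struct obj_cookie {
		const void *owner;	/* back-reference used at release time */
		bool configured;
	};

	static void *
	obj_alloc(const char *name, size_t size, int socket)
	{
		uint8_t *p = rte_zmalloc_socket(name, size + RTE_CACHE_LINE_SIZE,
						RTE_CACHE_LINE_SIZE, socket);

		if (p == NULL)
			return NULL;
		/* First cache line is reserved for the cookie. */
		return p + RTE_CACHE_LINE_SIZE;
	}

	static struct obj_cookie *
	obj_get_cookie(void *obj)
	{
		return (struct obj_cookie *)((uint8_t *)obj - RTE_CACHE_LINE_SIZE);
	}

	static void
	obj_free(void *obj)
	{
		rte_free(obj_get_cookie(obj));	/* free the original allocation */
	}
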
4277diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
4278index 63f8fb3..e37b844 100644
4279--- a/drivers/mempool/octeontx/octeontx_fpavf.c
4280+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
4281@@ -267,7 +267,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
4282 POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
4283 POOL_ENA;
4284
4285- cfg.aid = FPA_AURA_IDX(gpool);
4286+ cfg.aid = 0;
4287 cfg.pool_cfg = reg;
4288 cfg.pool_stack_base = phys_addr;
4289 cfg.pool_stack_end = phys_addr + memsz;
4290@@ -353,7 +353,7 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
4291 hdr.vfid = gpool_index;
4292 hdr.res_code = 0;
4293 memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
4294- cfg.aid = FPA_AURA_IDX(gpool_index);
4295+ cfg.aid = 0;
4296
4297 ret = octeontx_mbox_send(&hdr, &cfg,
4298 sizeof(struct octeontx_mbox_fpa_cfg),
4299@@ -382,7 +382,7 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
4300 goto err;
4301 }
4302
4303- cfg.aid = FPA_AURA_IDX(gpool_index);
4304+ cfg.aid = 0;
4305 hdr.coproc = FPA_COPROC;
4306 hdr.msg = FPA_DETACHAURA;
4307 hdr.vfid = gpool_index;
4308diff --git a/drivers/meson.build b/drivers/meson.build
4309index 4246cc3..6960796 100644
4310--- a/drivers/meson.build
4311+++ b/drivers/meson.build
4312@@ -160,8 +160,10 @@ foreach class:dpdk_driver_classes
4313 lk_deps = [version_map, def_file]
4314 if is_windows
4315 if is_ms_linker
4316- lk_args = ['-Wl,/def:' + def_file.full_path(),
4317- '-Wl,/implib:drivers\\' + implib]
4318+ lk_args = ['-Wl,/def:' + def_file.full_path()]
4319+ if meson.version().version_compare('<0.54.0')
4320+ lk_args += ['-Wl,/implib:drivers\\' + implib]
4321+ endif
4322 else
4323 lk_args = []
4324 endif
4325@@ -196,7 +198,7 @@ foreach class:dpdk_driver_classes
4326 shared_dep = declare_dependency(link_with: shared_lib,
4327 include_directories: includes,
4328 dependencies: shared_deps)
4329- static_dep = declare_dependency(link_with: static_lib,
4330+ static_dep = declare_dependency(
4331 include_directories: includes,
4332 dependencies: static_deps)
4333
4334diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
4335index f5fca63..e008921 100644
4336--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
4337+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
4338@@ -237,7 +237,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
4339 if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
4340 AF_XDP_LOG(DEBUG,
4341 "Failed to get enough buffers for fq.\n");
4342- return -1;
4343+ return 0;
4344 }
4345
4346 rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
4347@@ -305,6 +305,10 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
4348 uint32_t free_thresh = fq->size >> 1;
4349 struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
4350
4351+ if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
4352+ (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);
4353+
4354+
4355 if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
4356 return 0;
4357
4358@@ -318,9 +322,6 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
4359 goto out;
4360 }
4361
4362- if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
4363- (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);
4364-
4365 for (i = 0; i < rcvd; i++) {
4366 const struct xdp_desc *desc;
4367 uint64_t addr;
4368@@ -743,12 +744,17 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
4369 }
4370
4371 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
4372-static inline uint64_t get_base_addr(struct rte_mempool *mp)
4373+static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
4374 {
4375 struct rte_mempool_memhdr *memhdr;
4376+ uintptr_t memhdr_addr, aligned_addr;
4377
4378 memhdr = STAILQ_FIRST(&mp->mem_list);
4379- return (uint64_t)memhdr->addr & ~(getpagesize() - 1);
4380+ memhdr_addr = (uintptr_t)memhdr->addr;
4381+ aligned_addr = memhdr_addr & ~(getpagesize() - 1);
4382+ *align = memhdr_addr - aligned_addr;
4383+
4384+ return aligned_addr;
4385 }
4386
4387 static struct
4388@@ -763,6 +769,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals __rte_unused,
4389 .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
4390 void *base_addr = NULL;
4391 struct rte_mempool *mb_pool = rxq->mb_pool;
4392+ uint64_t umem_size, align = 0;
4393
4394 usr_config.frame_size = rte_mempool_calc_obj_size(mb_pool->elt_size,
4395 mb_pool->flags,
4396@@ -779,12 +786,11 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals __rte_unused,
4397 }
4398
4399 umem->mb_pool = mb_pool;
4400- base_addr = (void *)get_base_addr(mb_pool);
4401+ base_addr = (void *)get_base_addr(mb_pool, &align);
4402+ umem_size = mb_pool->populated_size * usr_config.frame_size + align;
4403
4404- ret = xsk_umem__create(&umem->umem, base_addr,
4405- mb_pool->populated_size * usr_config.frame_size,
4406- &umem->fq, &umem->cq,
4407- &usr_config);
4408+ ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
4409+ &umem->fq, &umem->cq, &usr_config);
4410
4411 if (ret) {
4412 AF_XDP_LOG(ERR, "Failed to create umem");
4413@@ -1110,7 +1116,7 @@ xdp_get_channels_info(const char *if_name, int *max_queues,
4414
4415 channels.cmd = ETHTOOL_GCHANNELS;
4416 ifr.ifr_data = (void *)&channels;
4417- strncpy(ifr.ifr_name, if_name, IFNAMSIZ);
4418+ strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
4419 ret = ioctl(fd, SIOCETHTOOL, &ifr);
4420 if (ret) {
4421 if (errno == EOPNOTSUPP) {
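
get_base_addr() above now rounds the mempool's first memhdr address down to a page boundary and reports the padding, so the umem size can be grown by the same amount and still cover the whole pool. A self-contained sketch of that alignment arithmetic:

	#include <stdint.h>
	#include <unistd.h>

	/* Round an address down to the page boundary; *pad receives the number
	 * of bytes between the aligned base and the original address. */
	static uintptr_t
	page_align_down(uintptr_t addr, uint64_t *pad)
	{
		uintptr_t aligned = addr & ~((uintptr_t)getpagesize() - 1);

		*pad = addr - aligned;
		return aligned;
	}

The registered region then spans populated_size * frame_size + pad bytes, matching the umem_size computation in the hunk above.
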
4422diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
4423index 7864b5b..8a1a3fc 100644
4424--- a/drivers/net/bnx2x/bnx2x_ethdev.c
4425+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
4426@@ -20,6 +20,7 @@ int bnx2x_logtype_driver;
4427 * The set of PCI devices this driver supports
4428 */
4429 #define BROADCOM_PCI_VENDOR_ID 0x14E4
4430+#define QLOGIC_PCI_VENDOR_ID 0x1077
4431 static const struct rte_pci_id pci_id_bnx2x_map[] = {
4432 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
4433 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
4434@@ -27,11 +28,13 @@ static const struct rte_pci_id pci_id_bnx2x_map[] = {
4435 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
4436 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
4437 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
4438+ { RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
4439 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
4440 #ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
4441 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
4442 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
4443 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
4444+ { RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
4445 #endif
4446 { .vendor_id = 0, }
4447 };
4448@@ -41,6 +44,7 @@ static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
4449 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
4450 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
4451 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
4452+ { RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
4453 { .vendor_id = 0, }
4454 };
4455
4456diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
4457index 46cf418..3700a27 100644
4458--- a/drivers/net/bnxt/bnxt.h
4459+++ b/drivers/net/bnxt/bnxt.h
4460@@ -233,8 +233,8 @@ struct bnxt_pf_info {
4461 };
4462
4463 /* Max wait time for link up is 10s and link down is 500ms */
4464-#define BNXT_LINK_UP_WAIT_CNT 200
4465-#define BNXT_LINK_DOWN_WAIT_CNT 10
4466+#define BNXT_MAX_LINK_WAIT_CNT 200
4467+#define BNXT_MIN_LINK_WAIT_CNT 10
4468 #define BNXT_LINK_WAIT_INTERVAL 50
4469 struct bnxt_link_info {
4470 uint32_t phy_flags;
4471@@ -593,6 +593,7 @@ struct bnxt {
4472 rte_iova_t hwrm_short_cmd_req_dma_addr;
4473 rte_spinlock_t hwrm_lock;
4474 pthread_mutex_t def_cp_lock;
4475+ pthread_mutex_t health_check_lock;
4476 uint16_t max_req_len;
4477 uint16_t max_resp_len;
4478 uint16_t hwrm_max_ext_req_len;
4479@@ -680,6 +681,13 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp);
4480
4481 bool is_bnxt_supported(struct rte_eth_dev *dev);
4482 bool bnxt_stratus_device(struct bnxt *bp);
4483+int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
4484+ int wait_to_complete);
4485+uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
4486+ uint16_t nb_pkts);
4487+uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
4488+ uint16_t nb_pkts);
4489+
4490 extern const struct rte_flow_ops bnxt_flow_ops;
4491 #define bnxt_acquire_flow_lock(bp) \
4492 pthread_mutex_lock(&(bp)->flow_lock)
4493diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
4494index c0e492e..26c7dae 100644
4495--- a/drivers/net/bnxt/bnxt_cpr.c
4496+++ b/drivers/net/bnxt/bnxt_cpr.c
4497@@ -63,7 +63,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
4498 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
4499 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
4500 /* FALLTHROUGH */
4501- bnxt_link_update(bp->eth_dev, 0, ETH_LINK_UP);
4502+ bnxt_link_update_op(bp->eth_dev, 0);
4503 break;
4504 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
4505 PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
4506@@ -76,6 +76,12 @@ void bnxt_handle_async_event(struct bnxt *bp,
4507 PMD_DRV_LOG(INFO, "Port conn async event\n");
4508 break;
4509 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
4510+ /*
4511+ * Avoid any rx/tx packet processing during firmware reset
4512+ * operation.
4513+ */
4514+ bnxt_stop_rxtx(bp);
4515+
4516 /* Ignore reset notify async events when stopping the port */
4517 if (!bp->eth_dev->data->dev_started) {
4518 bp->flags |= BNXT_FLAG_FATAL_ERROR;
4519@@ -282,3 +288,9 @@ bool bnxt_is_recovery_enabled(struct bnxt *bp)
4520
4521 return false;
4522 }
4523+
4524+void bnxt_stop_rxtx(struct bnxt *bp)
4525+{
4526+ bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
4527+ bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
4528+}
4529diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
4530index cccd6cd..ff9697f 100644
4531--- a/drivers/net/bnxt/bnxt_cpr.h
4532+++ b/drivers/net/bnxt/bnxt_cpr.h
4533@@ -126,4 +126,5 @@ void bnxt_wait_for_device_shutdown(struct bnxt *bp);
4534 bool bnxt_is_recovery_enabled(struct bnxt *bp);
4535 bool bnxt_is_master_func(struct bnxt *bp);
4536
4537+void bnxt_stop_rxtx(struct bnxt *bp);
4538 #endif
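
bnxt_stop_rxtx() above parks the datapath by pointing rx_pkt_burst/tx_pkt_burst at dummy handlers while firmware resets, so a polling application simply sees empty bursts instead of touching rings that are being torn down. A sketch of what such a no-op burst function looks like (names here are illustrative):

	#include <stdint.h>
	#include <rte_common.h>
	#include <rte_mbuf.h>

	static uint16_t
	dummy_recv_pkts(void *rx_queue __rte_unused,
			struct rte_mbuf **rx_pkts __rte_unused,
			uint16_t nb_pkts __rte_unused)
	{
		return 0;	/* nothing received while the device is resetting */
	}
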
4539diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
4540index fe240b6..fc8532a 100644
4541--- a/drivers/net/bnxt/bnxt_ethdev.c
4542+++ b/drivers/net/bnxt/bnxt_ethdev.c
4543@@ -296,8 +296,12 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
4544
4545 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
4546 rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
4547+ else
4548+ vnic->rx_queue_cnt++;
4549 }
4550
4551+ PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);
4552+
4553 rc = bnxt_vnic_rss_configure(bp, vnic);
4554 if (rc)
4555 goto err_out;
4556@@ -541,8 +545,7 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
4557 .wthresh = 0,
4558 },
4559 .rx_free_thresh = 32,
4560- /* If no descriptors available, pkts are dropped by default */
4561- .rx_drop_en = 1,
4562+ .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
4563 };
4564
4565 dev_info->default_txconf = (struct rte_eth_txconf) {
4566@@ -872,7 +875,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
4567 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
4568 eth_dev->data->dev_started = 1;
4569
4570- bnxt_link_update(eth_dev, 1, ETH_LINK_UP);
4571+ bnxt_link_update_op(eth_dev, 1);
4572
4573 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4574 vlan_mask |= ETH_VLAN_FILTER_MASK;
4575@@ -885,9 +888,8 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
4576 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
4577 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
4578
4579- pthread_mutex_lock(&bp->def_cp_lock);
4580 bnxt_schedule_fw_health_check(bp);
4581- pthread_mutex_unlock(&bp->def_cp_lock);
4582+
4583 return 0;
4584
4585 error:
4586@@ -930,6 +932,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
4587 struct bnxt *bp = eth_dev->data->dev_private;
4588 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4589 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4590+ struct rte_eth_link link;
4591
4592 eth_dev->data->dev_started = 0;
4593 /* Prevent crashes when queues are still in use */
4594@@ -943,14 +946,16 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
4595
4596 bnxt_cancel_fw_health_check(bp);
4597
4598- bnxt_dev_set_link_down_op(eth_dev);
4599-
4600- /* Wait for link to be reset and the async notification to process.
4601- * During reset recovery, there is no need to wait and
4602- * VF/NPAR functions do not have privilege to change PHY config.
4603- */
4604- if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
4605- bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);
4606+ /* Do not bring link down during reset recovery */
4607+ if (!is_bnxt_in_error(bp)) {
4608+ bnxt_dev_set_link_down_op(eth_dev);
4609+ /* Wait for link to be reset */
4610+ if (BNXT_SINGLE_PF(bp))
4611+ rte_delay_ms(500);
4612+ /* clear the recorded link status */
4613+ memset(&link, 0, sizeof(link));
4614+ rte_eth_linkstatus_set(eth_dev, &link);
4615+ }
4616
4617 /* Clean queue intr-vector mapping */
4618 rte_intr_efd_disable(intr_handle);
4619@@ -1087,7 +1092,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
4620 if (rc)
4621 return rc;
4622
4623- if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) {
4624+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
4625 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
4626 return -ENOTSUP;
4627 }
4628@@ -1106,14 +1111,13 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
4629 return rc;
4630 }
4631
4632-int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
4633- bool exp_link_status)
4634+int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
4635 {
4636 int rc = 0;
4637 struct bnxt *bp = eth_dev->data->dev_private;
4638 struct rte_eth_link new;
4639- int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
4640- BNXT_LINK_DOWN_WAIT_CNT;
4641+ int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT :
4642+ BNXT_MIN_LINK_WAIT_CNT;
4643
4644 rc = is_bnxt_in_error(bp);
4645 if (rc)
4646@@ -1131,12 +1135,18 @@ int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
4647 goto out;
4648 }
4649
4650- if (!wait_to_complete || new.link_status == exp_link_status)
4651+ if (!wait_to_complete || new.link_status)
4652 break;
4653
4654 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
4655 } while (cnt--);
4656
4657+ /* Only single function PF can bring phy down.
4658+ * When port is stopped, report link down for VF/MH/NPAR functions.
4659+ */
4660+ if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started)
4661+ memset(&new, 0, sizeof(new));
4662+
4663 out:
4664 /* Timed out or success */
4665 if (new.link_status != eth_dev->data->dev_link.link_status ||
4666@@ -1153,12 +1163,6 @@ out:
4667 return rc;
4668 }
4669
4670-static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
4671- int wait_to_complete)
4672-{
4673- return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
4674-}
4675-
4676 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
4677 {
4678 struct bnxt *bp = eth_dev->data->dev_private;
4679@@ -2203,8 +2207,9 @@ bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
4680 qinfo->nb_desc = rxq->nb_rx_desc;
4681
4682 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4683- qinfo->conf.rx_drop_en = 0;
4684+ qinfo->conf.rx_drop_en = rxq->drop_en;
4685 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4686+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
4687 }
4688
4689 static void
4690@@ -2228,6 +2233,7 @@ bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
4691 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4692 qinfo->conf.tx_rs_thresh = 0;
4693 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4694+ qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
4695 }
4696
4697 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
4698@@ -3809,8 +3815,6 @@ static const struct eth_dev_ops bnxt_dev_ops = {
4699 .txq_info_get = bnxt_txq_info_get_op,
4700 .dev_led_on = bnxt_dev_led_on_op,
4701 .dev_led_off = bnxt_dev_led_off_op,
4702- .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
4703- .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
4704 .rx_queue_count = bnxt_rx_queue_count_op,
4705 .rx_descriptor_status = bnxt_rx_descriptor_status_op,
4706 .tx_descriptor_status = bnxt_tx_descriptor_status_op,
4707@@ -3909,7 +3913,7 @@ static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
4708
4709 static void bnxt_dev_cleanup(struct bnxt *bp)
4710 {
4711- bnxt_set_hwrm_link_config(bp, false);
4712+ bp->eth_dev->data->dev_link.link_status = 0;
4713 bp->link_info.link_up = 0;
4714 if (bp->eth_dev->data->dev_started)
4715 bnxt_dev_stop_op(bp->eth_dev);
4716@@ -4205,17 +4209,22 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp)
4717 {
4718 uint32_t polling_freq;
4719
4720+ pthread_mutex_lock(&bp->health_check_lock);
4721+
4722 if (!bnxt_is_recovery_enabled(bp))
4723- return;
4724+ goto done;
4725
4726 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
4727- return;
4728+ goto done;
4729
4730 polling_freq = bp->recovery_info->driver_polling_freq;
4731
4732 rte_eal_alarm_set(US_PER_MS * polling_freq,
4733 bnxt_check_fw_health, (void *)bp);
4734 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4735+
4736+done:
4737+ pthread_mutex_unlock(&bp->health_check_lock);
4738 }
4739
4740 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
4741@@ -4747,6 +4756,10 @@ bnxt_init_locks(struct bnxt *bp)
4742 err = pthread_mutex_init(&bp->def_cp_lock, NULL);
4743 if (err)
4744 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
4745+
4746+ err = pthread_mutex_init(&bp->health_check_lock, NULL);
4747+ if (err)
4748+ PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n");
4749 return err;
4750 }
4751
4752@@ -4888,6 +4901,7 @@ bnxt_uninit_locks(struct bnxt *bp)
4753 {
4754 pthread_mutex_destroy(&bp->flow_lock);
4755 pthread_mutex_destroy(&bp->def_cp_lock);
4756+ pthread_mutex_destroy(&bp->health_check_lock);
4757 }
4758
4759 static int
4760@@ -4941,8 +4955,7 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
4761
4762 if (eth_dev->data->dev_started)
4763 bnxt_dev_close_op(eth_dev);
4764- if (bp->pf.vf_info)
4765- rte_free(bp->pf.vf_info);
4766+ bnxt_hwrm_free_vf_info(bp);
4767 eth_dev->dev_ops = NULL;
4768 eth_dev->rx_pkt_burst = NULL;
4769 eth_dev->tx_pkt_burst = NULL;
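
The new health_check_lock above makes the "already scheduled" test and the alarm registration a single atomic step, so concurrent callers cannot arm the firmware health check twice. A reduced sketch of that check-and-arm-under-a-mutex pattern, with the alarm callback abstracted away:

	#include <stdbool.h>
	#include <pthread.h>

	static pthread_mutex_t check_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool check_scheduled;

	static void
	schedule_health_check(void (*arm_alarm)(void))
	{
		pthread_mutex_lock(&check_lock);
		if (!check_scheduled) {
			arm_alarm();		/* e.g. register a periodic alarm */
			check_scheduled = true;
		}
		pthread_mutex_unlock(&check_lock);
	}
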
4770diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
4771index 622a9bb..f4b18d5 100644
4772--- a/drivers/net/bnxt/bnxt_filter.c
4773+++ b/drivers/net/bnxt/bnxt_filter.c
4774@@ -81,6 +81,15 @@ void bnxt_free_all_filters(struct bnxt *bp)
4775 struct bnxt_filter_info *filter, *temp_filter;
4776 unsigned int i;
4777
4778+ for (i = 0; i < bp->pf.max_vfs; i++) {
4779+ STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
4780+ bnxt_hwrm_clear_l2_filter(bp, filter);
4781+ }
4782+ }
4783+
4784+ if (bp->vnic_info == NULL)
4785+ return;
4786+
4787 for (i = 0; i < bp->nr_vnics; i++) {
4788 vnic = &bp->vnic_info[i];
4789 filter = STAILQ_FIRST(&vnic->filter);
4790@@ -94,12 +103,6 @@ void bnxt_free_all_filters(struct bnxt *bp)
4791 }
4792 STAILQ_INIT(&vnic->filter);
4793 }
4794-
4795- for (i = 0; i < bp->pf.max_vfs; i++) {
4796- STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
4797- bnxt_hwrm_clear_l2_filter(bp, filter);
4798- }
4799- }
4800 }
4801
4802 void bnxt_free_filter_mem(struct bnxt *bp)
4803diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
4804index fed1aa3..5e7b7f0 100644
4805--- a/drivers/net/bnxt/bnxt_hwrm.c
4806+++ b/drivers/net/bnxt/bnxt_hwrm.c
4807@@ -52,7 +52,7 @@ static int page_getenum(size_t size)
4808 if (size <= 1 << 30)
4809 return 30;
4810 PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
4811- return sizeof(void *) * 8 - 1;
4812+ return sizeof(int) * 8 - 1;
4813 }
4814
4815 static int page_roundup(size_t size)
4816@@ -447,6 +447,9 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
4817
4818 HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4819
4820+ /* PMD does not support XDP and RoCE */
4821+ filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
4822+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
4823 req.flags = rte_cpu_to_le_32(filter->flags);
4824
4825 enables = filter->enables |
4826@@ -586,6 +589,20 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
4827 return 0;
4828 }
4829
4830+void bnxt_hwrm_free_vf_info(struct bnxt *bp)
4831+{
4832+ uint16_t i;
4833+
4834+ for (i = 0; i < bp->pf.max_vfs; i++) {
4835+ rte_free(bp->pf.vf_info[i].vlan_table);
4836+ bp->pf.vf_info[i].vlan_table = NULL;
4837+ rte_free(bp->pf.vf_info[i].vlan_as_table);
4838+ bp->pf.vf_info[i].vlan_as_table = NULL;
4839+ }
4840+ rte_free(bp->pf.vf_info);
4841+ bp->pf.vf_info = NULL;
4842+}
4843+
4844 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
4845 {
4846 int rc = 0;
4847@@ -612,9 +629,13 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
4848 new_max_vfs = bp->pdev->max_vfs;
4849 if (new_max_vfs != bp->pf.max_vfs) {
4850 if (bp->pf.vf_info)
4851- rte_free(bp->pf.vf_info);
4852- bp->pf.vf_info = rte_malloc("bnxt_vf_info",
4853+ bnxt_hwrm_free_vf_info(bp);
4854+ bp->pf.vf_info = rte_zmalloc("bnxt_vf_info",
4855 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
4856+ if (bp->pf.vf_info == NULL) {
4857+ PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
4858+ return -ENOMEM;
4859+ }
4860 bp->pf.max_vfs = new_max_vfs;
4861 for (i = 0; i < new_max_vfs; i++) {
4862 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
4863@@ -2123,8 +2144,8 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
4864 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
4865 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
4866 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
4867- req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
4868- req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
4869+ req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
4870+ req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
4871 req.min_agg_len = rte_cpu_to_le_32(512);
4872 }
4873 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4874@@ -3346,17 +3367,19 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
4875
4876 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
4877 req.tunnel_type = tunnel_type;
4878- req.tunnel_dst_port_val = port;
4879+ req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
4880 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4881 HWRM_CHECK_RESULT();
4882
4883 switch (tunnel_type) {
4884 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
4885- bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4886+ bp->vxlan_fw_dst_port_id =
4887+ rte_le_to_cpu_16(resp->tunnel_dst_port_id);
4888 bp->vxlan_port = port;
4889 break;
4890 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
4891- bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
4892+ bp->geneve_fw_dst_port_id =
4893+ rte_le_to_cpu_16(resp->tunnel_dst_port_id);
4894 bp->geneve_port = port;
4895 break;
4896 default:
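
The tunnel destination port fix above converts the port to the firmware's byte order on the way in and converts the returned ID back to CPU order, rather than copying the raw 16-bit values. A small sketch of those conversions (the struct fields are illustrative, not the real HWRM layout):

	#include <stdint.h>
	#include <rte_byteorder.h>

	struct fw_req  { rte_be16_t dst_port; };	/* firmware expects big endian */
	struct fw_resp { rte_le16_t port_id;  };	/* firmware replies little endian */

	static uint16_t
	fill_req_read_resp(struct fw_req *req, const struct fw_resp *resp,
			   uint16_t port)
	{
		req->dst_port = rte_cpu_to_be_16(port);
		return rte_le_to_cpu_16(resp->port_id);
	}
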
4897diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
4898index e647993..8ceaeb5 100644
4899--- a/drivers/net/bnxt/bnxt_hwrm.h
4900+++ b/drivers/net/bnxt/bnxt_hwrm.h
4901@@ -89,6 +89,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp);
4902 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp);
4903 int bnxt_hwrm_func_driver_register(struct bnxt *bp);
4904 int bnxt_hwrm_func_qcaps(struct bnxt *bp);
4905+void bnxt_hwrm_free_vf_info(struct bnxt *bp);
4906 int bnxt_hwrm_func_reset(struct bnxt *bp);
4907 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
4908 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
4909diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
4910index 1999cd7..bb60f8a 100644
4911--- a/drivers/net/bnxt/bnxt_ring.c
4912+++ b/drivers/net/bnxt/bnxt_ring.c
4913@@ -451,6 +451,7 @@ int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
4914 ring->ring_mask = ring->ring_size - 1;
4915 ring->vmem_size = 0;
4916 ring->vmem = NULL;
4917+ ring->fw_ring_id = INVALID_HW_RING_ID;
4918
4919 nqr->cp_ring_struct = ring;
4920 rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr");
4921diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
4922index 9913aed..0a4685d 100644
4923--- a/drivers/net/bnxt/bnxt_ring.h
4924+++ b/drivers/net/bnxt/bnxt_ring.h
4925@@ -27,7 +27,7 @@
4926 #define DEFAULT_RX_RING_SIZE 256
4927 #define DEFAULT_TX_RING_SIZE 256
4928
4929-#define AGG_RING_SIZE_FACTOR 2
4930+#define AGG_RING_SIZE_FACTOR 4
4931 #define AGG_RING_MULTIPLIER 2
4932
4933 /* These assume 4k pages */
4934@@ -82,12 +82,10 @@ void bnxt_free_rxtx_nq_ring(struct bnxt *bp);
4935
4936 static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
4937 {
4938- rte_cio_wmb();
4939-
4940 if (db->db_64)
4941- rte_write64_relaxed(db->db_key64 | idx, db->doorbell);
4942+ rte_write64(db->db_key64 | idx, db->doorbell);
4943 else
4944- rte_write32_relaxed(db->db_key32 | idx, db->doorbell);
4945+ rte_write32(db->db_key32 | idx, db->doorbell);
4946 }
4947
4948 /* Ring an NQ doorbell and disable interrupts for the ring. */
4949@@ -96,10 +94,9 @@ static inline void bnxt_db_nq(struct bnxt_cp_ring_info *cpr)
4950 if (unlikely(!cpr->cp_db.db_64))
4951 return;
4952
4953- rte_cio_wmb();
4954- rte_write64_relaxed(cpr->cp_db.db_key64 | DBR_TYPE_NQ |
4955- RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),
4956- cpr->cp_db.doorbell);
4957+ rte_write64(cpr->cp_db.db_key64 | DBR_TYPE_NQ |
4958+ RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),
4959+ cpr->cp_db.doorbell);
4960 }
4961
4962 /* Ring an NQ doorbell and enable interrupts for the ring. */
4963@@ -108,10 +105,9 @@ static inline void bnxt_db_nq_arm(struct bnxt_cp_ring_info *cpr)
4964 if (unlikely(!cpr->cp_db.db_64))
4965 return;
4966
4967- rte_cio_wmb();
4968- rte_write64_relaxed(cpr->cp_db.db_key64 | DBR_TYPE_NQ_ARM |
4969- RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),
4970- cpr->cp_db.doorbell);
4971+ rte_write64(cpr->cp_db.db_key64 | DBR_TYPE_NQ_ARM |
4972+ RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),
4973+ cpr->cp_db.doorbell);
4974 }
4975
4976 static inline void bnxt_db_cq(struct bnxt_cp_ring_info *cpr)
4977@@ -119,11 +115,18 @@ static inline void bnxt_db_cq(struct bnxt_cp_ring_info *cpr)
4978 struct bnxt_db_info *db = &cpr->cp_db;
4979 uint32_t idx = RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons);
4980
4981- rte_compiler_barrier();
4982- if (db->db_64)
4983- rte_write64_relaxed(db->db_key64 | idx, db->doorbell);
4984- else
4985- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
4986+ if (db->db_64) {
4987+ uint64_t key_idx = db->db_key64 | idx;
4988+ void *doorbell = db->doorbell;
4989+
4990+ rte_compiler_barrier();
4991+ rte_write64_relaxed(key_idx, doorbell);
4992+ } else {
4993+ uint32_t cp_raw_cons = cpr->cp_raw_cons;
4994+
4995+ rte_compiler_barrier();
4996+ B_CP_DIS_DB(cpr, cp_raw_cons);
4997+ }
4998 }
4999
5000 #endif
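
The doorbell helpers above drop the explicit rte_cio_wmb() plus relaxed store in favour of rte_write64()/rte_write32(), which already order the store after prior memory writes. A short sketch contrasting the two forms (this uses the generic rte_io_wmb(); the driver's exact barrier requirements may differ):

	#include <stdint.h>
	#include <rte_atomic.h>
	#include <rte_io.h>

	static void
	ring_doorbell(volatile void *db, uint64_t val)
	{
		rte_write64(val, db);		/* barrier + store in one call */
	}

	static void
	ring_doorbell_split(volatile void *db, uint64_t val)
	{
		rte_io_wmb();			/* order earlier descriptor writes */
		rte_write64_relaxed(val, db);	/* then the plain store */
	}
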
The diff has been truncated for viewing.
