Merge ~paelzer/ubuntu/+source/dpdk:lp-1940913-MRE-20.11.3-IMPISH into ubuntu/+source/dpdk:ubuntu/impish-devel

Proposed by Christian Ehrhardt 
Status: Merged
Approved by: Christian Ehrhardt 
Approved revision: de567772108afbdc7b6e4af0bb2d9d71a8d61c2d
Merged at revision: de567772108afbdc7b6e4af0bb2d9d71a8d61c2d
Proposed branch: ~paelzer/ubuntu/+source/dpdk:lp-1940913-MRE-20.11.3-IMPISH
Merge into: ubuntu/+source/dpdk:ubuntu/impish-devel
Diff against target: 10834 lines (+3323/-1413)
218 files modified
MAINTAINERS (+1/-0)
VERSION (+1/-1)
app/test-crypto-perf/cperf_test_common.c (+1/-1)
app/test-pmd/cmdline.c (+9/-9)
app/test-pmd/config.c (+3/-2)
app/test-pmd/csumonly.c (+11/-10)
app/test-pmd/testpmd.c (+72/-85)
app/test-pmd/testpmd.h (+1/-1)
app/test/packet_burst_generator.c (+2/-2)
app/test/test_cmdline_lib.c (+27/-12)
app/test/test_cryptodev.c (+16/-15)
app/test/test_cryptodev_aes_test_vectors.h (+1/-1)
app/test/test_eal_flags.c (+1/-0)
app/test/test_event_crypto_adapter.c (+4/-5)
app/test/test_mbuf.c (+1/-1)
app/test/test_power_cpufreq.c (+18/-11)
buildtools/meson.build (+1/-1)
buildtools/symlink-drivers-solibs.py (+49/-0)
config/meson.build (+4/-0)
debian/changelog (+11/-0)
debian/patches/series (+1/-0)
debian/patches/ubuntu/lp-1940957-net-i40e-support-25G-AOC-ACC-cables.patch (+42/-0)
devtools/check-maintainers.sh (+2/-2)
doc/guides/contributing/coding_style.rst (+1/-1)
doc/guides/cryptodevs/scheduler.rst (+1/-1)
doc/guides/howto/pvp_reference_benchmark.rst (+1/-1)
doc/guides/nics/bnx2x.rst (+1/-1)
doc/guides/nics/dpaa.rst (+3/-3)
doc/guides/nics/ena.rst (+1/-1)
doc/guides/nics/mlx5.rst (+8/-1)
doc/guides/nics/octeontx2.rst (+2/-2)
doc/guides/nics/virtio.rst (+1/-1)
doc/guides/platform/dpaa.rst (+1/-1)
doc/guides/prog_guide/bbdev.rst (+1/-1)
doc/guides/prog_guide/env_abstraction_layer.rst (+1/-1)
doc/guides/prog_guide/eventdev.rst (+1/-1)
doc/guides/prog_guide/multi_proc_support.rst (+1/-1)
doc/guides/prog_guide/qos_framework.rst (+1/-1)
doc/guides/prog_guide/regexdev.rst (+1/-1)
doc/guides/prog_guide/writing_efficient_code.rst (+8/-7)
doc/guides/rawdevs/ioat.rst (+1/-1)
doc/guides/rawdevs/ntb.rst (+1/-1)
doc/guides/regexdevs/features_overview.rst (+1/-1)
doc/guides/rel_notes/deprecation.rst (+10/-14)
doc/guides/rel_notes/release_16_11.rst (+1/-1)
doc/guides/rel_notes/release_19_08.rst (+1/-1)
doc/guides/rel_notes/release_20_11.rst (+340/-0)
doc/guides/rel_notes/release_2_2.rst (+1/-1)
doc/guides/sample_app_ug/fips_validation.rst (+1/-1)
doc/guides/sample_app_ug/hello_world.rst (+1/-1)
doc/guides/sample_app_ug/ipsec_secgw.rst (+1/-1)
doc/guides/sample_app_ug/performance_thread.rst (+1/-1)
doc/guides/testpmd_app_ug/run_app.rst (+1/-1)
doc/guides/testpmd_app_ug/testpmd_funcs.rst (+2/-2)
doc/guides/tools/hugepages.rst (+3/-3)
doc/meson.build (+1/-1)
drivers/bus/dpaa/base/fman/fman.c (+1/-1)
drivers/bus/pci/linux/pci.c (+2/-2)
drivers/bus/pci/pci_common.c (+4/-1)
drivers/bus/vmbus/vmbus_common.c (+4/-1)
drivers/common/iavf/iavf_impl.c (+3/-2)
drivers/common/mlx5/linux/meson.build (+4/-0)
drivers/common/mlx5/linux/mlx5_glue.c (+47/-10)
drivers/common/mlx5/linux/mlx5_glue.h (+15/-1)
drivers/common/mlx5/linux/mlx5_nl.c (+76/-15)
drivers/common/mlx5/mlx5_common_mr.c (+89/-0)
drivers/common/mlx5/mlx5_common_mr.h (+3/-0)
drivers/common/mlx5/version.map (+1/-0)
drivers/common/octeontx2/otx2_dev.h (+3/-0)
drivers/common/sfc_efx/meson.build (+1/-1)
drivers/crypto/aesni_gcm/aesni_gcm_pmd.c (+8/-2)
drivers/crypto/mvsam/rte_mrvl_pmd.c (+10/-2)
drivers/crypto/mvsam/rte_mrvl_pmd_ops.c (+7/-7)
drivers/crypto/octeontx/otx_cryptodev.c (+4/-1)
drivers/crypto/octeontx2/otx2_ipsec_po_ops.h (+2/-12)
drivers/crypto/octeontx2/otx2_security.h (+6/-4)
drivers/crypto/qat/qat_asym_pmd.c (+4/-0)
drivers/crypto/qat/qat_sym_session.c (+3/-0)
drivers/event/octeontx2/otx2_worker.h (+1/-1)
drivers/mempool/octeontx2/otx2_mempool_ops.c (+4/-2)
drivers/net/bnxt/bnxt.h (+45/-0)
drivers/net/bnxt/bnxt_cpr.c (+2/-0)
drivers/net/bnxt/bnxt_cpr.h (+32/-4)
drivers/net/bnxt/bnxt_ethdev.c (+92/-49)
drivers/net/bnxt/bnxt_flow.c (+72/-44)
drivers/net/bnxt/bnxt_hwrm.c (+195/-96)
drivers/net/bnxt/bnxt_hwrm.h (+3/-3)
drivers/net/bnxt/bnxt_irq.c (+8/-4)
drivers/net/bnxt/bnxt_ring.c (+14/-16)
drivers/net/bnxt/bnxt_ring.h (+1/-1)
drivers/net/bnxt/bnxt_rxq.c (+2/-2)
drivers/net/bnxt/bnxt_rxq.h (+1/-0)
drivers/net/bnxt/bnxt_rxr.c (+103/-5)
drivers/net/bnxt/bnxt_rxr.h (+1/-0)
drivers/net/bnxt/bnxt_rxtx_vec_neon.c (+24/-7)
drivers/net/bnxt/bnxt_rxtx_vec_sse.c (+24/-7)
drivers/net/bnxt/bnxt_stats.c (+132/-11)
drivers/net/bnxt/bnxt_txq.c (+2/-2)
drivers/net/bnxt/bnxt_txr.c (+11/-13)
drivers/net/bnxt/bnxt_vnic.c (+2/-1)
drivers/net/bnxt/meson.build (+1/-1)
drivers/net/bonding/rte_eth_bond_pmd.c (+13/-6)
drivers/net/dpaa/dpaa_flow.c (+4/-1)
drivers/net/ena/ena_ethdev.c (+6/-0)
drivers/net/hinic/base/hinic_pmd_niccfg.h (+0/-9)
drivers/net/hinic/base/hinic_pmd_nicio.c (+2/-2)
drivers/net/hinic/hinic_pmd_ethdev.c (+3/-9)
drivers/net/hinic/hinic_pmd_ethdev.h (+17/-0)
drivers/net/hns3/hns3_cmd.c (+3/-1)
drivers/net/hns3/hns3_ethdev.c (+23/-45)
drivers/net/hns3/hns3_ethdev.h (+3/-0)
drivers/net/hns3/hns3_ethdev_vf.c (+15/-34)
drivers/net/hns3/hns3_fdir.h (+1/-6)
drivers/net/hns3/hns3_flow.c (+73/-87)
drivers/net/hns3/hns3_intr.c (+2/-2)
drivers/net/hns3/hns3_rxtx.c (+5/-2)
drivers/net/hns3/meson.build (+19/-1)
drivers/net/i40e/i40e_ethdev.c (+30/-23)
drivers/net/i40e/i40e_ethdev.h (+5/-4)
drivers/net/i40e/i40e_fdir.c (+10/-8)
drivers/net/i40e/i40e_flow.c (+2/-2)
drivers/net/i40e/i40e_rxtx.c (+15/-5)
drivers/net/i40e/i40e_vf_representor.c (+20/-17)
drivers/net/iavf/iavf_ethdev.c (+1/-1)
drivers/net/iavf/iavf_rxtx.c (+6/-1)
drivers/net/iavf/iavf_vchnl.c (+2/-2)
drivers/net/ice/base/ice_osdep.h (+3/-2)
drivers/net/ice/ice_dcf_ethdev.c (+4/-3)
drivers/net/ice/ice_dcf_parent.c (+0/-1)
drivers/net/ice/ice_ethdev.c (+40/-17)
drivers/net/ice/ice_ethdev.h (+6/-3)
drivers/net/ice/ice_fdir_filter.c (+3/-3)
drivers/net/ice/ice_rxtx.c (+31/-37)
drivers/net/ice/ice_rxtx.h (+2/-2)
drivers/net/ice/ice_rxtx_vec_avx2.c (+1/-1)
drivers/net/ice/ice_rxtx_vec_avx512.c (+1/-1)
drivers/net/ice/ice_rxtx_vec_common.h (+1/-1)
drivers/net/ice/ice_rxtx_vec_sse.c (+1/-1)
drivers/net/ice/ice_switch_filter.c (+3/-3)
drivers/net/ixgbe/ixgbe_flow.c (+1/-0)
drivers/net/memif/memif_socket.c (+12/-3)
drivers/net/mlx5/linux/mlx5_os.c (+166/-49)
drivers/net/mlx5/meson.build (+1/-0)
drivers/net/mlx5/mlx5.c (+9/-0)
drivers/net/mlx5/mlx5.h (+3/-0)
drivers/net/mlx5/mlx5_devx.c (+2/-0)
drivers/net/mlx5/mlx5_flow.c (+122/-45)
drivers/net/mlx5/mlx5_flow.h (+1/-0)
drivers/net/mlx5/mlx5_flow_dv.c (+85/-10)
drivers/net/mlx5/mlx5_flow_verbs.c (+6/-4)
drivers/net/mlx5/mlx5_mr.c (+5/-96)
drivers/net/mlx5/mlx5_rxq.c (+3/-9)
drivers/net/mlx5/mlx5_rxtx.c (+15/-16)
drivers/net/mlx5/mlx5_rxtx_vec.c (+2/-1)
drivers/net/mlx5/mlx5_rxtx_vec_altivec.h (+1/-1)
drivers/net/mlx5/mlx5_rxtx_vec_neon.h (+6/-7)
drivers/net/mlx5/mlx5_rxtx_vec_sse.h (+1/-1)
drivers/net/mlx5/mlx5_trigger.c (+5/-7)
drivers/net/mlx5/mlx5_txpp.c (+15/-6)
drivers/net/mlx5/mlx5_txq.c (+1/-1)
drivers/net/mvpp2/mrvl_ethdev.c (+9/-3)
drivers/net/mvpp2/mrvl_ethdev.h (+2/-1)
drivers/net/mvpp2/mrvl_tm.c (+66/-1)
drivers/net/octeontx/base/octeontx_pkivf.h (+0/-1)
drivers/net/octeontx2/otx2_ethdev.c (+41/-31)
drivers/net/octeontx2/otx2_ethdev.h (+12/-1)
drivers/net/octeontx2/otx2_ethdev_devargs.c (+1/-1)
drivers/net/octeontx2/otx2_flow.c (+14/-2)
drivers/net/octeontx2/otx2_flow.h (+0/-1)
drivers/net/octeontx2/otx2_tm.c (+6/-0)
drivers/net/octeontx2/otx2_tx.c (+6/-2)
drivers/net/octeontx2/otx2_tx.h (+7/-5)
drivers/net/pfe/pfe_ethdev.c (+0/-5)
drivers/net/sfc/meson.build (+1/-1)
drivers/net/sfc/sfc.h (+3/-2)
drivers/net/sfc/sfc_ef100_rx.c (+1/-1)
drivers/net/sfc/sfc_ethdev.c (+50/-36)
drivers/net/sfc/sfc_mae.c (+22/-13)
drivers/net/sfc/sfc_port.c (+32/-17)
drivers/net/softnic/conn.c (+1/-0)
drivers/net/softnic/rte_eth_softnic.c (+28/-4)
drivers/net/softnic/rte_eth_softnic_action.c (+1/-0)
drivers/net/softnic/rte_eth_softnic_internals.h (+3/-2)
drivers/net/tap/rte_eth_tap.c (+14/-8)
drivers/net/virtio/meson.build (+1/-1)
drivers/net/virtio/virtio_ethdev.c (+67/-0)
drivers/net/virtio/virtio_ethdev.h (+5/-0)
drivers/net/virtio/virtio_pci.h (+2/-0)
drivers/net/virtio/virtio_rxtx.c (+61/-19)
drivers/net/virtio/virtio_user/virtio_user_dev.c (+6/-0)
drivers/raw/ioat/dpdk_idxd_cfg.py (+2/-2)
drivers/regex/mlx5/mlx5_regex.c (+0/-2)
drivers/regex/mlx5/mlx5_regex_fastpath.c (+2/-2)
drivers/vdpa/mlx5/mlx5_vdpa_event.c (+1/-1)
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c (+7/-0)
examples/l2fwd/main.c (+14/-3)
kernel/linux/kni/kni_net.c (+3/-3)
lib/librte_cryptodev/rte_cryptodev_pmd.c (+2/-1)
lib/librte_distributor/rte_distributor.c (+1/-1)
lib/librte_eal/common/eal_common_proc.c (+14/-8)
lib/librte_eal/include/rte_bitmap.h (+1/-2)
lib/librte_eal/include/rte_malloc.h (+1/-1)
lib/librte_eal/include/rte_vfio.h (+1/-0)
lib/librte_eal/windows/eal.c (+1/-0)
lib/librte_eal/windows/eal_alarm.c (+12/-0)
lib/librte_eal/windows/eal_memory.c (+7/-0)
lib/librte_eal/windows/eal_windows.h (+5/-0)
lib/librte_ethdev/rte_flow.h (+1/-1)
lib/librte_eventdev/rte_event_eth_tx_adapter.c (+0/-1)
lib/librte_flow_classify/rte_flow_classify.c (+2/-2)
lib/librte_graph/graph_stats.c (+5/-2)
lib/librte_kni/rte_kni.c (+3/-2)
lib/librte_rib/rte_rib6.c (+21/-8)
lib/librte_sched/rte_sched.c (+32/-28)
lib/librte_table/rte_swx_table_em.c (+1/-1)
lib/librte_vhost/vhost_crypto.c (+4/-2)
lib/librte_vhost/vhost_user.c (+42/-8)
lib/librte_vhost/virtio_net.c (+89/-28)
Reviewer                                Review Type   Date Requested   Status
Bryce Harrington                        community                      Approve
Canonical Server                                                       Pending
Canonical Server packageset reviewers                                  Pending
Review via email: mp+408161@code.launchpad.net
Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

FYI this follows Debian's model of importing the upstream release not as a single commit, but as the upstream git history.

To ease comparison I recommend the branch upstream-20.11-stable from <email address hidden>:debian/dpdk.git.

With that you can easily see that it matches the upstream release and otherwise changes very little.
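For illustration, fetching that branch and diffing against it could look like the sketch below. The repository URL is hidden above, so <debian-dpdk-repo> is a placeholder; the branch names are taken from this merge proposal:

  # Sketch only: compare this MP against Debian's upstream import branch.
  # <debian-dpdk-repo> stands in for the git URL hidden above.
  git remote add debian <debian-dpdk-repo>
  git fetch debian upstream-20.11-stable
  # Differences between Debian's 20.11.x import and the proposed branch,
  # ignoring the packaging directory:
  git diff debian/upstream-20.11-stable..lp-1940913-MRE-20.11.3-IMPISH -- . ':!debian'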

Revision history for this message
Bryce Harrington (bryce) wrote :

LGTM, see the focal MP for details.

review: Approve
Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

Thank you, uploaded

Uploading to ubuntu (via ftp to upload.ubuntu.com):
  Uploading dpdk_20.11.3-0ubuntu1.dsc: done.
  Uploading dpdk_20.11.3.orig.tar.xz: done.
  Uploading dpdk_20.11.3-0ubuntu1.debian.tar.xz: done.
  Uploading dpdk_20.11.3-0ubuntu1_source.buildinfo: done.
  Uploading dpdk_20.11.3-0ubuntu1_source.changes: done.
Successfully uploaded packages.
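The log above is consistent with a plain source-only dput upload; assuming the default "ubuntu" dput target, something like the following would produce it:

  # Sketch: source-only upload of the signed changes file to the Ubuntu archive.
  dput ubuntu dpdk_20.11.3-0ubuntu1_source.changes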

Preview Diff

1diff --git a/MAINTAINERS b/MAINTAINERS
2index f45c8c1..dcde2ab 100644
3--- a/MAINTAINERS
4+++ b/MAINTAINERS
5@@ -103,6 +103,7 @@ F: buildtools/gen-pmdinfo-cfile.sh
6 F: buildtools/list-dir-globs.py
7 F: buildtools/pkg-config/
8 F: buildtools/symlink-drivers-solibs.sh
9+F: buildtools/symlink-drivers-solibs.py
10 F: devtools/test-meson-builds.sh
11
12 Public CI
13diff --git a/VERSION b/VERSION
14index 0951ac2..bc6fac4 100644
15--- a/VERSION
16+++ b/VERSION
17@@ -1 +1 @@
18-20.11.2
19+20.11.3
20diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c
21index 058e0ba..12925c7 100644
22--- a/app/test-crypto-perf/cperf_test_common.c
23+++ b/app/test-crypto-perf/cperf_test_common.c
24@@ -194,7 +194,7 @@ cperf_alloc_common_memory(const struct cperf_options *options,
25 (mbuf_size * segments_nb);
26 params.dst_buf_offset = *dst_buf_offset;
27 /* Destination buffer will be one segment only */
28- obj_size += max_size;
29+ obj_size += max_size + sizeof(struct rte_mbuf);
30 }
31
32 *pool = rte_mempool_create_empty(pool_name,
33diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
34index 3712daa..58ebff5 100644
35--- a/app/test-pmd/cmdline.c
36+++ b/app/test-pmd/cmdline.c
37@@ -1225,7 +1225,7 @@ cmdline_parse_token_string_t cmd_operate_port_all_all =
38 cmdline_parse_inst_t cmd_operate_port = {
39 .f = cmd_operate_port_parsed,
40 .data = NULL,
41- .help_str = "port start|stop|close all: Start/Stop/Close/Reset all ports",
42+ .help_str = "port start|stop|close|reset all: Start/Stop/Close/Reset all ports",
43 .tokens = {
44 (void *)&cmd_operate_port_all_cmd,
45 (void *)&cmd_operate_port_all_port,
46@@ -1272,7 +1272,7 @@ cmdline_parse_token_num_t cmd_operate_specific_port_id =
47 cmdline_parse_inst_t cmd_operate_specific_port = {
48 .f = cmd_operate_specific_port_parsed,
49 .data = NULL,
50- .help_str = "port start|stop|close <port_id>: Start/Stop/Close/Reset port_id",
51+ .help_str = "port start|stop|close|reset <port_id>: Start/Stop/Close/Reset port_id",
52 .tokens = {
53 (void *)&cmd_operate_specific_port_cmd,
54 (void *)&cmd_operate_specific_port_port,
55@@ -1607,13 +1607,13 @@ cmd_config_speed_specific_parsed(void *parsed_result,
56 struct cmd_config_speed_specific *res = parsed_result;
57 uint32_t link_speed;
58
59- if (!all_ports_stopped()) {
60- printf("Please stop all ports first\n");
61+ if (port_id_is_invalid(res->id, ENABLED_WARN))
62 return;
63- }
64
65- if (port_id_is_invalid(res->id, ENABLED_WARN))
66+ if (!port_is_stopped(res->id)) {
67+ printf("Please stop port %d first\n", res->id);
68 return;
69+ }
70
71 if (parse_and_check_speed_duplex(res->value1, res->value2,
72 &link_speed) < 0)
73@@ -16449,17 +16449,17 @@ cmd_set_port_fec_mode_parsed(
74 {
75 struct cmd_set_port_fec_mode *res = parsed_result;
76 uint16_t port_id = res->port_id;
77- uint32_t mode;
78+ uint32_t fec_capa;
79 int ret;
80
81- ret = parse_fec_mode(res->fec_value, &mode);
82+ ret = parse_fec_mode(res->fec_value, &fec_capa);
83 if (ret < 0) {
84 printf("Unknown fec mode: %s for Port %d\n", res->fec_value,
85 port_id);
86 return;
87 }
88
89- ret = rte_eth_fec_set(port_id, mode);
90+ ret = rte_eth_fec_set(port_id, fec_capa);
91 if (ret == -ENOTSUP) {
92 printf("Function not implemented\n");
93 return;
94diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
95index 937a90f..4847c36 100644
96--- a/app/test-pmd/config.c
97+++ b/app/test-pmd/config.c
98@@ -3760,13 +3760,14 @@ set_tx_pkt_split(const char *name)
99 }
100
101 int
102-parse_fec_mode(const char *name, uint32_t *mode)
103+parse_fec_mode(const char *name, uint32_t *fec_capa)
104 {
105 uint8_t i;
106
107 for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
108 if (strcmp(fec_mode_name[i].name, name) == 0) {
109- *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
110+ *fec_capa =
111+ RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
112 return 0;
113 }
114 }
115diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
116index d813d4f..b0a58e8 100644
117--- a/app/test-pmd/csumonly.c
118+++ b/app/test-pmd/csumonly.c
119@@ -480,17 +480,18 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
120
121 if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
122 ipv4_hdr = l3_hdr;
123- ipv4_hdr->hdr_checksum = 0;
124
125 ol_flags |= PKT_TX_IPV4;
126 if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
127 ol_flags |= PKT_TX_IP_CKSUM;
128 } else {
129- if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
130+ if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
131 ol_flags |= PKT_TX_IP_CKSUM;
132- else
133+ } else {
134+ ipv4_hdr->hdr_checksum = 0;
135 ipv4_hdr->hdr_checksum =
136 rte_ipv4_cksum(ipv4_hdr);
137+ }
138 }
139 } else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV6))
140 ol_flags |= PKT_TX_IPV6;
141@@ -501,10 +502,10 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
142 udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
143 /* do not recalculate udp cksum if it was 0 */
144 if (udp_hdr->dgram_cksum != 0) {
145- udp_hdr->dgram_cksum = 0;
146- if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
147+ if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
148 ol_flags |= PKT_TX_UDP_CKSUM;
149- else {
150+ } else {
151+ udp_hdr->dgram_cksum = 0;
152 udp_hdr->dgram_cksum =
153 get_udptcp_checksum(l3_hdr, udp_hdr,
154 info->ethertype);
155@@ -514,12 +515,12 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
156 ol_flags |= PKT_TX_UDP_SEG;
157 } else if (info->l4_proto == IPPROTO_TCP) {
158 tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
159- tcp_hdr->cksum = 0;
160 if (tso_segsz)
161 ol_flags |= PKT_TX_TCP_SEG;
162- else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
163+ else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
164 ol_flags |= PKT_TX_TCP_CKSUM;
165- else {
166+ } else {
167+ tcp_hdr->cksum = 0;
168 tcp_hdr->cksum =
169 get_udptcp_checksum(l3_hdr, tcp_hdr,
170 info->ethertype);
171@@ -529,13 +530,13 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
172 } else if (info->l4_proto == IPPROTO_SCTP) {
173 sctp_hdr = (struct rte_sctp_hdr *)
174 ((char *)l3_hdr + info->l3_len);
175- sctp_hdr->cksum = 0;
176 /* sctp payload must be a multiple of 4 to be
177 * offloaded */
178 if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
179 ((ipv4_hdr->total_length & 0x3) == 0)) {
180 ol_flags |= PKT_TX_SCTP_CKSUM;
181 } else {
182+ sctp_hdr->cksum = 0;
183 /* XXX implement CRC32c, example available in
184 * RFC3309 */
185 }
186diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
187index d830fe3..5288c04 100644
188--- a/app/test-pmd/testpmd.c
189+++ b/app/test-pmd/testpmd.c
190@@ -1399,22 +1399,69 @@ check_nb_hairpinq(queueid_t hairpinq)
191 }
192
193 static void
194+init_config_port_offloads(portid_t pid, uint32_t socket_id)
195+{
196+ struct rte_port *port = &ports[pid];
197+ uint16_t data_size;
198+ int ret;
199+ int i;
200+
201+ port->dev_conf.txmode = tx_mode;
202+ port->dev_conf.rxmode = rx_mode;
203+
204+ ret = eth_dev_info_get_print_err(pid, &port->dev_info);
205+ if (ret != 0)
206+ rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
207+
208+ ret = update_jumbo_frame_offload(pid);
209+ if (ret != 0)
210+ printf("Updating jumbo frame offload failed for port %u\n",
211+ pid);
212+
213+ if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
214+ port->dev_conf.txmode.offloads &=
215+ ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
216+
217+ /* Apply Rx offloads configuration */
218+ for (i = 0; i < port->dev_info.max_rx_queues; i++)
219+ port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
220+ /* Apply Tx offloads configuration */
221+ for (i = 0; i < port->dev_info.max_tx_queues; i++)
222+ port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
223+
224+ /* set flag to initialize port/queue */
225+ port->need_reconfig = 1;
226+ port->need_reconfig_queues = 1;
227+ port->socket_id = socket_id;
228+ port->tx_metadata = 0;
229+
230+ /*
231+ * Check for maximum number of segments per MTU.
232+ * Accordingly update the mbuf data size.
233+ */
234+ if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
235+ port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
236+ data_size = rx_mode.max_rx_pkt_len /
237+ port->dev_info.rx_desc_lim.nb_mtu_seg_max;
238+
239+ if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
240+ mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
241+ TESTPMD_LOG(WARNING,
242+ "Configured mbuf size of the first segment %hu\n",
243+ mbuf_data_size[0]);
244+ }
245+ }
246+}
247+
248+static void
249 init_config(void)
250 {
251 portid_t pid;
252- struct rte_port *port;
253 struct rte_mempool *mbp;
254 unsigned int nb_mbuf_per_pool;
255 lcoreid_t lc_id;
256- uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
257 struct rte_gro_param gro_param;
258 uint32_t gso_types;
259- uint16_t data_size;
260- bool warning = 0;
261- int k;
262- int ret;
263-
264- memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
265
266 /* Configuration of logical cores. */
267 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
268@@ -1436,30 +1483,12 @@ init_config(void)
269 }
270
271 RTE_ETH_FOREACH_DEV(pid) {
272- port = &ports[pid];
273- /* Apply default TxRx configuration for all ports */
274- port->dev_conf.txmode = tx_mode;
275- port->dev_conf.rxmode = rx_mode;
276+ uint32_t socket_id;
277
278- ret = eth_dev_info_get_print_err(pid, &port->dev_info);
279- if (ret != 0)
280- rte_exit(EXIT_FAILURE,
281- "rte_eth_dev_info_get() failed\n");
282-
283- ret = update_jumbo_frame_offload(pid);
284- if (ret != 0)
285- printf("Updating jumbo frame offload failed for port %u\n",
286- pid);
287-
288- if (!(port->dev_info.tx_offload_capa &
289- DEV_TX_OFFLOAD_MBUF_FAST_FREE))
290- port->dev_conf.txmode.offloads &=
291- ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
292 if (numa_support) {
293- if (port_numa[pid] != NUMA_NO_CONFIG)
294- port_per_socket[port_numa[pid]]++;
295- else {
296- uint32_t socket_id = rte_eth_dev_socket_id(pid);
297+ socket_id = port_numa[pid];
298+ if (port_numa[pid] == NUMA_NO_CONFIG) {
299+ socket_id = rte_eth_dev_socket_id(pid);
300
301 /*
302 * if socket_id is invalid,
303@@ -1467,45 +1496,14 @@ init_config(void)
304 */
305 if (check_socket_id(socket_id) < 0)
306 socket_id = socket_ids[0];
307- port_per_socket[socket_id]++;
308- }
309- }
310-
311- /* Apply Rx offloads configuration */
312- for (k = 0; k < port->dev_info.max_rx_queues; k++)
313- port->rx_conf[k].offloads =
314- port->dev_conf.rxmode.offloads;
315- /* Apply Tx offloads configuration */
316- for (k = 0; k < port->dev_info.max_tx_queues; k++)
317- port->tx_conf[k].offloads =
318- port->dev_conf.txmode.offloads;
319-
320- /* set flag to initialize port/queue */
321- port->need_reconfig = 1;
322- port->need_reconfig_queues = 1;
323- port->tx_metadata = 0;
324-
325- /* Check for maximum number of segments per MTU. Accordingly
326- * update the mbuf data size.
327- */
328- if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
329- port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
330- data_size = rx_mode.max_rx_pkt_len /
331- port->dev_info.rx_desc_lim.nb_mtu_seg_max;
332-
333- if ((data_size + RTE_PKTMBUF_HEADROOM) >
334- mbuf_data_size[0]) {
335- mbuf_data_size[0] = data_size +
336- RTE_PKTMBUF_HEADROOM;
337- warning = 1;
338 }
339+ } else {
340+ socket_id = (socket_num == UMA_NO_CONFIG) ?
341+ 0 : socket_num;
342 }
343+ /* Apply default TxRx configuration for all ports */
344+ init_config_port_offloads(pid, socket_id);
345 }
346-
347- if (warning)
348- TESTPMD_LOG(WARNING,
349- "Configured mbuf size of the first segment %hu\n",
350- mbuf_data_size[0]);
351 /*
352 * Create pools of mbuf.
353 * If NUMA support is disabled, create a single pool of mbuf in
354@@ -1592,21 +1590,8 @@ init_config(void)
355 void
356 reconfig(portid_t new_port_id, unsigned socket_id)
357 {
358- struct rte_port *port;
359- int ret;
360-
361 /* Reconfiguration of Ethernet ports. */
362- port = &ports[new_port_id];
363-
364- ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
365- if (ret != 0)
366- return;
367-
368- /* set flag to initialize port/queue */
369- port->need_reconfig = 1;
370- port->need_reconfig_queues = 1;
371- port->socket_id = socket_id;
372-
373+ init_config_port_offloads(new_port_id, socket_id);
374 init_port_config();
375 }
376
377@@ -2429,7 +2414,6 @@ start_port(portid_t pid)
378 int peer_pi;
379 queueid_t qi;
380 struct rte_port *port;
381- struct rte_ether_addr mac_addr;
382 struct rte_eth_hairpin_cap cap;
383
384 if (port_id_is_invalid(pid, ENABLED_WARN))
385@@ -2598,11 +2582,14 @@ start_port(portid_t pid)
386 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
387 printf("Port %d can not be set into started\n", pi);
388
389- if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
390+ if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
391 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
392- mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
393- mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
394- mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
395+ port->eth_addr.addr_bytes[0],
396+ port->eth_addr.addr_bytes[1],
397+ port->eth_addr.addr_bytes[2],
398+ port->eth_addr.addr_bytes[3],
399+ port->eth_addr.addr_bytes[4],
400+ port->eth_addr.addr_bytes[5]);
401
402 /* at least one port started, need checking link status */
403 need_check_link_status = 1;
404diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
405index aad2eb3..927ca58 100644
406--- a/app/test-pmd/testpmd.h
407+++ b/app/test-pmd/testpmd.h
408@@ -878,7 +878,7 @@ void show_tx_pkt_segments(void);
409 void set_tx_pkt_times(unsigned int *tx_times);
410 void show_tx_pkt_times(void);
411 void set_tx_pkt_split(const char *name);
412-int parse_fec_mode(const char *name, enum rte_eth_fec_mode *mode);
413+int parse_fec_mode(const char *name, uint32_t *fec_capa);
414 void show_fec_capability(uint32_t num, struct rte_eth_fec_capa *speed_fec_capa);
415 void set_nb_pkt_per_burst(uint16_t pkt_burst);
416 char *list_pkt_forwarding_modes(void);
417diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
418index c05ea7a..0fd7290 100644
419--- a/app/test/packet_burst_generator.c
420+++ b/app/test/packet_burst_generator.c
421@@ -142,8 +142,8 @@ uint16_t
422 initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
423 uint8_t *dst_addr, uint16_t pkt_data_len)
424 {
425- ip_hdr->vtc_flow = 0;
426- ip_hdr->payload_len = pkt_data_len;
427+ ip_hdr->vtc_flow = rte_cpu_to_be_32(0x60000000); /* Set version to 6. */
428+ ip_hdr->payload_len = rte_cpu_to_be_16(pkt_data_len);
429 ip_hdr->proto = IPPROTO_UDP;
430 ip_hdr->hop_limits = IP_DEFTTL;
431
432diff --git a/app/test/test_cmdline_lib.c b/app/test/test_cmdline_lib.c
433index bd72df0..d5a09b4 100644
434--- a/app/test/test_cmdline_lib.c
435+++ b/app/test/test_cmdline_lib.c
436@@ -71,10 +71,12 @@ test_cmdline_parse_fns(void)
437 if (cmdline_complete(cl, "buffer", &i, NULL, sizeof(dst)) >= 0)
438 goto error;
439
440+ cmdline_free(cl);
441 return 0;
442
443 error:
444 printf("Error: function accepted null parameter!\n");
445+ cmdline_free(cl);
446 return -1;
447 }
448
449@@ -140,22 +142,31 @@ static int
450 test_cmdline_socket_fns(void)
451 {
452 cmdline_parse_ctx_t ctx;
453+ struct cmdline *cl;
454
455- if (cmdline_stdin_new(NULL, "prompt") != NULL)
456+ cl = cmdline_stdin_new(NULL, "prompt");
457+ if (cl != NULL)
458 goto error;
459- if (cmdline_stdin_new(&ctx, NULL) != NULL)
460+ cl = cmdline_stdin_new(&ctx, NULL);
461+ if (cl != NULL)
462 goto error;
463- if (cmdline_file_new(NULL, "prompt", "/dev/null") != NULL)
464+ cl = cmdline_file_new(NULL, "prompt", "/dev/null");
465+ if (cl != NULL)
466 goto error;
467- if (cmdline_file_new(&ctx, NULL, "/dev/null") != NULL)
468+ cl = cmdline_file_new(&ctx, NULL, "/dev/null");
469+ if (cl != NULL)
470 goto error;
471- if (cmdline_file_new(&ctx, "prompt", NULL) != NULL)
472+ cl = cmdline_file_new(&ctx, "prompt", NULL);
473+ if (cl != NULL)
474 goto error;
475- if (cmdline_file_new(&ctx, "prompt", "-/invalid/~/path") != NULL) {
476+ cl = cmdline_file_new(&ctx, "prompt", "-/invalid/~/path");
477+ if (cl != NULL) {
478 printf("Error: succeeded in opening invalid file for reading!");
479+ cmdline_free(cl);
480 return -1;
481 }
482- if (cmdline_file_new(&ctx, "prompt", "/dev/null") == NULL) {
483+ cl = cmdline_file_new(&ctx, "prompt", "/dev/null");
484+ if (cl == NULL) {
485 printf("Error: failed to open /dev/null for reading!");
486 return -1;
487 }
488@@ -163,9 +174,11 @@ test_cmdline_socket_fns(void)
489 /* void functions */
490 cmdline_stdin_exit(NULL);
491
492+ cmdline_free(cl);
493 return 0;
494 error:
495 printf("Error: function accepted null parameter!\n");
496+ cmdline_free(cl);
497 return -1;
498 }
499
500@@ -176,13 +189,14 @@ test_cmdline_fns(void)
501 struct cmdline *cl;
502
503 memset(&ctx, 0, sizeof(ctx));
504- cl = cmdline_new(&ctx, "test", -1, -1);
505- if (cl == NULL)
506+ cl = cmdline_new(NULL, "prompt", 0, 0);
507+ if (cl != NULL)
508 goto error;
509-
510- if (cmdline_new(NULL, "prompt", 0, 0) != NULL)
511+ cl = cmdline_new(&ctx, NULL, 0, 0);
512+ if (cl != NULL)
513 goto error;
514- if (cmdline_new(&ctx, NULL, 0, 0) != NULL)
515+ cl = cmdline_new(&ctx, "test", -1, -1);
516+ if (cl == NULL)
517 goto error;
518 if (cmdline_in(NULL, "buffer", CMDLINE_TEST_BUFSIZE) >= 0)
519 goto error;
520@@ -198,6 +212,7 @@ test_cmdline_fns(void)
521 cmdline_interact(NULL);
522 cmdline_quit(NULL);
523
524+ cmdline_free(cl);
525 return 0;
526
527 error:
528diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
529index ea24e8e..dd2b035 100644
530--- a/app/test/test_cryptodev.c
531+++ b/app/test/test_cryptodev.c
532@@ -135,10 +135,11 @@ setup_test_string(struct rte_mempool *mpool,
533 struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
534 size_t t_len = len - (blocksize ? (len % blocksize) : 0);
535
536- memset(m->buf_addr, 0, m->buf_len);
537 if (m) {
538- char *dst = rte_pktmbuf_append(m, t_len);
539+ char *dst;
540
541+ memset(m->buf_addr, 0, m->buf_len);
542+ dst = rte_pktmbuf_append(m, t_len);
543 if (!dst) {
544 rte_pktmbuf_free(m);
545 return NULL;
546@@ -12024,7 +12025,7 @@ test_authenticated_decryption_fail_when_corruption(
547 }
548
549 static int
550-test_authenticated_encryt_with_esn(
551+test_authenticated_encrypt_with_esn(
552 struct crypto_testsuite_params *ts_params,
553 struct crypto_unittest_params *ut_params,
554 const struct test_crypto_vector *reference)
555@@ -12811,7 +12812,7 @@ auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt(void)
556 static int
557 auth_encrypt_AES128CBC_HMAC_SHA1_esn_check(void)
558 {
559- return test_authenticated_encryt_with_esn(
560+ return test_authenticated_encrypt_with_esn(
561 &testsuite_params,
562 &unittest_params,
563 &aes128cbc_hmac_sha1_aad_test_vector);
564@@ -13786,7 +13787,7 @@ static struct unit_test_suite cryptodev_ccp_testsuite = {
565 };
566
567 static int
568-test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
569+test_cryptodev_qat(void)
570 {
571 gbl_driver_id = rte_cryptodev_driver_id_get(
572 RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
573@@ -13800,7 +13801,7 @@ test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
574 }
575
576 static int
577-test_cryptodev_virtio(void /*argv __rte_unused, int argc __rte_unused*/)
578+test_cryptodev_virtio(void)
579 {
580 gbl_driver_id = rte_cryptodev_driver_id_get(
581 RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD));
582@@ -13814,7 +13815,7 @@ test_cryptodev_virtio(void /*argv __rte_unused, int argc __rte_unused*/)
583 }
584
585 static int
586-test_cryptodev_aesni_mb(void /*argv __rte_unused, int argc __rte_unused*/)
587+test_cryptodev_aesni_mb(void)
588 {
589 gbl_driver_id = rte_cryptodev_driver_id_get(
590 RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
591@@ -13912,7 +13913,7 @@ test_cryptodev_null(void)
592 }
593
594 static int
595-test_cryptodev_sw_snow3g(void /*argv __rte_unused, int argc __rte_unused*/)
596+test_cryptodev_sw_snow3g(void)
597 {
598 gbl_driver_id = rte_cryptodev_driver_id_get(
599 RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
600@@ -13926,7 +13927,7 @@ test_cryptodev_sw_snow3g(void /*argv __rte_unused, int argc __rte_unused*/)
601 }
602
603 static int
604-test_cryptodev_sw_kasumi(void /*argv __rte_unused, int argc __rte_unused*/)
605+test_cryptodev_sw_kasumi(void)
606 {
607 gbl_driver_id = rte_cryptodev_driver_id_get(
608 RTE_STR(CRYPTODEV_NAME_KASUMI_PMD));
609@@ -13940,7 +13941,7 @@ test_cryptodev_sw_kasumi(void /*argv __rte_unused, int argc __rte_unused*/)
610 }
611
612 static int
613-test_cryptodev_sw_zuc(void /*argv __rte_unused, int argc __rte_unused*/)
614+test_cryptodev_sw_zuc(void)
615 {
616 gbl_driver_id = rte_cryptodev_driver_id_get(
617 RTE_STR(CRYPTODEV_NAME_ZUC_PMD));
618@@ -13984,7 +13985,7 @@ test_cryptodev_mrvl(void)
619 #ifdef RTE_CRYPTO_SCHEDULER
620
621 static int
622-test_cryptodev_scheduler(void /*argv __rte_unused, int argc __rte_unused*/)
623+test_cryptodev_scheduler(void)
624 {
625 gbl_driver_id = rte_cryptodev_driver_id_get(
626 RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
627@@ -14007,7 +14008,7 @@ REGISTER_TEST_COMMAND(cryptodev_scheduler_autotest, test_cryptodev_scheduler);
628 #endif
629
630 static int
631-test_cryptodev_dpaa2_sec(void /*argv __rte_unused, int argc __rte_unused*/)
632+test_cryptodev_dpaa2_sec(void)
633 {
634 gbl_driver_id = rte_cryptodev_driver_id_get(
635 RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
636@@ -14021,7 +14022,7 @@ test_cryptodev_dpaa2_sec(void /*argv __rte_unused, int argc __rte_unused*/)
637 }
638
639 static int
640-test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
641+test_cryptodev_dpaa_sec(void)
642 {
643 gbl_driver_id = rte_cryptodev_driver_id_get(
644 RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD));
645@@ -14073,7 +14074,7 @@ test_cryptodev_octeontx2(void)
646 }
647
648 static int
649-test_cryptodev_caam_jr(void /*argv __rte_unused, int argc __rte_unused*/)
650+test_cryptodev_caam_jr(void)
651 {
652 gbl_driver_id = rte_cryptodev_driver_id_get(
653 RTE_STR(CRYPTODEV_NAME_CAAM_JR_PMD));
654@@ -14115,7 +14116,7 @@ test_cryptodev_bcmfs(void)
655 }
656
657 static int
658-test_cryptodev_qat_raw_api(void /*argv __rte_unused, int argc __rte_unused*/)
659+test_cryptodev_qat_raw_api(void)
660 {
661 int ret;
662
663diff --git a/app/test/test_cryptodev_aes_test_vectors.h b/app/test/test_cryptodev_aes_test_vectors.h
664index c192d75..bb5f09f 100644
665--- a/app/test/test_cryptodev_aes_test_vectors.h
666+++ b/app/test/test_cryptodev_aes_test_vectors.h
667@@ -2650,7 +2650,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
668 .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
669 },
670 {
671- .test_descr = "AES-192-CBC Encryption Scater gather",
672+ .test_descr = "AES-192-CBC Encryption Scatter gather",
673 .test_data = &aes_test_data_10,
674 .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
675 .feature_mask = BLOCKCIPHER_TEST_FEATURE_SG |
676diff --git a/app/test/test_eal_flags.c b/app/test/test_eal_flags.c
677index 932fbe3..b4880ee 100644
678--- a/app/test/test_eal_flags.c
679+++ b/app/test/test_eal_flags.c
680@@ -124,6 +124,7 @@ process_hugefiles(const char * prefix, enum hugepage_action action)
681 case HUGEPAGE_CHECK_EXISTS:
682 {
683 /* file exists, return */
684+ closedir(hugepage_dir);
685 result = 1;
686 goto end;
687 }
688diff --git a/app/test/test_event_crypto_adapter.c b/app/test/test_event_crypto_adapter.c
689index 335211c..71b58fc 100644
690--- a/app/test/test_event_crypto_adapter.c
691+++ b/app/test/test_event_crypto_adapter.c
692@@ -224,8 +224,7 @@ test_op_forward_mode(uint8_t session_less)
693 op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
694 first_xform = &cipher_xform;
695 sym_op->xform = first_xform;
696- uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
697- (sizeof(struct rte_crypto_sym_xform) * 2);
698+ uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH;
699 op->private_data_offset = len;
700 /* Fill in private data information */
701 rte_memcpy(&m_data.response_info, &response_info,
702@@ -419,8 +418,7 @@ test_op_new_mode(uint8_t session_less)
703 op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
704 first_xform = &cipher_xform;
705 sym_op->xform = first_xform;
706- uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
707- (sizeof(struct rte_crypto_sym_xform) * 2);
708+ uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH;
709 op->private_data_offset = len;
710 /* Fill in private data information */
711 rte_memcpy(&m_data.response_info, &response_info,
712@@ -516,7 +514,8 @@ configure_cryptodev(void)
713 NUM_MBUFS, MBUF_CACHE_SIZE,
714 DEFAULT_NUM_XFORMS *
715 sizeof(struct rte_crypto_sym_xform) +
716- MAXIMUM_IV_LENGTH,
717+ MAXIMUM_IV_LENGTH +
718+ sizeof(union rte_event_crypto_metadata),
719 rte_socket_id());
720 if (params.op_mpool == NULL) {
721 RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
722diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
723index 47a7b19..5bb4786 100644
724--- a/app/test/test_mbuf.c
725+++ b/app/test/test_mbuf.c
726@@ -2363,7 +2363,7 @@ test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
727 if (rte_mbuf_refcnt_read(m) != 1)
728 GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);
729
730- buf_iova = rte_mempool_virt2iova(ext_buf_addr);
731+ buf_iova = rte_mem_virt2iova(ext_buf_addr);
732 rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
733 ret_shinfo);
734 if (m->ol_flags != EXT_ATTACHED_MBUF)
735diff --git a/app/test/test_power_cpufreq.c b/app/test/test_power_cpufreq.c
736index 0c3adc5..94a3801 100644
737--- a/app/test/test_power_cpufreq.c
738+++ b/app/test/test_power_cpufreq.c
739@@ -55,18 +55,20 @@ check_cur_freq(unsigned int lcore_id, uint32_t idx, bool turbo)
740 FILE *f;
741 char fullpath[PATH_MAX];
742 char buf[BUFSIZ];
743+ enum power_management_env env;
744 uint32_t cur_freq;
745+ uint32_t freq_conv;
746 int ret = -1;
747 int i;
748
749 if (snprintf(fullpath, sizeof(fullpath),
750- TEST_POWER_SYSFILE_SCALING_FREQ, lcore_id) < 0) {
751+ TEST_POWER_SYSFILE_CPUINFO_FREQ, lcore_id) < 0) {
752 return 0;
753 }
754 f = fopen(fullpath, "r");
755 if (f == NULL) {
756 if (snprintf(fullpath, sizeof(fullpath),
757- TEST_POWER_SYSFILE_CPUINFO_FREQ, lcore_id) < 0) {
758+ TEST_POWER_SYSFILE_SCALING_FREQ, lcore_id) < 0) {
759 return 0;
760 }
761 f = fopen(fullpath, "r");
762@@ -80,15 +82,20 @@ check_cur_freq(unsigned int lcore_id, uint32_t idx, bool turbo)
763 goto fail_all;
764
765 cur_freq = strtoul(buf, NULL, TEST_POWER_CONVERT_TO_DECIMAL);
766-
767- /* convert the frequency to nearest 100000 value
768- * Ex: if cur_freq=1396789 then freq_conv=1400000
769- * Ex: if cur_freq=800030 then freq_conv=800000
770- */
771- unsigned int freq_conv = 0;
772- freq_conv = (cur_freq + TEST_FREQ_ROUNDING_DELTA)
773- / TEST_ROUND_FREQ_TO_N_100000;
774- freq_conv = freq_conv * TEST_ROUND_FREQ_TO_N_100000;
775+ freq_conv = cur_freq;
776+
777+ env = rte_power_get_env();
778+
779+ if (env == PM_ENV_PSTATE_CPUFREQ) {
780+ /* convert the frequency to nearest 100000 value
781+ * Ex: if cur_freq=1396789 then freq_conv=1400000
782+ * Ex: if cur_freq=800030 then freq_conv=800000
783+ */
784+ unsigned int freq_conv = 0;
785+ freq_conv = (cur_freq + TEST_FREQ_ROUNDING_DELTA)
786+ / TEST_ROUND_FREQ_TO_N_100000;
787+ freq_conv = freq_conv * TEST_ROUND_FREQ_TO_N_100000;
788+ }
789
790 if (turbo)
791 ret = (freqs[idx] <= freq_conv ? 0 : -1);
792diff --git a/buildtools/meson.build b/buildtools/meson.build
793index 36161af..cfad51f 100644
794--- a/buildtools/meson.build
795+++ b/buildtools/meson.build
796@@ -7,13 +7,13 @@ check_symbols = find_program('check-symbols.sh')
797 ldflags_ibverbs_static = find_program('options-ibverbs-static.sh')
798 binutils_avx512_check = find_program('binutils-avx512-check.sh')
799
800-# set up map-to-win script using python, either built-in or external
801 python3 = import('python').find_installation(required: false)
802 if python3.found()
803 py3 = [python3]
804 else
805 py3 = ['meson', 'runpython']
806 endif
807+echo = py3 + ['-c', 'import sys; print(*sys.argv[1:])']
808 list_dir_globs = py3 + files('list-dir-globs.py')
809 map_to_win_cmd = py3 + files('map_to_win.py')
810 sphinx_wrapper = py3 + files('call-sphinx-build.py')
811diff --git a/buildtools/symlink-drivers-solibs.py b/buildtools/symlink-drivers-solibs.py
812new file mode 100644
813index 0000000..9c99950
814--- /dev/null
815+++ b/buildtools/symlink-drivers-solibs.py
816@@ -0,0 +1,49 @@
817+#!/usr/bin/env python3
818+# SPDX-License-Identifier: BSD-3-Clause
819+# Copyright(c) 2021 Intel Corporation
820+
821+import os
822+import sys
823+import glob
824+import shutil
825+
826+# post-install script for meson/ninja builds to symlink the PMDs stored in
827+# $libdir/dpdk/pmds-*/ to $libdir. This is needed as some PMDs depend on
828+# others, e.g. PCI device PMDs depending on the PCI bus driver.
829+
830+# parameters to script are paths relative to install prefix:
831+# 1. directory for installed regular libs e.g. lib64
832+# 2. subdirectory of libdir where the PMDs are
833+# 3. directory for installed regular binaries e.g. bin
834+
835+os.chdir(os.environ['MESON_INSTALL_DESTDIR_PREFIX'])
836+
837+lib_dir = sys.argv[1]
838+pmd_subdir = sys.argv[2]
839+bin_dir = sys.argv[3]
840+pmd_dir = os.path.join(lib_dir, pmd_subdir)
841+
842+# copy Windows PMDs to avoid any issues with symlinks since the
843+# build could be a cross-compilation under WSL, Msys or Cygnus.
844+# the filenames are dependent upon the specific toolchain in use.
845+
846+def copy_pmd_files(pattern, to_dir):
847+ for file in glob.glob(os.path.join(pmd_dir, pattern)):
848+ to = os.path.join(to_dir, os.path.basename(file))
849+ shutil.copy2(file, to)
850+ print(to + ' -> ' + file)
851+
852+copy_pmd_files('*rte_*.dll', bin_dir)
853+copy_pmd_files('*rte_*.pdb', bin_dir)
854+copy_pmd_files('*rte_*.lib', lib_dir)
855+copy_pmd_files('*rte_*.dll.a', lib_dir)
856+
857+# symlink shared objects
858+
859+os.chdir(lib_dir)
860+for file in glob.glob(os.path.join(pmd_subdir, 'librte_*.so*')):
861+ to = os.path.basename(file)
862+ if os.path.exists(to):
863+ os.remove(to)
864+ os.symlink(file, to)
865+ print(to + ' -> ' + file)
866diff --git a/config/meson.build b/config/meson.build
867index 5b7439a..b2734fc 100644
868--- a/config/meson.build
869+++ b/config/meson.build
870@@ -61,6 +61,10 @@ if not is_windows
871 meson.add_install_script('../buildtools/symlink-drivers-solibs.sh',
872 get_option('libdir'),
873 pmd_subdir_opt)
874+elif meson.version().version_compare('>=0.55.0')
875+ # 0.55.0 is required to use external program with add_install_script
876+ meson.add_install_script(py3, '../buildtools/symlink-drivers-solibs.py',
877+ get_option('libdir'), pmd_subdir_opt, get_option('bindir'))
878 endif
879
880 # set the machine type and cflags for it
881diff --git a/debian/changelog b/debian/changelog
882index ac0c210..490571c 100644
883--- a/debian/changelog
884+++ b/debian/changelog
885@@ -1,3 +1,14 @@
886+dpdk (20.11.3-0ubuntu1) impish; urgency=medium
887+
888+ * Merge LTS stable release 20.11.3 (LP: #1940913)
889+ Release notes are available at:
890+ https://doc.dpdk.org/guides-20.11/rel_notes/release_20_11.html#id1
891+ - Remove test-catch-coredumps.patch [now part of upstream]
892+ * d/p/u/lp-1940957-net-i40e-support-25G-AOC-ACC-cables.patch: fix issues
893+ with 25G AOC cables (LP: #1940957)
894+
895+ -- Christian Ehrhardt <christian.ehrhardt@canonical.com> Tue, 24 Aug 2021 12:28:59 +0200
896+
897 dpdk (20.11.2-1ubuntu3) impish; urgency=medium
898
899 * No-change rebuild against libipsec-mb1
900diff --git a/debian/patches/series b/debian/patches/series
901index 37037a5..ef7c9a1 100644
902--- a/debian/patches/series
903+++ b/debian/patches/series
904@@ -3,3 +3,4 @@ disable_lcores_autotest_ppc.patch
905 disable_autopkgtest_fails.patch
906 disable_armhf_autopkgtest_fails.patch
907 disable_ppc64_autopkgtest_fails.patch
908+ubuntu/lp-1940957-net-i40e-support-25G-AOC-ACC-cables.patch
909diff --git a/debian/patches/ubuntu/lp-1940957-net-i40e-support-25G-AOC-ACC-cables.patch b/debian/patches/ubuntu/lp-1940957-net-i40e-support-25G-AOC-ACC-cables.patch
910new file mode 100644
911index 0000000..8606792
912--- /dev/null
913+++ b/debian/patches/ubuntu/lp-1940957-net-i40e-support-25G-AOC-ACC-cables.patch
914@@ -0,0 +1,42 @@
915+From f81d60e607bb8c13fcb13450e85d74721f1d9c8f Mon Sep 17 00:00:00 2001
916+From: Yury Kylulin <yury.kylulin@intel.com>
917+Date: Tue, 13 Apr 2021 18:29:50 +0300
918+Subject: [PATCH] net/i40e: support 25G AOC/ACC cables
919+
920+[ upstream commit b1daa3461429e7674206a714c17adca65e9b44b4 ]
921+
922+Enable additional PHY types (25G-AOC and 25G-ACC) for set PHY config
923+command.
924+
925+Ubuntu bug: https://bugs.launchpad.net/ubuntu/+source/dpdk/+bug/1940957
926+Cc: stable@dpdk.org
927+
928+Signed-off-by: Yury Kylulin <yury.kylulin@intel.com>
929+Tested-by: Ashish Paul <apaul@juniper.net>
930+Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
931+
932+Origin: upstream, https://github.com/cpaelzer/dpdk-stable-queue/commit/f81d60e607bb8c13fcb13450e85d74721f1d9c8f
933+Bug-Ubuntu: https://bugs.launchpad.net/bugs/1940957
934+Last-Update: 2021-09-06
935+
936+---
937+ drivers/net/i40e/i40e_ethdev.c | 3 ++-
938+ 1 file changed, 2 insertions(+), 1 deletion(-)
939+
940+diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
941+index 508f90595f..0c896ea915 100644
942+--- a/drivers/net/i40e/i40e_ethdev.c
943++++ b/drivers/net/i40e/i40e_ethdev.c
944+@@ -2264,7 +2264,8 @@ i40e_phy_conf_link(struct i40e_hw *hw,
945+ phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
946+ phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
947+ I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
948+- I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
949++ I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
950++ I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
951+ phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
952+ phy_conf.eee_capability = phy_ab.eee_capability;
953+ phy_conf.eeer = phy_ab.eeer_val;
954+--
955+2.32.0
956+
957diff --git a/devtools/check-maintainers.sh b/devtools/check-maintainers.sh
958index df3f740..71697bb 100755
959--- a/devtools/check-maintainers.sh
960+++ b/devtools/check-maintainers.sh
961@@ -15,10 +15,10 @@ files () # <path> [<path> ...]
962 if [ -z "$1" ] ; then
963 return
964 fi
965- if [ -d .git ] ; then
966+ if [ -r .git ] ; then
967 git ls-files "$1"
968 else
969- find "$1" -type f |
970+ find $1 -type f |
971 sed 's,^\./,,'
972 fi |
973 # if not ended by /
974diff --git a/doc/guides/contributing/coding_style.rst b/doc/guides/contributing/coding_style.rst
975index bb3f3ef..a2a8130 100644
976--- a/doc/guides/contributing/coding_style.rst
977+++ b/doc/guides/contributing/coding_style.rst
978@@ -55,7 +55,7 @@ License Header
979 ~~~~~~~~~~~~~~
980
981 Each file must begin with a special comment containing the
982-`Software Package Data Exchange (SPDX) License Identfier <https://spdx.org/using-spdx-license-identifier>`_.
983+`Software Package Data Exchange (SPDX) License Identifier <https://spdx.org/using-spdx-license-identifier>`_.
984
985 Generally this is the BSD License, except for code granted special exceptions.
986 The SPDX licences identifier is sufficient, a file should not contain
987diff --git a/doc/guides/cryptodevs/scheduler.rst b/doc/guides/cryptodevs/scheduler.rst
988index 835d999..d08207f 100644
989--- a/doc/guides/cryptodevs/scheduler.rst
990+++ b/doc/guides/cryptodevs/scheduler.rst
991@@ -118,7 +118,7 @@ operation:
992 than the designated threshold, otherwise it will be handled by the secondary
993 worker.
994
995- A typical usecase in this mode is with the QAT cryptodev as the primary and
996+ A typical use case in this mode is with the QAT cryptodev as the primary and
997 a software cryptodev as the secondary worker. This may help applications to
998 process additional crypto workload than what the QAT cryptodev can handle on
999 its own, by making use of the available CPU cycles to deal with smaller
1000diff --git a/doc/guides/howto/pvp_reference_benchmark.rst b/doc/guides/howto/pvp_reference_benchmark.rst
1001index 553458d..484de3b 100644
1002--- a/doc/guides/howto/pvp_reference_benchmark.rst
1003+++ b/doc/guides/howto/pvp_reference_benchmark.rst
1004@@ -26,7 +26,7 @@ Setup overview
1005
1006 PVP setup using 2 NICs
1007
1008-In this diagram, each red arrow represents one logical core. This use-case
1009+In this diagram, each red arrow represents one logical core. This use case
1010 requires 6 dedicated logical cores. A forwarding configuration with a single
1011 NIC is also possible, requiring 3 logical cores.
1012
1013diff --git a/doc/guides/nics/bnx2x.rst b/doc/guides/nics/bnx2x.rst
1014index 9ad4f9f..788a6da 100644
1015--- a/doc/guides/nics/bnx2x.rst
1016+++ b/doc/guides/nics/bnx2x.rst
1017@@ -105,7 +105,7 @@ Jumbo: Limitation
1018 -----------------
1019
1020 Rx descriptor limit for number of segments per MTU is set to 1.
1021-PMD doesn't support Jumbo Rx scatter gather. Some applciations can
1022+PMD doesn't support Jumbo Rx scatter gather. Some applications can
1023 adjust mbuf_size based on this param and max_pkt_len.
1024
1025 For others, PMD detects the condition where Rx packet length cannot
1026diff --git a/doc/guides/nics/dpaa.rst b/doc/guides/nics/dpaa.rst
1027index 917482d..7355ec3 100644
1028--- a/doc/guides/nics/dpaa.rst
1029+++ b/doc/guides/nics/dpaa.rst
1030@@ -297,7 +297,7 @@ FMC - FMAN Configuration Tool
1031
1032
1033 The details can be found in FMC Doc at:
1034- `Frame Mnager Configuration Tool <https://www.nxp.com/docs/en/application-note/AN4760.pdf>`_.
1035+ `Frame Manager Configuration Tool <https://www.nxp.com/docs/en/application-note/AN4760.pdf>`_.
1036
1037 FMLIB
1038 ~~~~~
1039@@ -307,7 +307,7 @@ FMLIB
1040
1041 This is an alternate to the FMC based configuration. This library provides
1042 direct ioctl based interfaces for FMAN configuration as used by the FMC tool
1043- as well. This helps in overcoming the main limitaiton of FMC - i.e. lack
1044+ as well. This helps in overcoming the main limitation of FMC - i.e. lack
1045 of dynamic configuration.
1046
1047 The location for the fmd driver as used by FMLIB and FMC is as follows:
1048@@ -319,7 +319,7 @@ VSP (Virtual Storage Profile)
1049 The storage profiled are means to provide virtualized interface. A ranges of
1050 storage profiles cab be associated to Ethernet ports.
1051 They are selected during classification. Specify how the frame should be
1052- written to memory and which buffer pool to select for packet storange in
1053+ written to memory and which buffer pool to select for packet storage in
1054 queues. Start and End margin of buffer can also be configured.
1055
1056 Limitations
1057diff --git a/doc/guides/nics/ena.rst b/doc/guides/nics/ena.rst
1058index 0f1f63f..df72020 100644
1059--- a/doc/guides/nics/ena.rst
1060+++ b/doc/guides/nics/ena.rst
1061@@ -234,7 +234,7 @@ Example output:
1062
1063 [...]
1064 EAL: PCI device 0000:00:06.0 on NUMA socket -1
1065- EAL: Invalid NUMA socket, default to 0
1066+ EAL: Device 0000:00:06.0 is not NUMA-aware, defaulting socket to 0
1067 EAL: probe driver: 1d0f:ec20 net_ena
1068
1069 Interactive-mode selected
1070diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
1071index 24d5a69..84e58fc 100644
1072--- a/doc/guides/nics/mlx5.rst
1073+++ b/doc/guides/nics/mlx5.rst
1074@@ -591,6 +591,13 @@ Driver options
1075 it is not recommended and may prevent NIC from sending packets over
1076 some configurations.
1077
1078+ For ConnectX-4 and ConnectX-4 Lx NICs, automatically configured value
1079+ is insufficient for some traffic, because they require at least all L2 headers
1080+ to be inlined. For example, Q-in-Q adds 4 bytes to default 18 bytes
1081+ of Ethernet and VLAN, thus ``txq_inline_min`` must be set to 22.
1082+ MPLS would add 4 bytes per label. Final value must account for all possible
1083+ L2 encapsulation headers used in particular environment.
1084+
1085 Please, note, this minimal data inlining disengages eMPW feature (Enhanced
1086 Multi-Packet Write), because last one does not support partial packet inlining.
1087 This is not very critical due to minimal data inlining is mostly required
1088@@ -1281,7 +1288,7 @@ the DPDK application.
1089
1090 echo -n "<device pci address" > /sys/bus/pci/drivers/mlx5_core/unbind
1091
1092-5. Enbale switchdev mode::
1093+5. Enable switchdev mode::
1094
1095 echo switchdev > /sys/class/net/<net device>/compat/devlink/mode
1096
1097diff --git a/doc/guides/nics/octeontx2.rst b/doc/guides/nics/octeontx2.rst
1098index a4f2244..7b86cf5 100644
1099--- a/doc/guides/nics/octeontx2.rst
1100+++ b/doc/guides/nics/octeontx2.rst
1101@@ -153,7 +153,7 @@ Runtime Config Options
1102
1103 -a 0002:02:00.0,max_sqb_count=64
1104
1105- With the above configuration, each send queue's decscriptor buffer count is
1106+ With the above configuration, each send queue's descriptor buffer count is
1107 limited to a maximum of 64 buffers.
1108
1109 - ``Switch header enable`` (default ``none``)
1110@@ -242,7 +242,7 @@ configure the following features:
1111 #. Hierarchical scheduling
1112 #. Single rate - Two color, Two rate - Three color shaping
1113
1114-Both DWRR and Static Priority(SP) hierarchial scheduling is supported.
1115+Both DWRR and Static Priority(SP) hierarchical scheduling is supported.
1116
1117 Every parent can have atmost 10 SP Children and unlimited DWRR children.
1118
1119diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
1120index aabd0f1..1f0aba3 100644
1121--- a/doc/guides/nics/virtio.rst
1122+++ b/doc/guides/nics/virtio.rst
1123@@ -509,7 +509,7 @@ are shown in below table:
1124 Split virtqueue in-order non-mergeable path virtio_recv_pkts_inorder virtio_xmit_pkts_inorder
1125 Split virtqueue vectorized Rx path virtio_recv_pkts_vec virtio_xmit_pkts
1126 Packed virtqueue mergeable path virtio_recv_mergeable_pkts_packed virtio_xmit_pkts_packed
1127- Packed virtqueue non-meregable path virtio_recv_pkts_packed virtio_xmit_pkts_packed
1128+ Packed virtqueue non-mergeable path virtio_recv_pkts_packed virtio_xmit_pkts_packed
1129 Packed virtqueue in-order mergeable path virtio_recv_mergeable_pkts_packed virtio_xmit_pkts_packed
1130 Packed virtqueue in-order non-mergeable path virtio_recv_pkts_packed virtio_xmit_pkts_packed
1131 Packed virtqueue vectorized Rx path virtio_recv_pkts_packed_vec virtio_xmit_pkts_packed
1132diff --git a/doc/guides/platform/dpaa.rst b/doc/guides/platform/dpaa.rst
1133index 20a0e39..3896929 100644
1134--- a/doc/guides/platform/dpaa.rst
1135+++ b/doc/guides/platform/dpaa.rst
1136@@ -78,7 +78,7 @@ compatible board:
1137 based config (if /tmp/fmc.bin is present). DPAA FMD will be used only if no
1138 previous fmc config is existing.
1139
1140- Note that fmlib based integratin rely on underlying fmd driver in kernel,
1141+ Note that fmlib based integration rely on underlying fmd driver in kernel,
1142 which is available as part of NXP kernel or NXP SDK.
1143
1144 The following dependencies are not part of DPDK and must be installed
1145diff --git a/doc/guides/prog_guide/bbdev.rst b/doc/guides/prog_guide/bbdev.rst
1146index 6b2bd54..9619280 100644
1147--- a/doc/guides/prog_guide/bbdev.rst
1148+++ b/doc/guides/prog_guide/bbdev.rst
1149@@ -639,7 +639,7 @@ optionally the ``soft_output`` mbuf data pointers.
1150 "soft output","soft LLR output buffer (optional)"
1151 "op_flags","bitmask of all active operation capabilities"
1152 "rv_index","redundancy version index [0..3]"
1153- "iter_max","maximum number of iterations to perofrm in decode all CBs"
1154+ "iter_max","maximum number of iterations to perform in decode all CBs"
1155 "iter_min","minimum number of iterations to perform in decoding all CBs"
1156 "iter_count","number of iterations to performed in decoding all CBs"
1157 "ext_scale","scale factor on extrinsic info (5 bits)"
1158diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
1159index 1f30e13..d124eb0 100644
1160--- a/doc/guides/prog_guide/env_abstraction_layer.rst
1161+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
1162@@ -465,7 +465,7 @@ devices would fail anyway.
1163 - By default, the mempool, first asks for IOVA-contiguous memory using
1164 ``RTE_MEMZONE_IOVA_CONTIG``. This is slow in RTE_IOVA_PA mode and it may
1165 affect the application boot time.
1166- - It is easy to enable large amount of IOVA-contiguous memory use-cases
1167+ - It is easy to enable large amount of IOVA-contiguous memory use cases
1168 with IOVA in VA mode.
1169
1170 It is expected that all PCI drivers work in both RTE_IOVA_PA and
1171diff --git a/doc/guides/prog_guide/eventdev.rst b/doc/guides/prog_guide/eventdev.rst
1172index ccde086..347203f 100644
1173--- a/doc/guides/prog_guide/eventdev.rst
1174+++ b/doc/guides/prog_guide/eventdev.rst
1175@@ -120,7 +120,7 @@ Ports
1176 ~~~~~
1177
1178 Ports are the points of contact between worker cores and the eventdev. The
1179-general use-case will see one CPU core using one port to enqueue and dequeue
1180+general use case will see one CPU core using one port to enqueue and dequeue
1181 events from an eventdev. Ports are linked to queues in order to retrieve events
1182 from those queues (more details in `Linking Queues and Ports`_ below).
1183
1184diff --git a/doc/guides/prog_guide/multi_proc_support.rst b/doc/guides/prog_guide/multi_proc_support.rst
1185index 6b0ac30..815e8bd 100644
1186--- a/doc/guides/prog_guide/multi_proc_support.rst
1187+++ b/doc/guides/prog_guide/multi_proc_support.rst
1188@@ -325,7 +325,7 @@ supported. However, since sending messages (not requests) does not involve an
1189 IPC thread, sending messages while processing another message or request is
1190 supported.
1191
1192-Since the memory sybsystem uses IPC internally, memory allocations and IPC must
1193+Since the memory subsystem uses IPC internally, memory allocations and IPC must
1194 not be mixed: it is not safe to use IPC inside a memory-related callback, nor is
1195 it safe to allocate/free memory inside IPC callbacks. Attempting to do so may
1196 lead to a deadlock.
1197diff --git a/doc/guides/prog_guide/qos_framework.rst b/doc/guides/prog_guide/qos_framework.rst
1198index 4e4ea33..e403b90 100644
1199--- a/doc/guides/prog_guide/qos_framework.rst
1200+++ b/doc/guides/prog_guide/qos_framework.rst
1201@@ -737,7 +737,7 @@ Strict priority scheduling of traffic classes within the same pipe is implemente
1202 which selects the queues in ascending order.
1203 Therefore, queue 0 (associated with TC 0, highest priority TC) is handled before
1204 queue 1 (TC 1, lower priority than TC 0),
1205-which is handled before queue 2 (TC 2, lower priority than TC 1) and it conitnues until queues of all TCs except the
1206+which is handled before queue 2 (TC 2, lower priority than TC 1) and it continues until queues of all TCs except the
1207 lowest priority TC are handled. At last, queues 12..15 (best effort TC, lowest priority TC) are handled.
1208
1209 Upper Limit Enforcement
1210diff --git a/doc/guides/prog_guide/regexdev.rst b/doc/guides/prog_guide/regexdev.rst
1211index 3d8b591..5ca7e0c 100644
1212--- a/doc/guides/prog_guide/regexdev.rst
1213+++ b/doc/guides/prog_guide/regexdev.rst
1214@@ -124,7 +124,7 @@ The configuration mode is depended on the PMD capabilities.
1215
1216 Online rule configuration is done using the following API functions:
1217 ``rte_regexdev_rule_db_update`` which add / remove rules from the rules
1218-precomplied list, and ``rte_regexdev_rule_db_compile_activate``
1219+precompiled list, and ``rte_regexdev_rule_db_compile_activate``
1220 which compile the rules and loads them to the RegEx HW.
1221
1222 Offline rule configuration can be done by adding a pointer to the compiled
1223diff --git a/doc/guides/prog_guide/writing_efficient_code.rst b/doc/guides/prog_guide/writing_efficient_code.rst
1224index 7baeaae..a61e832 100644
1225--- a/doc/guides/prog_guide/writing_efficient_code.rst
1226+++ b/doc/guides/prog_guide/writing_efficient_code.rst
1227@@ -143,20 +143,21 @@ In order to achieve higher throughput,
1228 the DPDK attempts to aggregate the cost of processing each packet individually by processing packets in bursts.
1229
1230 Using the testpmd application as an example,
1231-the burst size can be set on the command line to a value of 16 (also the default value).
1232-This allows the application to request 16 packets at a time from the PMD.
1233+the burst size can be set on the command line to a value of 32 (also the default value).
1234+This allows the application to request 32 packets at a time from the PMD.
1235 The testpmd application then immediately attempts to transmit all the packets that were received,
1236-in this case, all 16 packets.
1237+in this case, all 32 packets.
1238
1239 The packets are not transmitted until the tail pointer is updated on the corresponding TX queue of the network port.
1240 This behavior is desirable when tuning for high throughput because
1241-the cost of tail pointer updates to both the RX and TX queues can be spread across 16 packets,
1242+the cost of tail pointer updates to both the RX and TX queues can be spread
1243+across 32 packets,
1244 effectively hiding the relatively slow MMIO cost of writing to the PCIe* device.
1245 However, this is not very desirable when tuning for low latency because
1246-the first packet that was received must also wait for another 15 packets to be received.
1247-It cannot be transmitted until the other 15 packets have also been processed because
1248+the first packet that was received must also wait for another 31 packets to be received.
1249+It cannot be transmitted until the other 31 packets have also been processed because
1250 the NIC will not know to transmit the packets until the TX tail pointer has been updated,
1251-which is not done until all 16 packets have been processed for transmission.
1252+which is not done until all 32 packets have been processed for transmission.
1253
1254 To consistently achieve low latency, even under heavy system load,
1255 the application developer should avoid processing packets in bunches.
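
The burst trade-off described above amortizes one MMIO doorbell write over the whole burst. A minimal C sketch of that pattern, assuming a port and queue already configured by the application (``forward_burst`` and ``BURST_SIZE`` are illustrative names, not from this tree)::

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32 /* the corrected testpmd default */

    /* Receive up to one burst and retransmit it; the TX tail pointer is
     * updated once per rte_eth_tx_burst() call, not once per packet. */
    static void
    forward_burst(uint16_t port, uint16_t queue)
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t nb_rx, nb_tx, i;

        nb_rx = rte_eth_rx_burst(port, queue, pkts, BURST_SIZE);
        if (nb_rx == 0)
            return;
        nb_tx = rte_eth_tx_burst(port, queue, pkts, nb_rx);
        /* Drop whatever the TX ring could not accept. */
        for (i = nb_tx; i < nb_rx; i++)
            rte_pktmbuf_free(pkts[i]);
    }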
1256diff --git a/doc/guides/rawdevs/ioat.rst b/doc/guides/rawdevs/ioat.rst
1257index 250cfc4..59ba207 100644
1258--- a/doc/guides/rawdevs/ioat.rst
1259+++ b/doc/guides/rawdevs/ioat.rst
1260@@ -65,7 +65,7 @@ To assign an engine to a group::
1261 $ accel-config config-engine dsa0/engine0.1 --group-id=1
1262
1263 To assign work queues to groups for passing descriptors to the engines a similar accel-config command can be used.
1264-However, the work queues also need to be configured depending on the use-case.
1265+However, the work queues also need to be configured depending on the use case.
1266 Some configuration options include:
1267
1268 * mode (Dedicated/Shared): Indicates whether a WQ may accept jobs from multiple queues simultaneously.
1269diff --git a/doc/guides/rawdevs/ntb.rst b/doc/guides/rawdevs/ntb.rst
1270index 2c5fa76..2bb115d 100644
1271--- a/doc/guides/rawdevs/ntb.rst
1272+++ b/doc/guides/rawdevs/ntb.rst
1273@@ -17,7 +17,7 @@ some information by using scratchpad registers.
1274 BIOS setting on Intel Xeon
1275 --------------------------
1276
1277-Intel Non-transparent Bridge needs special BIOS setting. The referencce for
1278+Intel Non-transparent Bridge needs special BIOS setting. The reference for
1279 Skylake is https://www.intel.com/content/dam/support/us/en/documents/server-products/Intel_Xeon_Processor_Scalable_Family_BIOS_User_Guide.pdf
1280
1281 - Set the needed PCIe port as NTB to NTB mode on both hosts.
1282diff --git a/doc/guides/regexdevs/features_overview.rst b/doc/guides/regexdevs/features_overview.rst
1283index f90b394..c512bde 100644
1284--- a/doc/guides/regexdevs/features_overview.rst
1285+++ b/doc/guides/regexdevs/features_overview.rst
1286@@ -16,7 +16,7 @@ PCRE atomic grouping
1287 Support PCRE atomic grouping.
1288
1289 PCRE back reference
1290- Support PCRE back regerence.
1291+ Support PCRE back reference.
1292
1293 PCRE back tracking ctrl
1294 Support PCRE back tracking ctrl.
1295diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
1296index 2f498a0..c0d7efa 100644
1297--- a/doc/guides/rel_notes/deprecation.rst
1298+++ b/doc/guides/rel_notes/deprecation.rst
1299@@ -27,16 +27,18 @@ Deprecation Notices
1300
1301 * rte_atomicNN_xxx: These APIs do not take memory order parameter. This does
1302 not allow for writing optimized code for all the CPU architectures supported
1303- in DPDK. DPDK will adopt C11 atomic operations semantics and provide wrappers
1304- using C11 atomic built-ins. These wrappers must be used for patches that
1305- need to be merged in 20.08 onwards. This change will not introduce any
1306- performance degradation.
1307+ in DPDK. DPDK has adopted the atomic operations from
1308+ https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html. These
1309+ operations must be used for patches that need to be merged in 20.08 onwards.
1310+ This change will not introduce any performance degradation.
1311
1312 * rte_smp_*mb: These APIs provide full barrier functionality. However, many
1313- use cases do not require full barriers. To support such use cases, DPDK will
1314- adopt C11 barrier semantics and provide wrappers using C11 atomic built-ins.
1315- These wrappers must be used for patches that need to be merged in 20.08
1316- onwards. This change will not introduce any performance degradation.
1317+ use cases do not require full barriers. To support such use cases, DPDK has
1318+ adopted atomic operations from
1319+ https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html. These
1320+ operations and a new wrapper ``rte_atomic_thread_fence`` instead of
1321+ ``__atomic_thread_fence`` must be used for patches that need to be merged in
1322+ 20.08 onwards. This change will not introduce any performance degradation.
1323
1324 * lib: will fix extending some enum/define breaking the ABI. There are multiple
1325 samples in DPDK that enum/define terminated with a ``.*MAX.*`` value which is
1326@@ -127,12 +129,6 @@ Deprecation Notices
1327 from the release: ``0x16c8, 0x16c9, 0x16ca, 0x16ce, 0x16cf, 0x16df,``
1328 ``0x16d0, 0x16d1, 0x16d2, 0x16d4, 0x16d5, 0x16e7, 0x16e8, 0x16e9``.
1329
1330-* sched: To allow more traffic classes, flexible mapping of pipe queues to
1331- traffic classes, and subport level configuration of pipes and queues
1332- changes will be made to macros, data structures and API functions defined
1333- in "rte_sched.h". These changes are aligned to improvements suggested in the
1334- RFC https://mails.dpdk.org/archives/dev/2018-November/120035.html.
1335-
1336 * metrics: The function ``rte_metrics_init`` will have a non-void return
1337 in order to notify errors instead of calling ``rte_exit``.
1338
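
The two updated deprecation notices amount to: use the GCC ``__atomic`` built-ins for read-modify-write operations, and ``rte_atomic_thread_fence`` where a standalone fence is unavoidable. A minimal sketch of the recommended style (``counter`` and ``publish`` are illustrative)::

    #include <stdint.h>
    #include <rte_atomic.h>

    static uint64_t counter;

    static void
    publish(void)
    {
        /* Relaxed suffices when only atomicity of the add is needed. */
        __atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED);
        /* Standalone fence: use the wrapper, not __atomic_thread_fence(). */
        rte_atomic_thread_fence(__ATOMIC_RELEASE);
    }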
1339diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
1340index 92e0ec6..3cec914 100644
1341--- a/doc/guides/rel_notes/release_16_11.rst
1342+++ b/doc/guides/rel_notes/release_16_11.rst
1343@@ -77,7 +77,7 @@ New Features
1344 the current version, even 64 bytes packets take two slots with Virtio PMD on guest
1345 side.
1346
1347- The main impact is better performance for 0% packet loss use-cases, as it
1348+ The main impact is better performance for 0% packet loss use cases, as it
1349 behaves as if the virtqueue size was enlarged, so more packets can be buffered
1350 in the case of system perturbations. On the downside, small performance degradations
1351 were measured when running micro-benchmarks.
1352diff --git a/doc/guides/rel_notes/release_19_08.rst b/doc/guides/rel_notes/release_19_08.rst
1353index cbb27e8..d2baa82 100644
1354--- a/doc/guides/rel_notes/release_19_08.rst
1355+++ b/doc/guides/rel_notes/release_19_08.rst
1356@@ -151,7 +151,7 @@ New Features
1357 * Added multi-queue support to allow one af_xdp vdev with multiple netdev
1358 queues.
1359 * Enabled "need_wakeup" feature which can provide efficient support for the
1360- usecase where the application and driver executing on the same core.
1361+ use case where the application and driver are executing on the same core.
1362
1363 * **Enabled infinite Rx in the PCAP PMD.**
1364
1365diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
1366index 552c97e..b2f76bc 100644
1367--- a/doc/guides/rel_notes/release_20_11.rst
1368+++ b/doc/guides/rel_notes/release_20_11.rst
1369@@ -2109,3 +2109,343 @@ Tested Platforms
1370 Fixed in 21.08.
1371 * The hash value remains unchanged when the SCTP port value changed.
1372 Fixed in 21.08 new feature.
1373+
1374+20.11.3 Release Notes
1375+---------------------
1376+
1377+20.11.3 Fixes
1378+~~~~~~~~~~~~~
1379+
1380+* app/crypto-perf: fix out-of-place mempool allocation
1381+* app/test: fix IPv6 header initialization
1382+* app/testpmd: change port link speed without stopping all
1383+* app/testpmd: fix help string for port reset
1384+* app/testpmd: fix IPv4 checksum
1385+* app/testpmd: fix MAC address after port reset
1386+* app/testpmd: fix offloads for newly attached port
1387+* app/testpmd: fix Tx checksum calculation for tunnel
1388+* app/testpmd: fix type of FEC mode parsing output
1389+* bitmap: fix buffer overrun in bitmap init
1390+* build: support drivers symlink on Windows
1391+* bus: clarify log for non-NUMA-aware devices
1392+* bus/dpaa: fix freeing in FMAN interface destructor
1393+* bus/pci: fix IOVA as VA support for PowerNV
1394+* bus/pci: fix leak for unbound devices
1395+* common/mlx5: fix compatibility with OFED port query API
1396+* common/mlx5: fix memory region leak
1397+* common/mlx5: fix Netlink port name padding in probing
1398+* common/mlx5: fix Netlink receive message buffer size
1399+* common/mlx5: use new port query API if available
1400+* crypto/aesni_gcm: fix performance on some AVX512 CPUs
1401+* cryptodev: fix freeing after device release
1402+* crypto/mvsam: fix AES-GCM session parameters
1403+* crypto/mvsam: fix capabilities
1404+* crypto/mvsam: fix options parsing
1405+* crypto/mvsam: fix session data reset
1406+* crypto/octeontx2: fix IPsec session member overlap
1407+* crypto/octeontx2: fix lookaside IPsec IV pointer
1408+* crypto/octeontx: fix freeing after device release
1409+* crypto/qat: disable asymmetric crypto on GEN3
1410+* crypto/qat: fix Arm build with special memcpy
1411+* devtools: fix file listing in maintainers check
1412+* distributor: fix 128-bit write alignment
1413+* doc: add limitation for ConnectX-4 with L2 in mlx5 guide
1414+* doc: fix build on Windows with Meson 0.58
1415+* doc: fix default burst size in testpmd
1416+* doc: fix spelling
1417+* doc: fix typo in SPDX tag
1418+* doc: remove old deprecation notice for sched
1419+* doc: update atomic operation deprecation
1420+* drivers/net: fix memzone allocations for DMA memory
1421+* eal/windows: check callback parameter of alarm functions
1422+* eal/windows: cleanup virt2phys handle
1423+* ethdev: fix doc of flow action
1424+* eventdev: fix event port setup in Tx adapter
1425+* examples/l2fwd: fix [no-]mac-updating options
1426+* flow_classify: fix leaking rules on delete
1427+* graph: fix memory leak in stats
1428+* graph: fix null dereference in stats
1429+* ipc: stop mp control thread on cleanup
1430+* kni: fix crash on userspace VA for segmented packets
1431+* kni: fix mbuf allocation for kernel side use
1432+* malloc: fix size annotation for NUMA-aware realloc
1433+* mempool/octeontx2: fix shift calculation
1434+* net/bnxt: check access to possible null pointer
1435+* net/bnxt: cleanup code
1436+* net/bnxt: clear cached statistics
1437+* net/bnxt: detect bad opaque in Rx completion
1438+* net/bnxt: fix aarch32 build
1439+* net/bnxt: fix auto-negotiation on Whitney+
1440+* net/bnxt: fix check for PTP support in FW
1441+* net/bnxt: fix error handling in VNIC prepare
1442+* net/bnxt: fix error messages in VNIC prepare
1443+* net/bnxt: fix missing barriers in completion handling
1444+* net/bnxt: fix nested lock during bonding
1445+* net/bnxt: fix null dereference in interrupt handler
1446+* net/bnxt: fix ring allocation and free
1447+* net/bnxt: fix ring and context memory allocation
1448+* net/bnxt: fix Rx burst size constraint
1449+* net/bnxt: fix Rx interrupt setting
1450+* net/bnxt: fix scalar Tx completion handling
1451+* net/bnxt: fix Tx descriptor status implementation
1452+* net/bnxt: fix typo in log message
1453+* net/bnxt: improve probing log message
1454+* net/bnxt: invoke device removal event on recovery failure
1455+* net/bnxt: remove unnecessary code
1456+* net/bnxt: remove unnecessary comment
1457+* net/bnxt: remove workaround for default VNIC
1458+* net/bnxt: set flow error after tunnel redirection free
1459+* net/bnxt: set flow error when free filter not available
1460+* net/bnxt: use common function to free VNIC resource
1461+* net/bnxt: workaround spurious zero stats in Thor
1462+* net/bonding: check flow setting
1463+* net/bonding: fix error message on flow verify
1464+* net/dpaa: fix headroom in VSP case
1465+* net/ena: enable multi-segment in Tx offload flags
1466+* net/ena: trigger reset on Tx prepare failure
1467+* net/hinic/base: fix LRO
1468+* net/hinic: fix MTU consistency with firmware
1469+* net/hinic: increase protection of the VLAN
1470+* net/hns3: fix Arm SVE build with GCC 8.3
1471+* net/hns3: fix delay for waiting to stop Rx/Tx
1472+* net/hns3: fix fake queue rollback
1473+* net/hns3: fix filter parsing comment
1474+* net/hns3: fix flow rule list in multi-process
1475+* net/hns3: fix maximum queues on configuration failure
1476+* net/hns3: fix residual MAC address entry
1477+* net/hns3: fix timing of clearing interrupt source
1478+* net/hns3: fix Tx prepare after stop
1479+* net/hns3: fix VLAN strip log
1480+* net/hns3: increase VF reset retry maximum
1481+* net/i40e: fix descriptor scan on Arm
1482+* net/i40e: fix flow director input set conflict
1483+* net/i40e: fix multi-process shared data
1484+* net/i40e: fix raw packet flow director
1485+* net/i40e: fix use after free in FDIR release
1486+* net/iavf: fix handling of unsupported promiscuous
1487+* net/iavf: fix RSS key access out of bound
1488+* net/iavf: fix scalar Rx
1489+* net/iavf: fix Tx threshold check
1490+* net/ice: fix data path in secondary process
1491+* net/ice: fix data path selection in secondary process
1492+* net/ice: fix default RSS key generation
1493+* net/ice: fix memzone leak when firmware is missing
1494+* net/ice: fix overflow in maximum packet length config
1495+* net/ixgbe: fix flow entry access after freeing
1496+* net/memif: fix abstract socket address length
1497+* net/mlx5: add Tx scheduling check on queue creation
1498+* net/mlx5: export PMD-specific API file
1499+* net/mlx5: fix default queue number in RSS flow rule
1500+* net/mlx5: fix flow engine type in function name
1501+* net/mlx5: fix imissed statistics
1502+* net/mlx5: fix indirect action modify rollback
1503+* net/mlx5: fix IPIP multi-tunnel validation
1504+* net/mlx5: fix match MPLS over GRE with key
1505+* net/mlx5: fix missing RSS expandable items
1506+* net/mlx5: fix missing RSS expansion of IPv6 frag
1507+* net/mlx5: fix MPLS RSS expansion
1508+* net/mlx5: fix multi-segment inline for the first segments
1509+* net/mlx5: fix overflow in mempool argument
1510+* net/mlx5: fix pattern expansion in RSS flow rules
1511+* net/mlx5: fix queue leaking in hairpin auto bind check
1512+* net/mlx5: fix representor interrupt handler
1513+* net/mlx5: fix RoCE LAG bond device probing
1514+* net/mlx5: fix RSS expansion for GTP
1515+* net/mlx5: fix RSS flow rule with L4 mismatch
1516+* net/mlx5: fix RSS pattern expansion
1517+* net/mlx5: fix r/w lock usage in DMA unmap
1518+* net/mlx5: fix Rx/Tx queue checks
1519+* net/mlx5: fix switchdev mode recognition
1520+* net/mlx5: fix threshold for mbuf replenishment in MPRQ
1521+* net/mlx5: fix timestamp initialization on empty clock queue
1522+* net/mlx5: fix TSO multi-segment inline length
1523+* net/mlx5: fix typo in vectorized Rx comments
1524+* net/mlx5: reject inner ethernet matching in GTP
1525+* net/mlx5: remove redundant operations in NEON Rx
1526+* net/mlx5: remove unsupported flow item MPLS over IP
1527+* net/mlx5: workaround drop action with old kernel
1528+* net/mvpp2: fix configured state dependency
1529+* net/mvpp2: fix port speed overflow
1530+* net/octeontx2: fix default MCAM allocation size
1531+* net/octeontx2: fix flow creation limit on CN98xx
1532+* net/octeontx2: fix TM node statistics query
1533+* net/octeontx2: use runtime LSO format indices
1534+* net/octeontx/base: fix debug build with clang
1535+* net/pfe: remove unnecessary null check
1536+* net/sfc: check ID overflow in action port ID
1537+* net/sfc: fix aarch32 build
1538+* net/sfc: fix MAC stats lock in xstats query by ID
1539+* net/sfc: fix MAC stats update for stopped device
1540+* net/sfc: fix outer L4 checksum Rx
1541+* net/sfc: fix outer match in MAE backend
1542+* net/sfc: fix reading adapter state without locking
1543+* net/sfc: fix xstats query by ID according to ethdev
1544+* net/sfc: fix xstats query by unsorted list of IDs
1545+* net/softnic: fix connection memory leak
1546+* net/softnic: fix memory leak as profile is freed
1547+* net/softnic: fix memory leak in arguments parsing
1548+* net/softnic: fix null dereference in arguments parsing
1549+* net/tap: fix Rx checksum flags on IP options packets
1550+* net/tap: fix Rx checksum flags on TCP packets
1551+* net/virtio: fix aarch32 build
1552+* net/virtio: fix default duplex mode
1553+* net/virtio: fix interrupt handle leak
1554+* net/virtio: fix refill order in packed ring datapath
1555+* net/virtio: fix Rx scatter offload
1556+* net/virtio: report maximum MTU in device info
1557+* raw/ioat: fix config script queue size calculation
1558+* regex/mlx5: fix redundancy in device removal
1559+* regex/mlx5: fix size of setup constants
1560+* rib: fix max depth IPv6 lookup
1561+* sched: fix profile allocation failure handling
1562+* sched: rework configuration failure handling
1563+* table: fix bucket empty check
1564+* test/crypto: fix autotest function parameters
1565+* test/crypto: fix mbuf reset after null check
1566+* test/crypto: fix mempool size for session-less
1567+* test/crypto: fix typo in AES case
1568+* test/crypto: fix typo in ESN case
1569+* test/mbuf: fix virtual address conversion
1570+* test/power: fix CPU frequency check for intel_pstate
1571+* test/power: fix CPU frequency when turbo enabled
1572+* tests/cmdline: fix memory leaks
1573+* tests/eal: fix memory leak
1574+* vdpa/mlx5: fix overflow in queue attribute
1575+* vdpa/mlx5: fix TSO offload without checksum
1576+* version: 20.11.3-rc1
1577+* vfio: add stdbool include
1578+* vhost: check header for legacy dequeue offload
1579+* vhost/crypto: check request pointer before dereference
1580+* vhost: fix crash on reconnect
1581+* vhost: fix lock on device readiness notification
1582+* vhost: fix missing guest pages table NUMA realloc
1583+* vhost: fix missing memory table NUMA realloc
1584+* vhost: fix NUMA reallocation with multi-queue
1585+
1586+20.11.3 Validation
1587+~~~~~~~~~~~~~~~~~~
1588+
1589+* Intel(R) Testing
1590+
1591+ * Basic Intel(R) NIC (ixgbe, i40e and ice) testing
1592+ * PF (i40e)
1593+ * PF (ixgbe)
1594+ * PF (ice)
1595+ * VF (i40e)
1596+ * VF (ixgbe)
1597+ * VF (ice)
1598+ * Compile Testing
1599+ * Intel NIC single core/NIC performance
1600+ * Power and IPsec
1601+
1602+ * Basic cryptodev and virtio testing
1603+
1604+ * vhost/virtio basic loopback, PVP and performance test
1605+ * cryptodev Function/Performance
1606+
1607+
1608+* Nvidia(R) Testing
1609+
1610+ * Basic functionality with testpmd
1611+
1612+ * Tx/Rx
1613+ * xstats
1614+ * Timestamps
1615+ * Link status
1616+ * RTE flow and flow_director
1617+ * RSS
1618+ * VLAN stripping and insertion
1619+ * Checksum/TSO
1620+ * ptype
1621+ * link_status_interrupt example application
1622+ * l3fwd-power example application
1623+ * Multi-process example applications
1624+ * Hardware LRO tests
1625+
1626+ * Build tests
1627+
1628+ * Ubuntu 20.04.2 with MLNX_OFED_LINUX-5.4-1.0.3.0.
1629+ * Ubuntu 20.04.2 with rdma-core master (64d1ae5).
1630+ * Ubuntu 20.04.2 with rdma-core v28.0.
1631+ * Ubuntu 18.04.5 with rdma-core v17.1.
1632+ * Ubuntu 18.04.5 with rdma-core master (5b0f5b2) (i386).
1633+ * Ubuntu 16.04.7 with rdma-core v22.7.
1634+ * Fedora 34 with rdma-core v36.0.
1635+ * Fedora 36 (Rawhide) with rdma-core v36.0 (only with gcc).
1636+ * CentOS 7 7.9.2009 with rdma-core master (64d1ae5).
1637+ * CentOS 7 7.9.2009 with MLNX_OFED_LINUX-5.4-1.0.3.0.
1638+ * CentOS 8 8.3.2011 with rdma-core master (64d1ae5).
1639+ * OpenSUSE Leap 15.3 with rdma-core v31.0.
1640+
1641+ * ConnectX-5
1642+
1643+ * Ubuntu 20.04
1644+ * Driver MLNX_OFED_LINUX-5.4-1.0.3.0
1645+ * Kernel: 5.14.0-rc6 / Driver: rdma-core v36.0
1646+ * fw 16.31.1014
1647+
1648+ * ConnectX-4 Lx
1649+
1650+ * Ubuntu 20.04
1651+ * Driver MLNX_OFED_LINUX-5.4-1.0.3.0
1652+ * Kernel: 5.14.0-rc6 / Driver: rdma-core v36.0
1653+ * fw 14.31.1014
1654+
1655+
1656+* Red Hat(R) Testing
1657+
1658+ * Platform
1659+
1660+ * RHEL 8
1661+ * Kernel 4.18
1662+ * Qemu 6.0
1663+ * X540-AT2 NIC (ixgbe, 10G)
1664+
1665+ * Functionality
1666+
1667+ * Guest with device assignment (PF) throughput testing (1G hugepage size)
1668+ * Guest with device assignment (PF) throughput testing (2M hugepage size)
1669+ * Guest with device assignment (VF) throughput testing
1670+ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing
1671+ * PVP vhost-user 2Q throughput testing
1672+ * PVP vhost-user 1Q cross NUMA node throughput testing
1673+ * Guest with vhost-user 2 queues throughput testing
1674+ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect
1675+ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect
1676+ * PVP 1Q live migration testing
1677+ * PVP 1Q cross NUMA node live migration testing
1678+ * Guest with ovs+dpdk+vhost-user 1Q live migration testing
1679+ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M)
1680+ * Guest with ovs+dpdk+vhost-user 2Q live migration testing
1681+ * Host PF + DPDK testing
1682+ * Host VF + DPDK testing
1683+
1684+* Canonical(R) Testing
1685+
1686+ * Build tests of DPDK & OVS 2.15.0 on Ubuntu 21.04 (meson based)
1687+ * Functional and performance tests based on OVS-DPDK on x86_64
1688+ * Autopkgtests for DPDK and Open vSwitch
1689+
1690+20.11.3 Known Issues
1691+~~~~~~~~~~~~~~~~~~~~
1692+
1693+* ICE
1694+
1695+ * Creating 512 ACL rules after creating a full mask switch rule fails.
1696+
1697+* vhost/virtio
1698+
1699+ * udp-fragmentation-offload cannot be set up on Ubuntu 19.10 VMs.
1700+ https://bugzilla.kernel.org/show_bug.cgi?id=207075
1701+ * vm2vm virtio-net connectivity between two VMs randomly fails due
1702+ to a lost connection after vhost reconnect.
1703+
1704+* unit tests
1705+
1706+ * unit_tests_power/power_cpufreq fails.
1707+ https://bugs.dpdk.org/show_bug.cgi?id=790
1708+
1709+* IAVF
1710+
1711+ * cvl_advanced_iavf_rss: after changing the SCTP port value, the hash value
1712+ remains unchanged.
1713diff --git a/doc/guides/rel_notes/release_2_2.rst b/doc/guides/rel_notes/release_2_2.rst
1714index cea5c87..8273473 100644
1715--- a/doc/guides/rel_notes/release_2_2.rst
1716+++ b/doc/guides/rel_notes/release_2_2.rst
1717@@ -322,7 +322,7 @@ Drivers
1718
1719 Several customers have reported a link flap issue on 82579. The symptoms
1720 are random and intermittent link losses when 82579 is connected to specific
1721- switches. the Issue was root caused as an inter-operability problem between
1722+ switches. The issue was root caused as an interoperability problem between
1723 the NIC and at least some Broadcom PHYs in the Energy Efficient Ethernet
1724 wake mechanism.
1725
1726diff --git a/doc/guides/sample_app_ug/fips_validation.rst b/doc/guides/sample_app_ug/fips_validation.rst
1727index ca37fc0..56df434 100644
1728--- a/doc/guides/sample_app_ug/fips_validation.rst
1729+++ b/doc/guides/sample_app_ug/fips_validation.rst
1730@@ -113,7 +113,7 @@ where,
1731 * mbuf-dataroom: By default the application creates mbuf pool with maximum
1732 possible data room (65535 bytes). If the user wants to test scatter-gather
1733 list feature of the PMD he or she may set this value to reduce the dataroom
1734- size so that the input data may be dividied into multiple chained mbufs.
1735+ size so that the input data may be divided into multiple chained mbufs.
1736
1737
1738 To run the application in linux environment to test one AES FIPS test data
1739diff --git a/doc/guides/sample_app_ug/hello_world.rst b/doc/guides/sample_app_ug/hello_world.rst
1740index 7cb9279..6ec93e0 100644
1741--- a/doc/guides/sample_app_ug/hello_world.rst
1742+++ b/doc/guides/sample_app_ug/hello_world.rst
1743@@ -1,4 +1,4 @@
1744-o.. SPDX-License-Identifier: BSD-3-Clause
1745+.. SPDX-License-Identifier: BSD-3-Clause
1746 Copyright(c) 2010-2014 Intel Corporation.
1747
1748 Hello World Sample Application
1749diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
1750index 176e292..c07275e 100644
1751--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
1752+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
1753@@ -93,7 +93,7 @@ Additionally the event mode introduces two submodes of processing packets:
1754 protocol use case, the worker thread resembles l2fwd worker thread as the IPsec
1755 processing is done entirely in HW. This mode can be used to benchmark the raw
1756 performance of the HW. The driver submode is selected with --single-sa option
1757- (used also by poll mode). When --single-sa option is used in conjution with event
1758+ (used also by poll mode). When --single-sa option is used in conjunction with event
1759 mode then index passed to --single-sa is ignored.
1760
1761 * App submode: This submode has all the features currently implemented with the
1762diff --git a/doc/guides/sample_app_ug/performance_thread.rst b/doc/guides/sample_app_ug/performance_thread.rst
1763index 4c6a1db..9b09838 100644
1764--- a/doc/guides/sample_app_ug/performance_thread.rst
1765+++ b/doc/guides/sample_app_ug/performance_thread.rst
1766@@ -1176,7 +1176,7 @@ Tracing of events can be individually masked, and the mask may be programmed
1767 at run time. An unmasked event results in a callback that provides information
1768 about the event. The default callback simply prints trace information. The
1769 default mask is 0 (all events off) the mask can be modified by calling the
1770-function ``lthread_diagniostic_set_mask()``.
1771+function ``lthread_diagnostic_set_mask()``.
1772
1773 It is possible register a user callback function to implement more
1774 sophisticated diagnostic functions.
1775diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
1776index 6f9ff13..d76eb10 100644
1777--- a/doc/guides/testpmd_app_ug/run_app.rst
1778+++ b/doc/guides/testpmd_app_ug/run_app.rst
1779@@ -112,7 +112,7 @@ The command line options are:
1780 Set the data size of the mbufs used to N bytes, where N < 65536.
1781 The default value is 2048. If multiple mbuf-size values are specified the
1782 extra memory pools will be created for allocating mbufs to receive packets
1783- with buffer splittling features.
1784+ with buffer splitting features.
1785
1786 * ``--total-num-mbufs=N``
1787
1788diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
1789index 3187756..a004d89 100644
1790--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
1791+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
1792@@ -1732,7 +1732,7 @@ List all items from the ptype mapping table::
1793
1794 Where:
1795
1796+* ``valid_only``: A flag indicating whether to list only valid items(=1) or all items(=0).
1797+* ``valid_only``: A flag indicates if only list valid items(=1) or all items(=0).
1798
1799 Replace a specific or a group of software defined ptype with a new one::
1800
1801@@ -4767,7 +4767,7 @@ Sample Raw encapsulation rule
1802
1803 Raw encapsulation configuration can be set by the following commands
1804
1805-Eecapsulating VxLAN::
1806+Encapsulating VxLAN::
1807
1808 testpmd> set raw_encap 4 eth src is 10:11:22:33:44:55 / vlan tci is 1
1809 inner_type is 0x0800 / ipv4 / udp dst is 4789 / vxlan vni
1810diff --git a/doc/guides/tools/hugepages.rst b/doc/guides/tools/hugepages.rst
1811index 6d3f410..dd24c80 100644
1812--- a/doc/guides/tools/hugepages.rst
1813+++ b/doc/guides/tools/hugepages.rst
1814@@ -62,7 +62,7 @@ Options
1815
1816 .. warning::
1817
1818- While any user can run the ``dpdk-hugpages.py`` script to view the
1819+ While any user can run the ``dpdk-hugepages.py`` script to view the
1820 status of huge pages, modifying the setup requires root privileges.
1821
1822
1823@@ -71,8 +71,8 @@ Examples
1824
1825 To display current huge page settings::
1826
1827- dpdk-hugpages.py -s
1828+ dpdk-hugepages.py -s
1829
1830 To a complete setup of with 2 Gigabyte of 1G huge pages::
1831
1832- dpdk-hugpages.py -p 1G --setup 2G
1833+ dpdk-hugepages.py -p 1G --setup 2G
1834diff --git a/doc/meson.build b/doc/meson.build
1835index c5410d8..d6cf85a 100644
1836--- a/doc/meson.build
1837+++ b/doc/meson.build
1838@@ -11,5 +11,5 @@ if doc_targets.length() == 0
1839 else
1840 message = 'Building docs:'
1841 endif
1842-run_target('doc', command: ['echo', message, doc_target_names],
1843+run_target('doc', command: [echo, message, doc_target_names],
1844 depends: doc_targets)
1845diff --git a/drivers/bus/dpaa/base/fman/fman.c b/drivers/bus/dpaa/base/fman/fman.c
1846index 39102bc..997c94f 100644
1847--- a/drivers/bus/dpaa/base/fman/fman.c
1848+++ b/drivers/bus/dpaa/base/fman/fman.c
1849@@ -50,7 +50,7 @@ if_destructor(struct __fman_if *__if)
1850 free(bp);
1851 }
1852 cleanup:
1853- free(__if);
1854+ rte_free(__if);
1855 }
1856
1857 static int
1858diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
1859index 2e1808b..e8d1faa 100644
1860--- a/drivers/bus/pci/linux/pci.c
1861+++ b/drivers/bus/pci/linux/pci.c
1862@@ -331,7 +331,7 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
1863 else
1864 dev->kdrv = RTE_PCI_KDRV_UNKNOWN;
1865 } else {
1866- dev->kdrv = RTE_PCI_KDRV_NONE;
1867+ free(dev);
1868 return 0;
1869 }
1870 /* device is valid, add in list (sorted) */
1871@@ -569,7 +569,7 @@ pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
1872
1873 /* Check for a PowerNV platform */
1874 while (getline(&line, &len, fp) != -1) {
1875- if (strstr(line, "platform") != NULL)
1876+ if (strstr(line, "platform") == NULL)
1877 continue;
1878
1879 if (strstr(line, "PowerNV") != NULL) {
1880diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
1881index 9b8d769..fa887de 100644
1882--- a/drivers/bus/pci/pci_common.c
1883+++ b/drivers/bus/pci/pci_common.c
1884@@ -16,6 +16,7 @@
1885 #include <rte_bus.h>
1886 #include <rte_pci.h>
1887 #include <rte_bus_pci.h>
1888+#include <rte_lcore.h>
1889 #include <rte_per_lcore.h>
1890 #include <rte_memory.h>
1891 #include <rte_eal.h>
1892@@ -190,7 +191,9 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
1893 }
1894
1895 if (dev->device.numa_node < 0) {
1896- RTE_LOG(WARNING, EAL, " Invalid NUMA socket, default to 0\n");
1897+ if (rte_socket_count() > 1)
1898+ RTE_LOG(INFO, EAL, "Device %s is not NUMA-aware, defaulting socket to 0\n",
1899+ dev->name);
1900 dev->device.numa_node = 0;
1901 }
1902
1903diff --git a/drivers/bus/vmbus/vmbus_common.c b/drivers/bus/vmbus/vmbus_common.c
1904index 39b3308..09b8c3c 100644
1905--- a/drivers/bus/vmbus/vmbus_common.c
1906+++ b/drivers/bus/vmbus/vmbus_common.c
1907@@ -15,6 +15,7 @@
1908 #include <rte_eal.h>
1909 #include <rte_tailq.h>
1910 #include <rte_devargs.h>
1911+#include <rte_lcore.h>
1912 #include <rte_malloc.h>
1913 #include <rte_errno.h>
1914 #include <rte_memory.h>
1915@@ -112,7 +113,9 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
1916 dev->driver = dr;
1917
1918 if (dev->device.numa_node < 0) {
1919- VMBUS_LOG(WARNING, " Invalid NUMA socket, default to 0");
1920+ if (rte_socket_count() > 1)
1921+ VMBUS_LOG(INFO, "Device %s is not NUMA-aware, defaulting socket to 0",
1922+ guid);
1923 dev->device.numa_node = 0;
1924 }
1925
1926diff --git a/drivers/common/iavf/iavf_impl.c b/drivers/common/iavf/iavf_impl.c
1927index fc0da31..f80878b 100644
1928--- a/drivers/common/iavf/iavf_impl.c
1929+++ b/drivers/common/iavf/iavf_impl.c
1930@@ -6,7 +6,6 @@
1931 #include <inttypes.h>
1932
1933 #include <rte_common.h>
1934-#include <rte_random.h>
1935 #include <rte_malloc.h>
1936 #include <rte_memzone.h>
1937
1938@@ -19,13 +18,15 @@ iavf_allocate_dma_mem_d(__rte_unused struct iavf_hw *hw,
1939 u64 size,
1940 u32 alignment)
1941 {
1942+ static uint64_t iavf_dma_memzone_id;
1943 const struct rte_memzone *mz = NULL;
1944 char z_name[RTE_MEMZONE_NAMESIZE];
1945
1946 if (!mem)
1947 return IAVF_ERR_PARAM;
1948
1949- snprintf(z_name, sizeof(z_name), "iavf_dma_%"PRIu64, rte_rand());
1950+ snprintf(z_name, sizeof(z_name), "iavf_dma_%" PRIu64,
1951+ __atomic_fetch_add(&iavf_dma_memzone_id, 1, __ATOMIC_RELAXED));
1952 mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
1953 RTE_MEMZONE_IOVA_CONTIG, alignment,
1954 RTE_PGSIZE_2M);
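
The iavf change above replaces ``rte_rand()``-derived memzone names, which can collide, with a monotonically increasing counter: a relaxed ``__atomic_fetch_add`` hands a distinct value to every caller even under concurrency. The same pattern in isolation (``make_unique_name`` is an illustrative name)::

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t name_seq;

    /* Relaxed ordering suffices: callers only need distinct values,
     * not any ordering with surrounding memory operations. */
    static void
    make_unique_name(char *buf, size_t len, const char *prefix)
    {
        uint64_t id = __atomic_fetch_add(&name_seq, 1, __ATOMIC_RELAXED);

        snprintf(buf, len, "%s_%" PRIu64, prefix, id);
    }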
1955diff --git a/drivers/common/mlx5/linux/meson.build b/drivers/common/mlx5/linux/meson.build
1956index fa9686f..c2d580c 100644
1957--- a/drivers/common/mlx5/linux/meson.build
1958+++ b/drivers/common/mlx5/linux/meson.build
1959@@ -94,6 +94,10 @@ has_sym_args = [
1960 'IBV_WQ_FLAG_RX_END_PADDING' ],
1961 [ 'HAVE_MLX5DV_DR_DEVX_PORT', 'infiniband/mlx5dv.h',
1962 'mlx5dv_query_devx_port' ],
1963+ [ 'HAVE_MLX5DV_DR_DEVX_PORT_V35', 'infiniband/mlx5dv.h',
1964+ 'mlx5dv_query_port' ],
1965+ [ 'HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT', 'infiniband/mlx5dv.h',
1966+ 'mlx5dv_dr_action_create_dest_ib_port' ],
1967 [ 'HAVE_IBV_DEVX_OBJ', 'infiniband/mlx5dv.h',
1968 'mlx5dv_devx_obj_create' ],
1969 [ 'HAVE_IBV_FLOW_DEVX_COUNTERS', 'infiniband/mlx5dv.h',
1970diff --git a/drivers/common/mlx5/linux/mlx5_glue.c b/drivers/common/mlx5/linux/mlx5_glue.c
1971index 964f7e7..09fdce1 100644
1972--- a/drivers/common/mlx5/linux/mlx5_glue.c
1973+++ b/drivers/common/mlx5/linux/mlx5_glue.c
1974@@ -391,7 +391,7 @@ mlx5_glue_dr_create_flow_action_dest_flow_tbl(void *tbl)
1975 static void *
1976 mlx5_glue_dr_create_flow_action_dest_port(void *domain, uint32_t port)
1977 {
1978-#ifdef HAVE_MLX5DV_DR_DEVX_PORT
1979+#ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
1980 return mlx5dv_dr_action_create_dest_ib_port(domain, port);
1981 #else
1982 #ifdef HAVE_MLX5DV_DR_ESWITCH
1983@@ -1087,17 +1087,54 @@ mlx5_glue_devx_wq_query(struct ibv_wq *wq, const void *in, size_t inlen,
1984 static int
1985 mlx5_glue_devx_port_query(struct ibv_context *ctx,
1986 uint32_t port_num,
1987- struct mlx5dv_devx_port *mlx5_devx_port)
1988-{
1989+ struct mlx5_port_info *info)
1990+{
1991+ int err = 0;
1992+
1993+ info->query_flags = 0;
1994+#ifdef HAVE_MLX5DV_DR_DEVX_PORT_V35
1995+ /* The DevX port query API is implemented (rdma-core v35 and above). */
1996+ struct mlx5_ib_uapi_query_port devx_port;
1997+
1998+ memset(&devx_port, 0, sizeof(devx_port));
1999+ err = mlx5dv_query_port(ctx, port_num, &devx_port);
2000+ if (err)
2001+ return err;
2002+ if (devx_port.flags & MLX5DV_QUERY_PORT_VPORT_REG_C0) {
2003+ info->vport_meta_tag = devx_port.reg_c0.value;
2004+ info->vport_meta_mask = devx_port.reg_c0.mask;
2005+ info->query_flags |= MLX5_PORT_QUERY_REG_C0;
2006+ }
2007+ if (devx_port.flags & MLX5DV_QUERY_PORT_VPORT) {
2008+ info->vport_id = devx_port.vport;
2009+ info->query_flags |= MLX5_PORT_QUERY_VPORT;
2010+ }
2011+#else
2012 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
2013- return mlx5dv_query_devx_port(ctx, port_num, mlx5_devx_port);
2014+ /* The legacy DevX port query API is implemented (prior v35). */
2015+ struct mlx5dv_devx_port devx_port = {
2016+ .comp_mask = MLX5DV_DEVX_PORT_VPORT |
2017+ MLX5DV_DEVX_PORT_MATCH_REG_C_0
2018+ };
2019+
2020+ err = mlx5dv_query_devx_port(ctx, port_num, &devx_port);
2021+ if (err)
2022+ return err;
2023+ if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
2024+ info->vport_meta_tag = devx_port.reg_c_0.value;
2025+ info->vport_meta_mask = devx_port.reg_c_0.mask;
2026+ info->query_flags |= MLX5_PORT_QUERY_REG_C0;
2027+ }
2028+ if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
2029+ info->vport_id = devx_port.vport_num;
2030+ info->query_flags |= MLX5_PORT_QUERY_VPORT;
2031+ }
2032 #else
2033- (void)ctx;
2034- (void)port_num;
2035- (void)mlx5_devx_port;
2036- errno = ENOTSUP;
2037- return errno;
2038-#endif
2039+ RTE_SET_USED(ctx);
2040+ RTE_SET_USED(port_num);
2041+#endif /* HAVE_MLX5DV_DR_DEVX_PORT */
2042+#endif /* HAVE_MLX5DV_DR_DEVX_PORT_V35 */
2043+ return err;
2044 }
2045
2046 static int
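
The glue change above is the usual rdma-core version shim: meson's ``has_sym`` probes become ``HAVE_*`` macros, and the newest API is tried first with graceful degradation. Its generic shape, sketched with hypothetical macro and function names::

    #include <stdint.h>

    extern int new_port_query(void *ctx, uint32_t port);    /* hypothetical */
    extern int legacy_port_query(void *ctx, uint32_t port); /* hypothetical */

    static int
    query_port_shim(void *ctx, uint32_t port)
    {
    #if defined(HAVE_NEW_PORT_API)      /* e.g. rdma-core >= v35 */
        return new_port_query(ctx, port);
    #elif defined(HAVE_LEGACY_PORT_API) /* older rdma-core */
        return legacy_port_query(ctx, port);
    #else
        (void)ctx;                      /* feature absent: report nothing */
        (void)port;
        return 0;
    #endif
    }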
2047diff --git a/drivers/common/mlx5/linux/mlx5_glue.h b/drivers/common/mlx5/linux/mlx5_glue.h
2048index 9e385be..f08c837 100644
2049--- a/drivers/common/mlx5/linux/mlx5_glue.h
2050+++ b/drivers/common/mlx5/linux/mlx5_glue.h
2051@@ -84,6 +84,20 @@ struct mlx5dv_dr_action;
2052 struct mlx5dv_devx_port;
2053 #endif
2054
2055+#ifndef HAVE_MLX5DV_DR_DEVX_PORT_V35
2056+struct mlx5dv_port;
2057+#endif
2058+
2059+#define MLX5_PORT_QUERY_VPORT (1u << 0)
2060+#define MLX5_PORT_QUERY_REG_C0 (1u << 1)
2061+
2062+struct mlx5_port_info {
2063+ uint16_t query_flags;
2064+ uint16_t vport_id; /* Associated VF vport index (if any). */
2065+	uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
2066+ uint32_t vport_meta_mask; /* Used for vport index field match mask. */
2067+};
2068+
2069 #ifndef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER
2070 struct mlx5dv_dr_flow_meter_attr;
2071 #endif
2072@@ -311,7 +325,7 @@ struct mlx5_glue {
2073 void *out, size_t outlen);
2074 int (*devx_port_query)(struct ibv_context *ctx,
2075 uint32_t port_num,
2076- struct mlx5dv_devx_port *mlx5_devx_port);
2077+ struct mlx5_port_info *info);
2078 int (*dr_dump_domain)(FILE *file, void *domain);
2079 int (*devx_query_eqn)(struct ibv_context *context, uint32_t cpus,
2080 uint32_t *eqn);
2081diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
2082index ef7a521..1f765dc 100644
2083--- a/drivers/common/mlx5/linux/mlx5_nl.c
2084+++ b/drivers/common/mlx5/linux/mlx5_nl.c
2085@@ -33,6 +33,8 @@
2086 #define MLX5_SEND_BUF_SIZE 32768
2087 /* Receive buffer size for the Netlink socket */
2088 #define MLX5_RECV_BUF_SIZE 32768
2089+/* Maximal physical port name length. */
2090+#define MLX5_PHYS_PORT_NAME_MAX 128
2091
2092 /** Parameters of VLAN devices created by driver. */
2093 #define MLX5_VMWA_VLAN_DEVICE_PFX "evmlx"
2094@@ -187,8 +189,8 @@ int
2095 mlx5_nl_init(int protocol)
2096 {
2097 int fd;
2098- int sndbuf_size = MLX5_SEND_BUF_SIZE;
2099- int rcvbuf_size = MLX5_RECV_BUF_SIZE;
2100+ int buf_size;
2101+ socklen_t opt_size;
2102 struct sockaddr_nl local = {
2103 .nl_family = AF_NETLINK,
2104 };
2105@@ -199,16 +201,36 @@ mlx5_nl_init(int protocol)
2106 rte_errno = errno;
2107 return -rte_errno;
2108 }
2109- ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int));
2110+ opt_size = sizeof(buf_size);
2111+ ret = getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buf_size, &opt_size);
2112 if (ret == -1) {
2113 rte_errno = errno;
2114 goto error;
2115 }
2116- ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int));
2117+ DRV_LOG(DEBUG, "Netlink socket send buffer: %d", buf_size);
2118+ if (buf_size < MLX5_SEND_BUF_SIZE) {
2119+ ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF,
2120+ &buf_size, sizeof(buf_size));
2121+ if (ret == -1) {
2122+ rte_errno = errno;
2123+ goto error;
2124+ }
2125+ }
2126+ opt_size = sizeof(buf_size);
2127+ ret = getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &buf_size, &opt_size);
2128 if (ret == -1) {
2129 rte_errno = errno;
2130 goto error;
2131 }
2132+ DRV_LOG(DEBUG, "Netlink socket recv buffer: %d", buf_size);
2133+ if (buf_size < MLX5_RECV_BUF_SIZE) {
2134+ ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
2135+ &buf_size, sizeof(buf_size));
2136+ if (ret == -1) {
2137+ rte_errno = errno;
2138+ goto error;
2139+ }
2140+ }
2141 ret = bind(fd, (struct sockaddr *)&local, sizeof(local));
2142 if (ret == -1) {
2143 rte_errno = errno;
2144@@ -330,11 +352,7 @@ mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
2145 void *arg)
2146 {
2147 struct sockaddr_nl sa;
2148- void *buf = mlx5_malloc(0, MLX5_RECV_BUF_SIZE, 0, SOCKET_ID_ANY);
2149- struct iovec iov = {
2150- .iov_base = buf,
2151- .iov_len = MLX5_RECV_BUF_SIZE,
2152- };
2153+ struct iovec iov;
2154 struct msghdr msg = {
2155 .msg_name = &sa,
2156 .msg_namelen = sizeof(sa),
2157@@ -342,18 +360,43 @@ mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
2158 /* One message at a time */
2159 .msg_iovlen = 1,
2160 };
2161+ void *buf = NULL;
2162 int multipart = 0;
2163 int ret = 0;
2164
2165- if (!buf) {
2166- rte_errno = ENOMEM;
2167- return -rte_errno;
2168- }
2169 do {
2170 struct nlmsghdr *nh;
2171- int recv_bytes = 0;
2172+ int recv_bytes;
2173
2174 do {
2175+ /* Query length of incoming message. */
2176+ iov.iov_base = NULL;
2177+ iov.iov_len = 0;
2178+ recv_bytes = recvmsg(nlsk_fd, &msg,
2179+ MSG_PEEK | MSG_TRUNC);
2180+ if (recv_bytes < 0) {
2181+ rte_errno = errno;
2182+ ret = -rte_errno;
2183+ goto exit;
2184+ }
2185+ if (recv_bytes == 0) {
2186+ rte_errno = ENODATA;
2187+ ret = -rte_errno;
2188+ goto exit;
2189+ }
2190+ /* Allocate buffer to fetch the message. */
2191+ if (recv_bytes < MLX5_RECV_BUF_SIZE)
2192+ recv_bytes = MLX5_RECV_BUF_SIZE;
2193+ mlx5_free(buf);
2194+ buf = mlx5_malloc(0, recv_bytes, 0, SOCKET_ID_ANY);
2195+ if (!buf) {
2196+ rte_errno = ENOMEM;
2197+ ret = -rte_errno;
2198+ goto exit;
2199+ }
2200+ /* Fetch the message. */
2201+ iov.iov_base = buf;
2202+ iov.iov_len = recv_bytes;
2203 recv_bytes = recvmsg(nlsk_fd, &msg, 0);
2204 if (recv_bytes == -1) {
2205 rte_errno = errno;
2206@@ -1188,6 +1231,7 @@ mlx5_nl_switch_info_cb(struct nlmsghdr *nh, void *arg)
2207 size_t off = NLMSG_LENGTH(sizeof(struct ifinfomsg));
2208 bool switch_id_set = false;
2209 bool num_vf_set = false;
2210+ int len;
2211
2212 if (nh->nlmsg_type != RTM_NEWLINK)
2213 goto error;
2214@@ -1203,7 +1247,24 @@ mlx5_nl_switch_info_cb(struct nlmsghdr *nh, void *arg)
2215 num_vf_set = true;
2216 break;
2217 case IFLA_PHYS_PORT_NAME:
2218- mlx5_translate_port_name((char *)payload, &info);
2219+ len = RTA_PAYLOAD(ra);
2220+ /* Some kernels do not pad attributes with zero. */
2221+ if (len > 0 && len < MLX5_PHYS_PORT_NAME_MAX) {
2222+ char name[MLX5_PHYS_PORT_NAME_MAX];
2223+
2224+ /*
2225+ * We can't just patch the message with padding
2226+ * zero - it might corrupt the following items
2227+			 * in the message. Instead, we copy the string
2228+			 * up to the attribute length and pad the copy.
2229+ */
2230+ memcpy(name, payload, len);
2231+ name[len] = 0;
2232+ mlx5_translate_port_name(name, &info);
2233+ } else {
2234+ info.name_type =
2235+ MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN;
2236+ }
2237 break;
2238 case IFLA_PHYS_SWITCH_ID:
2239 info.switch_id = 0;
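
The reworked receive path above no longer trusts a fixed 32 KiB buffer: it first probes the pending message with ``MSG_PEEK | MSG_TRUNC``, which reports the full datagram length without consuming it, and allocates accordingly. The probe in isolation, assuming an already bound netlink fd (``peek_msg_len`` is an illustrative name)::

    #include <sys/socket.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Return the next datagram's true length without consuming it,
     * or -1 with errno set; MSG_TRUNC makes recvmsg() report the real
     * size even though no buffer space is supplied. */
    static ssize_t
    peek_msg_len(int fd)
    {
        struct iovec iov = { .iov_base = NULL, .iov_len = 0 };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

        return recvmsg(fd, &msg, MSG_PEEK | MSG_TRUNC);
    }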
2240diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
2241index 7c25541..d01f868 100644
2242--- a/drivers/common/mlx5/mlx5_common_mr.c
2243+++ b/drivers/common/mlx5/mlx5_common_mr.c
2244@@ -1061,6 +1061,95 @@ mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
2245 }
2246
2247 /**
2248+ * Callback for memory free event. Iterate freed memsegs and check whether it
2249+ * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a
2250+ * result, the MR would be fragmented. If it becomes empty, the MR will be freed
2251+ * later by mlx5_mr_garbage_collect(). Even if this callback is called from a
2252+ * secondary process, the garbage collector will be called in primary process
2253+ * as the secondary process can't call mlx5_mr_create().
2254+ *
2255+ * The global cache must be rebuilt if there's any change and this event has to
2256+ * be propagated to dataplane threads to flush the local caches.
2257+ *
2258+ * @param share_cache
2259+ * Pointer to a global shared MR cache.
2260+ * @param ibdev_name
2261+ * Name of ibv device.
2262+ * @param addr
2263+ * Address of freed memory.
2264+ * @param len
2265+ * Size of freed memory.
2266+ */
2267+void
2268+mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
2269+ const char *ibdev_name, const void *addr, size_t len)
2270+{
2271+ const struct rte_memseg_list *msl;
2272+ struct mlx5_mr *mr;
2273+ int ms_n;
2274+ int i;
2275+ int rebuild = 0;
2276+
2277+ DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
2278+ ibdev_name, addr, len);
2279+ msl = rte_mem_virt2memseg_list(addr);
2280+ /* addr and len must be page-aligned. */
2281+ MLX5_ASSERT((uintptr_t)addr ==
2282+ RTE_ALIGN((uintptr_t)addr, msl->page_sz));
2283+ MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
2284+ ms_n = len / msl->page_sz;
2285+ rte_rwlock_write_lock(&share_cache->rwlock);
2286+ /* Clear bits of freed memsegs from MR. */
2287+ for (i = 0; i < ms_n; ++i) {
2288+ const struct rte_memseg *ms;
2289+ struct mr_cache_entry entry;
2290+ uintptr_t start;
2291+ int ms_idx;
2292+ uint32_t pos;
2293+
2294+ /* Find MR having this memseg. */
2295+ start = (uintptr_t)addr + i * msl->page_sz;
2296+ mr = mlx5_mr_lookup_list(share_cache, &entry, start);
2297+ if (mr == NULL)
2298+ continue;
2299+ MLX5_ASSERT(mr->msl); /* Can't be external memory. */
2300+ ms = rte_mem_virt2memseg((void *)start, msl);
2301+ MLX5_ASSERT(ms != NULL);
2302+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
2303+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
2304+ pos = ms_idx - mr->ms_base_idx;
2305+ MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
2306+ MLX5_ASSERT(pos < mr->ms_bmp_n);
2307+ DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
2308+ ibdev_name, (void *)mr, pos, (void *)start);
2309+ rte_bitmap_clear(mr->ms_bmp, pos);
2310+ if (--mr->ms_n == 0) {
2311+ LIST_REMOVE(mr, mr);
2312+ LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
2313+ DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
2314+ ibdev_name, (void *)mr);
2315+ }
2316+ /*
2317+		 * MR is fragmented or will be freed. The global cache must be
2318+ * rebuilt.
2319+ */
2320+ rebuild = 1;
2321+ }
2322+ if (rebuild) {
2323+ mlx5_mr_rebuild_cache(share_cache);
2324+ /*
2325+ * No explicit wmb is needed after updating dev_gen due to
2326+ * store-release ordering in unlock that provides the
2327+ * implicit barrier at the software visible level.
2328+ */
2329+ ++share_cache->dev_gen;
2330+ DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
2331+ share_cache->dev_gen);
2332+ }
2333+ rte_rwlock_write_unlock(&share_cache->rwlock);
2334+}
2335+
2336+/**
2337 * Dump all the created MRs and the global cache entries.
2338 *
2339 * @param sh
2340diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
2341index da0a0f0..09d39dd 100644
2342--- a/drivers/common/mlx5/mlx5_common_mr.h
2343+++ b/drivers/common/mlx5/mlx5_common_mr.h
2344@@ -143,6 +143,9 @@ void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
2345 __rte_internal
2346 void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
2347 __rte_internal
2348+void mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
2349+ const char *ibdev_name, const void *addr, size_t len);
2350+__rte_internal
2351 int
2352 mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
2353 struct mlx5_mr *mr);
2354diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
2355index fd6019b..02aedbc 100644
2356--- a/drivers/common/mlx5/version.map
2357+++ b/drivers/common/mlx5/version.map
2358@@ -69,6 +69,7 @@ INTERNAL {
2359 mlx5_mr_create_primary;
2360 mlx5_mr_flush_local_cache;
2361 mlx5_mr_free;
2362+ mlx5_free_mr_by_addr;
2363
2364 mlx5_nl_allmulti;
2365 mlx5_nl_devlink_family_id_get;
2366diff --git a/drivers/common/octeontx2/otx2_dev.h b/drivers/common/octeontx2/otx2_dev.h
2367index cd4fe51..9d8dcca 100644
2368--- a/drivers/common/octeontx2/otx2_dev.h
2369+++ b/drivers/common/octeontx2/otx2_dev.h
2370@@ -55,6 +55,9 @@
2371 (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
2372 (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
2373
2374+#define otx2_dev_is_98xx(dev) \
2375+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x3)
2376+
2377 struct otx2_dev;
2378
2379 /* Link status callback */
2380diff --git a/drivers/common/sfc_efx/meson.build b/drivers/common/sfc_efx/meson.build
2381index 6cb9f07..1e17f1f 100644
2382--- a/drivers/common/sfc_efx/meson.build
2383+++ b/drivers/common/sfc_efx/meson.build
2384@@ -5,7 +5,7 @@
2385 # This software was jointly developed between OKTET Labs (under contract
2386 # for Solarflare) and Solarflare Communications, Inc.
2387
2388-if (arch_subdir != 'x86' or not dpdk_conf.get('RTE_ARCH_64')) and (arch_subdir != 'arm' or not host_machine.cpu_family().startswith('aarch64'))
2389+if (arch_subdir != 'x86' and arch_subdir != 'arm') or (not dpdk_conf.get('RTE_ARCH_64'))
2390 build = false
2391 reason = 'only supported on x86_64 and aarch64'
2392 endif
2393diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
2394index 0de5120..cc4e085 100644
2395--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
2396+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
2397@@ -842,8 +842,14 @@ aesni_gcm_create(const char *name,
2398 init_mb_mgr_avx2(mb_mgr);
2399 break;
2400 case RTE_AESNI_GCM_AVX512:
2401- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
2402- init_mb_mgr_avx512(mb_mgr);
2403+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_VAES)) {
2404+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
2405+ init_mb_mgr_avx512(mb_mgr);
2406+ } else {
2407+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
2408+ init_mb_mgr_avx2(mb_mgr);
2409+ vector_mode = RTE_AESNI_GCM_AVX2;
2410+ }
2411 break;
2412 default:
2413 AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
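
The aesni_gcm fix above falls back to the AVX2 multi-buffer manager on AVX512 CPUs that lack VAES, where the AVX512 path performs worse. The runtime dispatch idiom it relies on, sketched with illustrative kernel names::

    #include <rte_cpuflags.h>

    typedef void (*gcm_kernel_t)(void);

    /* Prefer the AVX512 path only when VAES is actually present. */
    static gcm_kernel_t
    select_gcm_kernel(gcm_kernel_t avx512_vaes, gcm_kernel_t avx2)
    {
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
            rte_cpu_get_flag_enabled(RTE_CPUFLAG_VAES))
            return avx512_vaes;
        return avx2;
    }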
2414diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd.c b/drivers/crypto/mvsam/rte_mrvl_pmd.c
2415index bec51c9..5712042 100644
2416--- a/drivers/crypto/mvsam/rte_mrvl_pmd.c
2417+++ b/drivers/crypto/mvsam/rte_mrvl_pmd.c
2418@@ -360,6 +360,14 @@ mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
2419 sess->sam_sess_params.cipher_mode =
2420 aead_map[aead_xform->aead.algo].cipher_mode;
2421
2422+ if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM) {
2423+ /* IV must include nonce for all counter modes */
2424+ sess->cipher_iv_offset = aead_xform->cipher.iv.offset;
2425+
2427+		/* Clear the authenticate-then-encrypt order flag for GCM */
2427+ sess->sam_sess_params.u.basic.auth_then_encrypt = 0;
2428+ }
2429+
2430 /* Assume IV will be passed together with data. */
2431 sess->sam_sess_params.cipher_iv = NULL;
2432
2433@@ -916,14 +924,14 @@ mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
2434 ret = rte_kvargs_process(kvlist,
2435 RTE_CRYPTODEV_PMD_NAME_ARG,
2436 &parse_name_arg,
2437- &params->common);
2438+ &params->common.name);
2439 if (ret < 0)
2440 goto free_kvlist;
2441
2442 ret = rte_kvargs_process(kvlist,
2443 MRVL_PMD_MAX_NB_SESS_ARG,
2444 &parse_integer_arg,
2445- params);
2446+ &params->max_nb_sessions);
2447 if (ret < 0)
2448 goto free_kvlist;
2449
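
Both kvargs fixes above pass the destination field itself as the opaque argument instead of the enclosing struct, so the handler cannot scribble over unrelated members. A minimal sketch of the corrected pattern (the key and handler names are illustrative)::

    #include <stdlib.h>
    #include <rte_common.h>
    #include <rte_kvargs.h>

    struct init_params { int max_nb_sessions; };

    static int
    parse_int_arg(const char *key __rte_unused, const char *value,
                  void *opaque)
    {
        *(int *)opaque = atoi(value);
        return 0;
    }

    static int
    parse_args(const char *args, struct init_params *p)
    {
        static const char * const keys[] = { "max_nb_sessions", NULL };
        struct rte_kvargs *kv = rte_kvargs_parse(args, keys);
        int ret;

        if (kv == NULL)
            return -1;
        /* Opaque points at the exact field the handler must fill. */
        ret = rte_kvargs_process(kv, "max_nb_sessions",
                                 parse_int_arg, &p->max_nb_sessions);
        rte_kvargs_free(kv);
        return ret;
    }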
2450diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
2451index c61bdca..4eb7ec9 100644
2452--- a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
2453+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
2454@@ -111,7 +111,7 @@ static const struct rte_cryptodev_capabilities
2455 .increment = 1
2456 },
2457 .digest_size = {
2458- .min = 28,
2459+ .min = 12,
2460 .max = 28,
2461 .increment = 0
2462 },
2463@@ -232,7 +232,7 @@ static const struct rte_cryptodev_capabilities
2464 },
2465 .digest_size = {
2466 .min = 12,
2467- .max = 48,
2468+ .max = 64,
2469 .increment = 4
2470 },
2471 }, }
2472@@ -252,7 +252,7 @@ static const struct rte_cryptodev_capabilities
2473 },
2474 .digest_size = {
2475 .min = 12,
2476- .max = 48,
2477+ .max = 64,
2478 .increment = 0
2479 },
2480 }, }
2481@@ -336,9 +336,9 @@ static const struct rte_cryptodev_capabilities
2482 .increment = 0
2483 },
2484 .aad_size = {
2485- .min = 8,
2486- .max = 12,
2487- .increment = 4
2488+ .min = 0,
2489+ .max = 64,
2490+ .increment = 1
2491 },
2492 .iv_size = {
2493 .min = 12,
2494@@ -793,7 +793,7 @@ mrvl_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
2495 MRVL_LOG(ERR, "Error while destroying session!");
2496 }
2497
2498- memset(sess, 0, sizeof(struct mrvl_crypto_session));
2499+ memset(mrvl_sess, 0, sizeof(struct mrvl_crypto_session));
2500 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2501 set_sym_session_private_data(sess, index, NULL);
2502 rte_mempool_put(sess_mp, sess_priv);
2503diff --git a/drivers/crypto/octeontx/otx_cryptodev.c b/drivers/crypto/octeontx/otx_cryptodev.c
2504index 5ce1cf8..36cedb3 100644
2505--- a/drivers/crypto/octeontx/otx_cryptodev.c
2506+++ b/drivers/crypto/octeontx/otx_cryptodev.c
2507@@ -71,6 +71,7 @@ otx_cpt_pci_remove(struct rte_pci_device *pci_dev)
2508 {
2509 struct rte_cryptodev *cryptodev;
2510 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
2511+ void *dev_priv;
2512
2513 if (pci_dev == NULL)
2514 return -EINVAL;
2515@@ -84,11 +85,13 @@ otx_cpt_pci_remove(struct rte_pci_device *pci_dev)
2516 if (pci_dev->driver == NULL)
2517 return -ENODEV;
2518
2519+ dev_priv = cryptodev->data->dev_private;
2520+
2521 /* free crypto device */
2522 rte_cryptodev_pmd_release_device(cryptodev);
2523
2524 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2525- rte_free(cryptodev->data->dev_private);
2526+ rte_free(dev_priv);
2527
2528 cryptodev->device->driver = NULL;
2529 cryptodev->device = NULL;
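
The octeontx remove path above now reads ``dev_private`` only before ``rte_cryptodev_pmd_release_device()``, so it never touches freed memory. The general shape of such a use-after-free fix, with hypothetical types and helpers::

    #include <rte_malloc.h>

    struct dev { void *priv; };

    extern void release_device(struct dev *d); /* may free *d */

    static void
    remove_device(struct dev *d)
    {
        void *priv = d->priv; /* capture before the object can go away */

        release_device(d);    /* d must not be dereferenced after this */
        rte_free(priv);       /* frees via the saved copy */
    }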
2530diff --git a/drivers/crypto/octeontx2/otx2_ipsec_po_ops.h b/drivers/crypto/octeontx2/otx2_ipsec_po_ops.h
2531index bc702d5..ee30131 100644
2532--- a/drivers/crypto/octeontx2/otx2_ipsec_po_ops.h
2533+++ b/drivers/crypto/octeontx2/otx2_ipsec_po_ops.h
2534@@ -61,16 +61,12 @@ process_outb_sa(struct rte_crypto_op *cop,
2535 uint32_t dlen, rlen, extend_head, extend_tail;
2536 struct rte_crypto_sym_op *sym_op = cop->sym;
2537 struct rte_mbuf *m_src = sym_op->m_src;
2538- struct otx2_ipsec_po_sa_ctl *ctl_wrd;
2539 struct cpt_request_info *req = NULL;
2540 struct otx2_ipsec_po_out_hdr *hdr;
2541- struct otx2_ipsec_po_out_sa *sa;
2542 int hdr_len, mdata_len, ret = 0;
2543 vq_cmd_word0_t word0;
2544 char *mdata, *data;
2545
2546- sa = &sess->out_sa;
2547- ctl_wrd = &sa->ctl;
2548 hdr_len = sizeof(*hdr);
2549
2550 dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
2551@@ -107,14 +103,8 @@ process_outb_sa(struct rte_crypto_op *cop,
2552 hdr = (struct otx2_ipsec_po_out_hdr *)rte_pktmbuf_adj(m_src,
2553 RTE_ETHER_HDR_LEN);
2554
2555- if (ctl_wrd->enc_type == OTX2_IPSEC_FP_SA_ENC_AES_GCM) {
2556- memcpy(&hdr->iv[0], &sa->iv.gcm.nonce, 4);
2557- memcpy(&hdr->iv[4], rte_crypto_op_ctod_offset(cop, uint8_t *,
2558- sess->iv_offset), sess->iv_length);
2559- } else if (ctl_wrd->auth_type == OTX2_IPSEC_FP_SA_ENC_AES_CBC) {
2560- memcpy(&hdr->iv[0], rte_crypto_op_ctod_offset(cop, uint8_t *,
2561- sess->iv_offset), sess->iv_length);
2562- }
2563+ memcpy(&hdr->iv[0], rte_crypto_op_ctod_offset(cop, uint8_t *,
2564+ sess->iv_offset), sess->iv_length);
2565
2566 /* Prepare CPT instruction */
2567 word0.u64 = sess->ucmd_w0;
2568diff --git a/drivers/crypto/octeontx2/otx2_security.h b/drivers/crypto/octeontx2/otx2_security.h
2569index 33d3b15..7087ea3 100644
2570--- a/drivers/crypto/octeontx2/otx2_security.h
2571+++ b/drivers/crypto/octeontx2/otx2_security.h
2572@@ -19,14 +19,16 @@
2573 #define OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN 4
2574 #define OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN 16
2575
2576-union otx2_sec_session_ipsec {
2577- struct otx2_sec_session_ipsec_ip ip;
2578- struct otx2_sec_session_ipsec_lp lp;
2579+struct otx2_sec_session_ipsec {
2580+ union {
2581+ struct otx2_sec_session_ipsec_ip ip;
2582+ struct otx2_sec_session_ipsec_lp lp;
2583+ };
2584 enum rte_security_ipsec_sa_direction dir;
2585 };
2586
2587 struct otx2_sec_session {
2588- union otx2_sec_session_ipsec ipsec;
2589+ struct otx2_sec_session_ipsec ipsec;
2590 void *userdata;
2591 /**< Userdata registered by the application */
2592 } __rte_cache_aligned;
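
The layout change matters because in the old union, dir shared storage with the ip/lp session state, so writing either member clobbered the stored direction; wrapping only the two session layouts in a C11 anonymous union keeps dir at its own offset. A stand-alone illustration with simplified stand-in types:

    #include <assert.h>
    #include <stddef.h>

    struct ip_state { int a[4]; };
    struct lp_state { int b[8]; };

    /* Old, buggy layout: 'dir' overlaps the session state. */
    union sess_union {
        struct ip_state ip;
        struct lp_state lp;
        int dir;
    };

    /* Fixed layout: only ip/lp overlap; 'dir' has its own storage. */
    struct sess_struct {
        union {
            struct ip_state ip;
            struct lp_state lp;
        };
        int dir;
    };

    int main(void)
    {
        /* Every union member starts at offset 0, dir included. */
        assert(offsetof(union sess_union, dir) == 0);
        /* In the struct, dir sits after the overlapping members. */
        assert(offsetof(struct sess_struct, dir) == sizeof(struct lp_state));
        return 0;
    }
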
2593diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
2594index a2c8aca..ef47e28 100644
2595--- a/drivers/crypto/qat/qat_asym_pmd.c
2596+++ b/drivers/crypto/qat/qat_asym_pmd.c
2597@@ -251,6 +251,10 @@ qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
2598 struct rte_cryptodev *cryptodev;
2599 struct qat_asym_dev_private *internals;
2600
2601+ if (qat_pci_dev->qat_dev_gen == QAT_GEN3) {
2602+ QAT_LOG(ERR, "Asymmetric crypto PMD not supported on QAT c4xxx");
2603+ return -EFAULT;
2604+ }
2605 snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
2606 qat_pci_dev->name, "asym");
2607 QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
2608diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
2609index 23d059b..6e2193f 100644
2610--- a/drivers/crypto/qat/qat_sym_session.c
2611+++ b/drivers/crypto/qat/qat_sym_session.c
2612@@ -1190,6 +1190,9 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
2613 uint64_t *hash_state_out_be64;
2614 int i;
2615
2616+ /* Initialize to avoid gcc warning */
2617+ memset(digest, 0, sizeof(digest));
2618+
2619 digest_size = qat_hash_get_digest_size(hash_alg);
2620 if (digest_size <= 0)
2621 return -EFAULT;
2622diff --git a/drivers/event/octeontx2/otx2_worker.h b/drivers/event/octeontx2/otx2_worker.h
2623index 0a7d667..89dc79d 100644
2624--- a/drivers/event/octeontx2/otx2_worker.h
2625+++ b/drivers/event/octeontx2/otx2_worker.h
2626@@ -272,7 +272,7 @@ otx2_ssogws_prepare_pkt(const struct otx2_eth_txq *txq, struct rte_mbuf *m,
2627 uint64_t *cmd, const uint32_t flags)
2628 {
2629 otx2_lmt_mov(cmd, txq->cmd, otx2_nix_tx_ext_subs(flags));
2630- otx2_nix_xmit_prepare(m, cmd, flags);
2631+ otx2_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
2632 }
2633
2634 static __rte_always_inline uint16_t
2635diff --git a/drivers/mempool/octeontx2/otx2_mempool_ops.c b/drivers/mempool/octeontx2/otx2_mempool_ops.c
2636index 9ff71bc..d827fd8 100644
2637--- a/drivers/mempool/octeontx2/otx2_mempool_ops.c
2638+++ b/drivers/mempool/octeontx2/otx2_mempool_ops.c
2639@@ -611,7 +611,8 @@ npa_lf_aura_pool_pair_alloc(struct otx2_npa_lf *lf, const uint32_t block_size,
2640 /* Update aura fields */
2641 aura->pool_addr = pool_id;/* AF will translate to associated poolctx */
2642 aura->ena = 1;
2643- aura->shift = __builtin_clz(block_count) - 8;
2644+ aura->shift = rte_log2_u32(block_count);
2645+ aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
2646 aura->limit = block_count;
2647 aura->pool_caching = 1;
2648 aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
2649@@ -626,7 +627,8 @@ npa_lf_aura_pool_pair_alloc(struct otx2_npa_lf *lf, const uint32_t block_size,
2650 pool->ena = 1;
2651 pool->buf_size = block_size / OTX2_ALIGN;
2652 pool->stack_max_pages = stack_size;
2653- pool->shift = __builtin_clz(block_count) - 8;
2654+ pool->shift = rte_log2_u32(block_count);
2655+ pool->shift = pool->shift < 8 ? 0 : pool->shift - 8;
2656 pool->ptr_start = 0;
2657 pool->ptr_end = ~0;
2658 pool->stack_caching = 1;
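
The replaced formula was wrong because for a nonzero 32-bit value __builtin_clz(x) is 31 - floor(log2(x)), not log2(x): with block_count = 1024 the old code yields 21 - 8 = 13, while the intended shift is log2(1024) - 8 = 2. A quick stand-alone check of the arithmetic (log2_u32 below is a plain stand-in for rte_log2_u32, which rounds the log up; it assumes small nonzero inputs):

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for rte_log2_u32(): log2 of v rounded up. */
    static uint32_t log2_u32(uint32_t v)
    {
        uint32_t r = 0;

        while ((1u << r) < v)
            r++;
        return r;
    }

    int main(void)
    {
        uint32_t block_count = 1024;

        /* Old (wrong): clz(1024) = 21 on 32 bits, so shift = 13. */
        uint32_t old_shift = __builtin_clz(block_count) - 8;

        /* New: log2(1024) = 10, clamped and biased down to 2. */
        uint32_t shift = log2_u32(block_count);
        shift = shift < 8 ? 0 : shift - 8;

        assert(old_shift == 13);
        assert(shift == 2);
        return 0;
    }
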
2659diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
2660index 0fb195b..f854098 100644
2661--- a/drivers/net/bnxt/bnxt.h
2662+++ b/drivers/net/bnxt/bnxt.h
2663@@ -605,6 +605,49 @@ struct bnxt_flow_stat_info {
2664 struct bnxt_ctx_mem_buf_info tx_fc_out_tbl;
2665 };
2666
2667+struct bnxt_ring_stats {
2668+ /* Number of transmitted unicast packets */
2669+ uint64_t tx_ucast_pkts;
2670+ /* Number of transmitted multicast packets */
2671+ uint64_t tx_mcast_pkts;
2672+ /* Number of transmitted broadcast packets */
2673+ uint64_t tx_bcast_pkts;
2674+ /* Number of packets discarded in transmit path */
2675+ uint64_t tx_discard_pkts;
2676+ /* Number of packets in transmit path with error */
2677+ uint64_t tx_error_pkts;
2678+ /* Number of transmitted bytes for unicast traffic */
2679+ uint64_t tx_ucast_bytes;
2680+ /* Number of transmitted bytes for multicast traffic */
2681+ uint64_t tx_mcast_bytes;
2682+ /* Number of transmitted bytes for broadcast traffic */
2683+ uint64_t tx_bcast_bytes;
2684+ /* Number of received unicast packets */
2685+ uint64_t rx_ucast_pkts;
2686+ /* Number of received multicast packets */
2687+ uint64_t rx_mcast_pkts;
2688+ /* Number of received broadcast packets */
2689+ uint64_t rx_bcast_pkts;
2690+ /* Number of packets discarded in receive path */
2691+ uint64_t rx_discard_pkts;
2692+ /* Number of packets in receive path with errors */
2693+ uint64_t rx_error_pkts;
2694+ /* Number of received bytes for unicast traffic */
2695+ uint64_t rx_ucast_bytes;
2696+ /* Number of received bytes for multicast traffic */
2697+ uint64_t rx_mcast_bytes;
2698+ /* Number of received bytes for broadcast traffic */
2699+ uint64_t rx_bcast_bytes;
2700+ /* Number of aggregated unicast packets */
2701+ uint64_t rx_agg_pkts;
2702+ /* Number of aggregated unicast bytes */
2703+ uint64_t rx_agg_bytes;
2704+ /* Number of aggregation events */
2705+ uint64_t rx_agg_events;
2706+ /* Number of aborted aggregations */
2707+ uint64_t rx_agg_aborts;
2708+};
2709+
2710 struct bnxt {
2711 void *bar0;
2712
2713@@ -808,6 +851,8 @@ struct bnxt {
2714 uint8_t flow_xstat;
2715 uint16_t max_num_kflows;
2716 uint16_t tx_cfa_action;
2717+ struct bnxt_ring_stats *prev_rx_ring_stats;
2718+ struct bnxt_ring_stats *prev_tx_ring_stats;
2719 };
2720
2721 static
2722diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
2723index ee96ae8..7bfda01 100644
2724--- a/drivers/net/bnxt/bnxt_cpr.c
2725+++ b/drivers/net/bnxt/bnxt_cpr.c
2726@@ -109,6 +109,8 @@ void bnxt_handle_async_event(struct bnxt *bp,
2727 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
2728 /* FALLTHROUGH */
2729 bnxt_link_update_op(bp->eth_dev, 0);
2730+ rte_eth_dev_callback_process(bp->eth_dev,
2731+ RTE_ETH_EVENT_INTR_LSC, NULL);
2732 break;
2733 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2734 PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
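
With this change the PMD raises RTE_ETH_EVENT_INTR_LSC from the async event handler itself. For context, a sketch of the application side that consumes the event; the callback body is illustrative:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    on_link_change(uint16_t port_id, enum rte_eth_event_type type,
                   void *cb_arg, void *ret_param)
    {
        struct rte_eth_link link;

        RTE_SET_USED(type);
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);

        rte_eth_link_get_nowait(port_id, &link);
        printf("port %u link is %s\n", port_id,
               link.link_status ? "up" : "down");
        return 0;
    }

    /* Registered once at setup time:
     * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
     *                               on_link_change, NULL);
     */
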
2735diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
2736index c769bde..fedfb47 100644
2737--- a/drivers/net/bnxt/bnxt_cpr.h
2738+++ b/drivers/net/bnxt/bnxt_cpr.h
2739@@ -8,13 +8,10 @@
2740 #include <stdbool.h>
2741
2742 #include <rte_io.h>
2743+#include "hsi_struct_def_dpdk.h"
2744
2745 struct bnxt_db_info;
2746
2747-#define CMP_VALID(cmp, raw_cons, ring) \
2748- (!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \
2749- CMPL_BASE_V) == !((raw_cons) & ((ring)->ring_size)))
2750-
2751 #define CMPL_VALID(cmp, v) \
2752 (!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \
2753 CMPL_BASE_V) == !(v))
2754@@ -131,4 +128,35 @@ bool bnxt_is_recovery_enabled(struct bnxt *bp);
2755 bool bnxt_is_master_func(struct bnxt *bp);
2756
2757 void bnxt_stop_rxtx(struct bnxt *bp);
2758+
2759+/**
2760+ * Check validity of a completion ring entry. If the entry is valid, include a
2761+ * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
2762+ * completion are not hoisted by the compiler or by the CPU to come before the
2763+ * loading of the "valid" field.
2764+ *
2765+ * Note: the caller must not access any fields in the specified completion
2766+ * entry prior to calling this function.
2767+ *
2768+ * @param cmpl
2769+ * Pointer to an entry in the completion ring.
2770+ * @param raw_cons
2771+ * Raw consumer index of entry in completion ring.
2772+ * @param ring_size
2773+ * Size of completion ring.
2774+ */
2775+static __rte_always_inline bool
2776+bnxt_cpr_cmp_valid(const void *cmpl, uint32_t raw_cons, uint32_t ring_size)
2777+{
2778+ const struct cmpl_base *c = cmpl;
2779+ bool expected, valid;
2780+
2781+ expected = !(raw_cons & ring_size);
2782+ valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
2783+ if (valid == expected) {
2784+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2785+ return true;
2786+ }
2787+ return false;
2788+}
2789 #endif
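
The comment block documents the key ordering rule: the valid bit must be observed before any other completion field is loaded. A generic C11-atomics rendition of the same producer/consumer discipline (the cmpl layout is simplified, not the bnxt descriptor):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct cmpl {
        uint32_t opaque;        /* payload, written by the producer first */
        _Atomic uint32_t valid; /* flag, stored last with release ordering */
    };

    /* Consumer: the acquire load of 'valid' orders the later payload read
     * after it, mirroring what the __ATOMIC_ACQUIRE fence achieves in
     * bnxt_cpr_cmp_valid() above. */
    static bool cmpl_poll(const struct cmpl *c, uint32_t *out)
    {
        if (!atomic_load_explicit(&c->valid, memory_order_acquire))
            return false;
        *out = c->opaque;   /* cannot be hoisted above the acquire load */
        return true;
    }
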
2790diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
2791index 1498f4a..8afe72b 100644
2792--- a/drivers/net/bnxt/bnxt_ethdev.c
2793+++ b/drivers/net/bnxt/bnxt_ethdev.c
2794@@ -583,13 +583,14 @@ static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
2795 return rc;
2796 }
2797
2798-static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
2799+static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size,
2800 struct bnxt_ctx_mem_buf_info *ctx)
2801 {
2802 if (!ctx)
2803 return -EINVAL;
2804
2805- ctx->va = rte_zmalloc(type, size, 0);
2806+ ctx->va = rte_zmalloc_socket(type, size, 0,
2807+ bp->eth_dev->device->numa_node);
2808 if (ctx->va == NULL)
2809 return -ENOMEM;
2810 rte_mem_lock_page(ctx->va);
2811@@ -613,7 +614,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
2812 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
2813 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2814 /* 4 bytes for each counter-id */
2815- rc = bnxt_alloc_ctx_mem_buf(type,
2816+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
2817 max_fc * 4,
2818 &bp->flow_stat->rx_fc_in_tbl);
2819 if (rc)
2820@@ -622,7 +623,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
2821 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
2822 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2823 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
2824- rc = bnxt_alloc_ctx_mem_buf(type,
2825+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
2826 max_fc * 16,
2827 &bp->flow_stat->rx_fc_out_tbl);
2828 if (rc)
2829@@ -631,7 +632,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
2830 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
2831 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2832 /* 4 bytes for each counter-id */
2833- rc = bnxt_alloc_ctx_mem_buf(type,
2834+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
2835 max_fc * 4,
2836 &bp->flow_stat->tx_fc_in_tbl);
2837 if (rc)
2838@@ -640,7 +641,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
2839 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
2840 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2841 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
2842- rc = bnxt_alloc_ctx_mem_buf(type,
2843+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
2844 max_fc * 16,
2845 &bp->flow_stat->tx_fc_out_tbl);
2846 if (rc)
2847@@ -696,6 +697,38 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
2848 return rc;
2849 }
2850
2851+static void bnxt_free_prev_ring_stats(struct bnxt *bp)
2852+{
2853+ rte_free(bp->prev_rx_ring_stats);
2854+ rte_free(bp->prev_tx_ring_stats);
2855+
2856+ bp->prev_rx_ring_stats = NULL;
2857+ bp->prev_tx_ring_stats = NULL;
2858+}
2859+
2860+static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
2861+{
2862+ bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats",
2863+ sizeof(struct bnxt_ring_stats) *
2864+ bp->rx_cp_nr_rings,
2865+ 0);
2866+ if (bp->prev_rx_ring_stats == NULL)
2867+ return -ENOMEM;
2868+
2869+ bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats",
2870+ sizeof(struct bnxt_ring_stats) *
2871+ bp->tx_cp_nr_rings,
2872+ 0);
2873+ if (bp->prev_tx_ring_stats == NULL)
2874+ goto error;
2875+
2876+ return 0;
2877+
2878+error:
2879+ bnxt_free_prev_ring_stats(bp);
2880+ return -ENOMEM;
2881+}
2882+
2883 static int bnxt_start_nic(struct bnxt *bp)
2884 {
2885 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
2886@@ -933,7 +966,7 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
2887 dev_info->max_rx_queues = max_rx_rings;
2888 dev_info->max_tx_queues = max_rx_rings;
2889 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
2890- dev_info->hash_key_size = 40;
2891+ dev_info->hash_key_size = HW_HASH_KEY_SIZE;
2892 max_vnics = bp->max_vnics;
2893
2894 /* MTU specifics */
2895@@ -954,7 +987,6 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
2896
2897 dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
2898
2899- /* *INDENT-OFF* */
2900 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2901 .rx_thresh = {
2902 .pthresh = 8,
2903@@ -976,7 +1008,6 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
2904 };
2905 eth_dev->data->dev_conf.intr_conf.lsc = 1;
2906
2907- eth_dev->data->dev_conf.intr_conf.rxq = 1;
2908 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
2909 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
2910 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
2911@@ -990,8 +1021,6 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
2912 BNXT_SWITCH_PORT_ID_TRUSTED_VF;
2913 }
2914
2915- /* *INDENT-ON* */
2916-
2917 /*
2918 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
2919 * need further investigation.
2920@@ -1435,6 +1464,7 @@ static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
2921 bnxt_shutdown_nic(bp);
2922 bnxt_hwrm_if_change(bp, false);
2923
2924+ bnxt_free_prev_ring_stats(bp);
2925 rte_free(bp->mark_table);
2926 bp->mark_table = NULL;
2927
2928@@ -1489,6 +1519,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
2929 if (rc)
2930 goto error;
2931
2932+ rc = bnxt_alloc_prev_ring_stats(bp);
2933+ if (rc)
2934+ goto error;
2935+
2936 eth_dev->data->dev_started = 1;
2937
2938 bnxt_link_update_op(eth_dev, 1);
2939@@ -1732,11 +1766,6 @@ out:
2940 if (new.link_status != eth_dev->data->dev_link.link_status ||
2941 new.link_speed != eth_dev->data->dev_link.link_speed) {
2942 rte_eth_linkstatus_set(eth_dev, &new);
2943-
2944- rte_eth_dev_callback_process(eth_dev,
2945- RTE_ETH_EVENT_INTR_LSC,
2946- NULL);
2947-
2948 bnxt_print_link_info(eth_dev);
2949 }
2950
2951@@ -1962,7 +1991,6 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
2952 if (rc)
2953 return rc;
2954
2955- /* Retrieve from the default VNIC */
2956 if (!vnic)
2957 return -EINVAL;
2958 if (!vnic->rss_table)
2959@@ -2044,7 +2072,8 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
2960
2961 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
2962 PMD_DRV_LOG(ERR,
2963- "Invalid hashkey length, should be 16 bytes\n");
2964+ "Invalid hashkey length, should be %d bytes\n",
2965+ HW_HASH_KEY_SIZE);
2966 return -EINVAL;
2967 }
2968 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
2969@@ -3007,7 +3036,7 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2970 {
2971 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2972 struct bnxt_cp_ring_info *cpr;
2973- uint32_t desc = 0, raw_cons;
2974+ uint32_t desc = 0, raw_cons, cp_ring_size;
2975 struct bnxt_rx_queue *rxq;
2976 struct rx_pkt_cmpl *rxcmp;
2977 int rc;
2978@@ -3019,6 +3048,7 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2979 rxq = dev->data->rx_queues[rx_queue_id];
2980 cpr = rxq->cp_ring;
2981 raw_cons = cpr->cp_raw_cons;
2982+ cp_ring_size = cpr->cp_ring_struct->ring_size;
2983
2984 while (1) {
2985 uint32_t agg_cnt, cons, cmpl_type;
2986@@ -3026,7 +3056,7 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2987 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2988 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2989
2990- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
2991+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
2992 break;
2993
2994 cmpl_type = CMP_TYPE(rxcmp);
2995@@ -3070,7 +3100,7 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2996 struct bnxt_rx_queue *rxq = rx_queue;
2997 struct bnxt_cp_ring_info *cpr;
2998 struct bnxt_rx_ring_info *rxr;
2999- uint32_t desc, raw_cons;
3000+ uint32_t desc, raw_cons, cp_ring_size;
3001 struct bnxt *bp = rxq->bp;
3002 struct rx_pkt_cmpl *rxcmp;
3003 int rc;
3004@@ -3084,6 +3114,7 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
3005
3006 rxr = rxq->rx_ring;
3007 cpr = rxq->cp_ring;
3008+ cp_ring_size = cpr->cp_ring_struct->ring_size;
3009
3010 /*
3011 * For the vector receive case, the completion at the requested
3012@@ -3100,7 +3131,7 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
3013 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
3014 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
3015
3016- if (CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
3017+ if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
3018 return RTE_ETH_RX_DESC_DONE;
3019
3020 /* Check whether rx desc has an mbuf attached. */
3021@@ -3126,7 +3157,7 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
3022 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
3023 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
3024
3025- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
3026+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
3027 break;
3028
3029 cmpl_type = CMP_TYPE(rxcmp);
3030@@ -3178,41 +3209,47 @@ static int
3031 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
3032 {
3033 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
3034- struct bnxt_tx_ring_info *txr;
3035- struct bnxt_cp_ring_info *cpr;
3036- struct bnxt_sw_tx_bd *tx_buf;
3037- struct tx_pkt_cmpl *txcmp;
3038- uint32_t cons, cp_cons;
3039+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
3040+ uint32_t ring_mask, raw_cons, nb_tx_pkts = 0;
3041+ struct cmpl_base *cp_desc_ring;
3042 int rc;
3043
3044- if (!txq)
3045- return -EINVAL;
3046-
3047 rc = is_bnxt_in_error(txq->bp);
3048 if (rc)
3049 return rc;
3050
3051- cpr = txq->cp_ring;
3052- txr = txq->tx_ring;
3053-
3054 if (offset >= txq->nb_tx_desc)
3055 return -EINVAL;
3056
3057- cons = RING_CMP(cpr->cp_ring_struct, offset);
3058- txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
3059- cp_cons = cpr->cp_raw_cons;
3060+ /* Return "desc done" if descriptor is available for use. */
3061+ if (bnxt_tx_bds_in_hw(txq) <= offset)
3062+ return RTE_ETH_TX_DESC_DONE;
3063
3064- if (cons > cp_cons) {
3065- if (CMPL_VALID(txcmp, cpr->valid))
3066- return RTE_ETH_TX_DESC_UNAVAIL;
3067- } else {
3068- if (CMPL_VALID(txcmp, !cpr->valid))
3069- return RTE_ETH_TX_DESC_UNAVAIL;
3070+ raw_cons = cpr->cp_raw_cons;
3071+ cp_desc_ring = cpr->cp_desc_ring;
3072+ ring_mask = cpr->cp_ring_struct->ring_mask;
3073+
3074+ /* Check to see if hw has posted a completion for the descriptor. */
3075+ while (1) {
3076+ struct tx_cmpl *txcmp;
3077+ uint32_t cons;
3078+
3079+ cons = RING_CMPL(ring_mask, raw_cons);
3080+ txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
3081+
3082+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
3083+ break;
3084+
3085+ if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
3086+ nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque);
3087+
3088+ if (nb_tx_pkts > offset)
3089+ return RTE_ETH_TX_DESC_DONE;
3090+
3091+ raw_cons = NEXT_RAW_CMP(raw_cons);
3092 }
3093- tx_buf = &txr->tx_buf_ring[cons];
3094- if (tx_buf->mbuf == NULL)
3095- return RTE_ETH_TX_DESC_DONE;
3096
3097+ /* Descriptor is pending transmit, not yet completed by hardware. */
3098 return RTE_ETH_TX_DESC_FULL;
3099 }
3100
3101@@ -4096,6 +4133,10 @@ err_start:
3102 err:
3103 bp->flags |= BNXT_FLAG_FATAL_ERROR;
3104 bnxt_uninit_resources(bp, false);
3105+ if (bp->eth_dev->data->dev_conf.intr_conf.rmv)
3106+ rte_eth_dev_callback_process(bp->eth_dev,
3107+ RTE_ETH_EVENT_INTR_RMV,
3108+ NULL);
3109 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
3110 }
3111
3112@@ -4405,7 +4446,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
3113 if (!mz) {
3114 mz = rte_memzone_reserve_aligned(mz_name,
3115 rmem->nr_pages * 8,
3116- SOCKET_ID_ANY,
3117+ bp->eth_dev->device->numa_node,
3118 RTE_MEMZONE_2MB |
3119 RTE_MEMZONE_SIZE_HINT_ONLY |
3120 RTE_MEMZONE_IOVA_CONTIG,
3121@@ -4428,7 +4469,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
3122 if (!mz) {
3123 mz = rte_memzone_reserve_aligned(mz_name,
3124 mem_size,
3125- SOCKET_ID_ANY,
3126+ bp->eth_dev->device->numa_node,
3127 RTE_MEMZONE_1GB |
3128 RTE_MEMZONE_SIZE_HINT_ONLY |
3129 RTE_MEMZONE_IOVA_CONTIG,
3130@@ -5652,7 +5693,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
3131 goto error_free;
3132
3133 PMD_DRV_LOG(INFO,
3134- DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n",
3135+ "Found %s device at mem %" PRIX64 ", node addr %pM\n",
3136+ DRV_MODULE_NAME,
3137 pci_dev->mem_resource[0].phys_addr,
3138 pci_dev->mem_resource[0].addr);
3139
3140@@ -6106,6 +6148,7 @@ static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
3141 static struct rte_pci_driver bnxt_rte_pmd = {
3142 .id_table = bnxt_pci_id_map,
3143 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3144+ RTE_PCI_DRV_INTR_RMV |
3145 RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
3146 * and OVS-DPDK
3147 */
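
A recurring theme in this file is replacing SOCKET_ID_ANY (or the main lcore's socket) with bp->eth_dev->device->numa_node, so rings, stats buffers and context memory land on the NIC's own NUMA node. A hedged sketch of the pattern ("my_ctx" is an illustrative zone name):

    #include <rte_ethdev.h>
    #include <rte_malloc.h>

    /* numa_node is -1 (any socket) when the bus cannot determine
     * locality, and rte_zmalloc_socket() falls back accordingly. */
    static void *
    alloc_port_ctx(struct rte_eth_dev *eth_dev, size_t size)
    {
        return rte_zmalloc_socket("my_ctx", size, RTE_CACHE_LINE_SIZE,
                                  eth_dev->device->numa_node);
    }
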
3148diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
3149index 11034b6..646d8dc 100644
3150--- a/drivers/net/bnxt/bnxt_flow.c
3151+++ b/drivers/net/bnxt/bnxt_flow.c
3152@@ -919,36 +919,59 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
3153 return l2_filter;
3154 }
3155
3156-static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3157+static void bnxt_vnic_cleanup(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3158+{
3159+ if (vnic->rx_queue_cnt > 1)
3160+ bnxt_hwrm_vnic_ctx_free(bp, vnic);
3161+
3162+ bnxt_hwrm_vnic_free(bp, vnic);
3163+
3164+ rte_free(vnic->fw_grp_ids);
3165+ vnic->fw_grp_ids = NULL;
3166+
3167+ vnic->rx_queue_cnt = 0;
3168+}
3169+
3170+static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
3171+ const struct rte_flow_action *act,
3172+ struct rte_flow_error *error)
3173 {
3174 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3175 uint64_t rx_offloads = dev_conf->rxmode.offloads;
3176 int rc;
3177
3178 if (bp->nr_vnics > bp->max_vnics - 1)
3179- return -ENOMEM;
3180+ return rte_flow_error_set(error, EINVAL,
3181+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3182+ NULL,
3183+ "Group id is invalid");
3184
3185 rc = bnxt_vnic_grp_alloc(bp, vnic);
3186 if (rc)
3187- goto ret;
3188+ return rte_flow_error_set(error, -rc,
3189+ RTE_FLOW_ERROR_TYPE_ACTION,
3190+ act,
3191+ "Failed to alloc VNIC group");
3192
3193 rc = bnxt_hwrm_vnic_alloc(bp, vnic);
3194 if (rc) {
3195- PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
3196+ rte_flow_error_set(error, -rc,
3197+ RTE_FLOW_ERROR_TYPE_ACTION,
3198+ act,
3199+ "Failed to alloc VNIC");
3200 goto ret;
3201 }
3202- bp->nr_vnics++;
3203
3204 /* RSS context is required only when there is more than one RSS ring */
3205 if (vnic->rx_queue_cnt > 1) {
3206- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
3207+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
3208 if (rc) {
3209- PMD_DRV_LOG(ERR,
3210- "HWRM vnic ctx alloc failure: %x\n", rc);
3211+ rte_flow_error_set(error, -rc,
3212+ RTE_FLOW_ERROR_TYPE_ACTION,
3213+ act,
3214+ "Failed to alloc VNIC context");
3215 goto ret;
3216 }
3217- } else {
3218- PMD_DRV_LOG(DEBUG, "No RSS context required\n");
3219 }
3220
3221 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3222@@ -957,12 +980,29 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3223 vnic->vlan_strip = false;
3224
3225 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
3226- if (rc)
3227+ if (rc) {
3228+ rte_flow_error_set(error, -rc,
3229+ RTE_FLOW_ERROR_TYPE_ACTION,
3230+ act,
3231+ "Failed to configure VNIC");
3232+ goto ret;
3233+ }
3234+
3235+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
3236+ if (rc) {
3237+ rte_flow_error_set(error, -rc,
3238+ RTE_FLOW_ERROR_TYPE_ACTION,
3239+ act,
3240+ "Failed to configure VNIC plcmode");
3241 goto ret;
3242+ }
3243
3244- bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
3245+ bp->nr_vnics++;
3246+
3247+ return 0;
3248
3249 ret:
3250+ bnxt_vnic_cleanup(bp, vnic);
3251 return rc;
3252 }
3253
3254@@ -1135,16 +1175,9 @@ start:
3255
3256 PMD_DRV_LOG(DEBUG, "VNIC found\n");
3257
3258- rc = bnxt_vnic_prep(bp, vnic);
3259- if (rc) {
3260- rte_flow_error_set(error,
3261- EINVAL,
3262- RTE_FLOW_ERROR_TYPE_ACTION,
3263- act,
3264- "VNIC prep fail");
3265- rc = -rte_errno;
3266+ rc = bnxt_vnic_prep(bp, vnic, act, error);
3267+ if (rc)
3268 goto ret;
3269- }
3270
3271 PMD_DRV_LOG(DEBUG,
3272 "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
3273@@ -1355,16 +1388,9 @@ use_vnic:
3274 vnic->end_grp_id = rss->queue[rss->queue_num - 1];
3275 vnic->func_default = 0; //This is not a default VNIC.
3276
3277- rc = bnxt_vnic_prep(bp, vnic);
3278- if (rc) {
3279- rte_flow_error_set(error,
3280- EINVAL,
3281- RTE_FLOW_ERROR_TYPE_ACTION,
3282- act,
3283- "VNIC prep fail");
3284- rc = -rte_errno;
3285+ rc = bnxt_vnic_prep(bp, vnic, act, error);
3286+ if (rc)
3287 goto ret;
3288- }
3289
3290 PMD_DRV_LOG(DEBUG,
3291 "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
3292@@ -1529,9 +1555,11 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
3293
3294 filter = bnxt_get_unused_filter(bp);
3295 if (filter == NULL) {
3296- PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
3297+ rte_flow_error_set(error, ENOSPC,
3298+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3299+ "Not enough resources for a new flow");
3300 bnxt_release_flow_lock(bp);
3301- return -ENOMEM;
3302+ return -ENOSPC;
3303 }
3304
3305 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
3306@@ -1542,10 +1570,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
3307 vnic = find_matching_vnic(bp, filter);
3308 if (vnic) {
3309 if (STAILQ_EMPTY(&vnic->filter)) {
3310- rte_free(vnic->fw_grp_ids);
3311- bnxt_hwrm_vnic_ctx_free(bp, vnic);
3312- bnxt_hwrm_vnic_free(bp, vnic);
3313- vnic->rx_queue_cnt = 0;
3314+ bnxt_vnic_cleanup(bp, vnic);
3315 bp->nr_vnics--;
3316 PMD_DRV_LOG(DEBUG, "Free VNIC\n");
3317 }
3318@@ -1931,12 +1956,20 @@ static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
3319 /* Tunnel doesn't belong to this VF, so don't send HWRM
3320 * cmd, just delete the flow from driver
3321 */
3322- if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
3323+ if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) {
3324 PMD_DRV_LOG(ERR,
3325 "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
3326- else
3327+ } else {
3328 ret = bnxt_hwrm_tunnel_redirect_free(bp,
3329 filter->tunnel_type);
3330+ if (ret) {
3331+ rte_flow_error_set(error, -ret,
3332+ RTE_FLOW_ERROR_TYPE_HANDLE,
3333+ NULL,
3334+ "Unable to free tunnel redirection");
3335+ return ret;
3336+ }
3337+ }
3338 }
3339 return ret;
3340 }
3341@@ -1999,12 +2032,7 @@ done:
3342 */
3343 if (vnic && !vnic->func_default &&
3344 STAILQ_EMPTY(&vnic->flow_list)) {
3345- rte_free(vnic->fw_grp_ids);
3346- if (vnic->rx_queue_cnt > 1)
3347- bnxt_hwrm_vnic_ctx_free(bp, vnic);
3348-
3349- bnxt_hwrm_vnic_free(bp, vnic);
3350- vnic->rx_queue_cnt = 0;
3351+ bnxt_vnic_cleanup(bp, vnic);
3352 bp->nr_vnics--;
3353 }
3354 } else {
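
bnxt_vnic_prep() and its callers now report failures through rte_flow_error_set(), which records the cause and message in the caller-supplied rte_flow_error, sets rte_errno, and returns the negated error code, so the result can be returned directly. A minimal sketch of the convention (the validation check itself is illustrative):

    #include <errno.h>
    #include <rte_flow.h>

    static int
    example_action_check(const struct rte_flow_action *act,
                         struct rte_flow_error *error)
    {
        if (act == NULL)
            return rte_flow_error_set(error, EINVAL,
                                      RTE_FLOW_ERROR_TYPE_ACTION, act,
                                      "action must not be NULL");
        return 0;
    }
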
3355diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
3356index 5ed38c9..39778e3 100644
3357--- a/drivers/net/bnxt/bnxt_hwrm.c
3358+++ b/drivers/net/bnxt/bnxt_hwrm.c
3359@@ -635,9 +635,13 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
3360
3361 HWRM_CHECK_RESULT();
3362
3363- if (!BNXT_CHIP_THOR(bp) &&
3364- !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
3365- return 0;
3366+ if (BNXT_CHIP_THOR(bp)) {
3367+ if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS))
3368+ return 0;
3369+ } else {
3370+ if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
3371+ return 0;
3372+ }
3373
3374 if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
3375 bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
3376@@ -1359,6 +1363,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
3377
3378 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
3379 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
3380+ link_info->auto_link_speed_mask = rte_le_to_cpu_16(resp->auto_link_speed_mask);
3381 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
3382 link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
3383 link_info->phy_ver[0] = resp->phy_maj;
3384@@ -1412,6 +1417,12 @@ int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
3385
3386 HWRM_UNLOCK();
3387
3388+ /* Older firmware does not have supported_auto_speeds, so assume
3389+ * that all supported speeds can be autonegotiated.
3390+ */
3391+ if (link_info->auto_link_speed_mask && !link_info->support_auto_speeds)
3392+ link_info->support_auto_speeds = link_info->support_speeds;
3393+
3394 return 0;
3395 }
3396
3397@@ -1665,12 +1676,16 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
3398 struct hwrm_ring_free_input req = {.req_type = 0 };
3399 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3400
3401+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
3402+ return -EINVAL;
3403+
3404 HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
3405
3406 req.ring_type = ring_type;
3407 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
3408
3409 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3410+ ring->fw_ring_id = INVALID_HW_RING_ID;
3411
3412 if (rc || resp->error_code) {
3413 if (rc == 0 && resp->error_code)
3414@@ -1756,7 +1771,7 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3415 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
3416 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3417
3418- if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
3419+ if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
3420 return rc;
3421
3422 HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
3423@@ -1777,6 +1792,9 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *c
3424 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
3425 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3426
3427+ if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE)
3428+ return 0;
3429+
3430 HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
3431
3432 req.update_period_ms = rte_cpu_to_le_32(0);
3433@@ -1800,6 +1818,9 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cp
3434 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
3435 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
3436
3437+ if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
3438+ return 0;
3439+
3440 HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
3441
3442 req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
3443@@ -1809,6 +1830,8 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cp
3444 HWRM_CHECK_RESULT();
3445 HWRM_UNLOCK();
3446
3447+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
3448+
3449 return rc;
3450 }
3451
3452@@ -2449,48 +2472,54 @@ bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
3453 unsigned int i;
3454 struct bnxt_cp_ring_info *cpr;
3455
3456- for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
3457+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3458
3459- if (i >= bp->rx_cp_nr_rings) {
3460- cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
3461- } else {
3462- cpr = bp->rx_queues[i]->cp_ring;
3463- if (BNXT_HAS_RING_GRPS(bp))
3464- bp->grp_info[i].fw_stats_ctx = -1;
3465- }
3466- if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
3467- rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
3468- cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
3469- if (rc)
3470- return rc;
3471- }
3472+ cpr = bp->rx_queues[i]->cp_ring;
3473+ if (BNXT_HAS_RING_GRPS(bp))
3474+ bp->grp_info[i].fw_stats_ctx = -1;
3475+ rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
3476+ if (rc)
3477+ return rc;
3478 }
3479+
3480+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
3481+ cpr = bp->tx_queues[i]->cp_ring;
3482+ rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
3483+ if (rc)
3484+ return rc;
3485+ }
3486+
3487 return 0;
3488 }
3489
3490 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
3491 {
3492+ struct bnxt_cp_ring_info *cpr;
3493 unsigned int i;
3494 int rc = 0;
3495
3496- for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
3497- struct bnxt_tx_queue *txq;
3498- struct bnxt_rx_queue *rxq;
3499- struct bnxt_cp_ring_info *cpr;
3500+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3501+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
3502
3503- if (i >= bp->rx_cp_nr_rings) {
3504- txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
3505- cpr = txq->cp_ring;
3506- } else {
3507- rxq = bp->rx_queues[i];
3508- cpr = rxq->cp_ring;
3509+ cpr = rxq->cp_ring;
3510+ if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) {
3511+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
3512+ if (rc)
3513+ return rc;
3514 }
3515+ }
3516
3517- rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
3518+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
3519+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
3520
3521- if (rc)
3522- return rc;
3523+ cpr = txq->cp_ring;
3524+ if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) {
3525+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
3526+ if (rc)
3527+ return rc;
3528+ }
3529 }
3530+
3531 return rc;
3532 }
3533
3534@@ -2523,10 +2552,9 @@ void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3535 bnxt_hwrm_ring_free(bp, cp_ring,
3536 HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
3537 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
3538- memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
3539- sizeof(*cpr->cp_desc_ring));
3540+ memset(cpr->cp_desc_ring, 0,
3541+ cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
3542 cpr->cp_raw_cons = 0;
3543- cpr->valid = 0;
3544 }
3545
3546 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3547@@ -2534,12 +2562,11 @@ void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3548 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
3549
3550 bnxt_hwrm_ring_free(bp, cp_ring,
3551- HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
3552+ HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
3553 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
3554- memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
3555- sizeof(*cpr->cp_desc_ring));
3556+ memset(cpr->cp_desc_ring, 0,
3557+ cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
3558 cpr->cp_raw_cons = 0;
3559- cpr->valid = 0;
3560 }
3561
3562 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
3563@@ -2549,31 +2576,46 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
3564 struct bnxt_ring *ring = rxr->rx_ring_struct;
3565 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
3566
3567- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3568- bnxt_hwrm_ring_free(bp, ring,
3569- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
3570- ring->fw_ring_id = INVALID_HW_RING_ID;
3571- if (BNXT_HAS_RING_GRPS(bp))
3572- bp->grp_info[queue_index].rx_fw_ring_id =
3573- INVALID_HW_RING_ID;
3574- }
3575+ bnxt_hwrm_ring_free(bp, ring,
3576+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
3577+ if (BNXT_HAS_RING_GRPS(bp))
3578+ bp->grp_info[queue_index].rx_fw_ring_id =
3579+ INVALID_HW_RING_ID;
3580+
3581 ring = rxr->ag_ring_struct;
3582- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3583- bnxt_hwrm_ring_free(bp, ring,
3584- BNXT_CHIP_THOR(bp) ?
3585- HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
3586- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
3587- if (BNXT_HAS_RING_GRPS(bp))
3588- bp->grp_info[queue_index].ag_fw_ring_id =
3589- INVALID_HW_RING_ID;
3590- }
3591- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
3592- bnxt_free_cp_ring(bp, cpr);
3593+ bnxt_hwrm_ring_free(bp, ring,
3594+ BNXT_CHIP_THOR(bp) ?
3595+ HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
3596+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
3597+ if (BNXT_HAS_RING_GRPS(bp))
3598+ bp->grp_info[queue_index].ag_fw_ring_id =
3599+ INVALID_HW_RING_ID;
3600+
3601+ bnxt_free_cp_ring(bp, cpr);
3602
3603 if (BNXT_HAS_RING_GRPS(bp))
3604 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
3605 }
3606
3607+int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index)
3608+{
3609+ int rc;
3610+ struct hwrm_ring_reset_input req = {.req_type = 0 };
3611+ struct hwrm_ring_reset_output *resp = bp->hwrm_cmd_resp_addr;
3612+
3613+ HWRM_PREP(&req, HWRM_RING_RESET, BNXT_USE_CHIMP_MB);
3614+
3615+ req.ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX_RING_GRP;
3616+ req.ring_id = rte_cpu_to_le_16(bp->grp_info[queue_index].fw_grp_id);
3617+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3618+
3619+ HWRM_CHECK_RESULT();
3620+
3621+ HWRM_UNLOCK();
3622+
3623+ return rc;
3624+}
3625+
3626 static int
3627 bnxt_free_all_hwrm_rings(struct bnxt *bp)
3628 {
3629@@ -3081,15 +3123,8 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3630 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3631 bp->link_info->link_signal_mode);
3632 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3633- /* Autoneg can be done only when the FW allows.
3634- * When user configures fixed speed of 40G and later changes to
3635- * any other speed, auto_link_speed/force_link_speed is still set
3636- * to 40G until link comes up at new speed.
3637- */
3638- if (autoneg == 1 &&
3639- !(!BNXT_CHIP_THOR(bp) &&
3640- (bp->link_info->auto_link_speed ||
3641- bp->link_info->force_link_speed))) {
3642+ /* Autoneg can be done only when the FW allows. */
3643+ if (autoneg == 1 && bp->link_info->support_auto_speeds) {
3644 link_req.phy_flags |=
3645 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3646 link_req.auto_link_speed_mask =
3647@@ -3145,7 +3180,6 @@ error:
3648 return rc;
3649 }
3650
3651-/* JIRA 22088 */
3652 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3653 {
3654 struct hwrm_func_qcfg_input req = {0};
3655@@ -3162,8 +3196,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3656
3657 HWRM_CHECK_RESULT();
3658
3659- /* Hard Coded.. 0xfff VLAN ID mask */
3660- bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3661+ bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
3662
3663 svif_info = rte_le_to_cpu_16(resp->svif_info);
3664 if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3665@@ -3233,16 +3266,6 @@ int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3666 bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3667 bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3668
3669- /* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3670- if (bp->parent->vnic == 0) {
3671- PMD_DRV_LOG(DEBUG, "parent VNIC unavailable.\n");
3672- /* Use hard-coded values appropriate for current Wh+ fw. */
3673- if (bp->parent->fid == 2)
3674- bp->parent->vnic = 0x100;
3675- else
3676- bp->parent->vnic = 1;
3677- }
3678-
3679 HWRM_UNLOCK();
3680
3681 return 0;
3682@@ -4145,8 +4168,20 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3683 return rc;
3684 }
3685
3686-int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3687- struct rte_eth_stats *stats, uint8_t rx)
3688+static void bnxt_update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
3689+{
3690+ /* If the HW returned a zero sample for this counter while the
3691+ * previous sample was non-zero, treat the zero as a transient
3692+ * glitch and keep the previous iteration's counter value.
3693+ */
3694+ if (*prev_cntr && *cntr == 0)
3695+ *cntr = *prev_cntr;
3696+ else
3697+ *prev_cntr = *cntr;
3698+}
3699+
3700+int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
3701+ struct bnxt_ring_stats *ring_stats, bool rx)
3702 {
3703 int rc = 0;
3704 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3705@@ -4161,21 +4196,85 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3706 HWRM_CHECK_RESULT();
3707
3708 if (rx) {
3709- stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3710- stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3711- stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3712- stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3713- stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3714- stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3715- stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
3716- stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
3717+ struct bnxt_ring_stats *prev_stats = &bp->prev_rx_ring_stats[idx];
3718+
3719+ ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3720+ bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts,
3721+ &prev_stats->rx_ucast_pkts);
3722+
3723+ ring_stats->rx_mcast_pkts = rte_le_to_cpu_64(resp->rx_mcast_pkts);
3724+ bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts,
3725+ &prev_stats->rx_mcast_pkts);
3726+
3727+ ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts);
3728+ bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts,
3729+ &prev_stats->rx_bcast_pkts);
3730+
3731+ ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3732+ bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes,
3733+ &prev_stats->rx_ucast_bytes);
3734+
3735+ ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes);
3736+ bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes,
3737+ &prev_stats->rx_mcast_bytes);
3738+
3739+ ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes);
3740+ bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes,
3741+ &prev_stats->rx_bcast_bytes);
3742+
3743+ ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts);
3744+ bnxt_update_prev_stat(&ring_stats->rx_discard_pkts,
3745+ &prev_stats->rx_discard_pkts);
3746+
3747+ ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts);
3748+ bnxt_update_prev_stat(&ring_stats->rx_error_pkts,
3749+ &prev_stats->rx_error_pkts);
3750+
3751+ ring_stats->rx_agg_pkts = rte_le_to_cpu_64(resp->rx_agg_pkts);
3752+ bnxt_update_prev_stat(&ring_stats->rx_agg_pkts,
3753+ &prev_stats->rx_agg_pkts);
3754+
3755+ ring_stats->rx_agg_bytes = rte_le_to_cpu_64(resp->rx_agg_bytes);
3756+ bnxt_update_prev_stat(&ring_stats->rx_agg_bytes,
3757+ &prev_stats->rx_agg_bytes);
3758+
3759+ ring_stats->rx_agg_events = rte_le_to_cpu_64(resp->rx_agg_events);
3760+ bnxt_update_prev_stat(&ring_stats->rx_agg_events,
3761+ &prev_stats->rx_agg_events);
3762+
3763+ ring_stats->rx_agg_aborts = rte_le_to_cpu_64(resp->rx_agg_aborts);
3764+ bnxt_update_prev_stat(&ring_stats->rx_agg_aborts,
3765+ &prev_stats->rx_agg_aborts);
3766 } else {
3767- stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3768- stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3769- stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3770- stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3771- stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3772- stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3773+ struct bnxt_ring_stats *prev_stats = &bp->prev_tx_ring_stats[idx];
3774+
3775+ ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3776+ bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts,
3777+ &prev_stats->tx_ucast_pkts);
3778+
3779+ ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts);
3780+ bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts,
3781+ &prev_stats->tx_mcast_pkts);
3782+
3783+ ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts);
3784+ bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts,
3785+ &prev_stats->tx_bcast_pkts);
3786+
3787+ ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3788+ bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes,
3789+ &prev_stats->tx_ucast_bytes);
3790+
3791+ ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes);
3792+ bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes,
3793+ &prev_stats->tx_mcast_bytes);
3794+
3795+ ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes);
3796+ bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes,
3797+ &prev_stats->tx_bcast_bytes);
3798+
3799+ ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts);
3800+ bnxt_update_prev_stat(&ring_stats->tx_discard_pkts,
3801+ &prev_stats->tx_discard_pkts);
3802 }
3803
3804 HWRM_UNLOCK();
3805@@ -5841,4 +5940,4 @@ int bnxt_hwrm_poll_ver_get(struct bnxt *bp)
3806 HWRM_UNLOCK();
3807
3808 return rc;
3809-}
3810\ No newline at end of file
3811+}
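
bnxt_update_prev_stat() papers over firmware samples that momentarily read zero (for example around a stats context reallocation), so per-ring counters never appear to run backwards. The logic is small enough to verify stand-alone; the assertions below trace one glitch cycle:

    #include <assert.h>
    #include <stdint.h>

    /* Same logic as bnxt_update_prev_stat(): a zero sample while a
     * previous non-zero sample exists is treated as a glitch. */
    static void update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
    {
        if (*prev_cntr && *cntr == 0)
            *cntr = *prev_cntr;
        else
            *prev_cntr = *cntr;
    }

    int main(void)
    {
        uint64_t prev = 0, cur;

        cur = 100; update_prev_stat(&cur, &prev); /* normal sample */
        assert(cur == 100 && prev == 100);

        cur = 0;   update_prev_stat(&cur, &prev); /* glitch: keep 100 */
        assert(cur == 100 && prev == 100);

        cur = 150; update_prev_stat(&cur, &prev); /* resumes normally */
        assert(cur == 150 && prev == 150);
        return 0;
    }
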
3812diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
3813index a67a17b..82010e6 100644
3814--- a/drivers/net/bnxt/bnxt_hwrm.h
3815+++ b/drivers/net/bnxt/bnxt_hwrm.h
3816@@ -165,9 +165,6 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
3817 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx);
3818
3819 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
3820-int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3821- struct rte_eth_stats *stats, uint8_t rx);
3822-
3823 int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout);
3824
3825 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
3826@@ -297,4 +294,7 @@ int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
3827 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep);
3828 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep);
3829 int bnxt_hwrm_poll_ver_get(struct bnxt *bp);
3830+int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
3831+ struct bnxt_ring_stats *stats, bool rx);
3832+int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index);
3833 #endif
3834diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
3835index 40e1b0c..1a99508 100644
3836--- a/drivers/net/bnxt/bnxt_irq.c
3837+++ b/drivers/net/bnxt/bnxt_irq.c
3838@@ -21,11 +21,14 @@ void bnxt_int_handler(void *param)
3839 {
3840 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
3841 struct bnxt *bp = eth_dev->data->dev_private;
3842- struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
3843+ uint32_t cons, raw_cons, cp_ring_size;
3844+ struct bnxt_cp_ring_info *cpr;
3845 struct cmpl_base *cmp;
3846- uint32_t raw_cons;
3847- uint32_t cons;
3848
3849+
3850+ if (bp == NULL)
3851+ return;
3852+ cpr = bp->async_cp_ring;
3853 if (cpr == NULL)
3854 return;
3855
3856@@ -42,10 +45,11 @@ void bnxt_int_handler(void *param)
3857 return;
3858 }
3859
3860+ cp_ring_size = cpr->cp_ring_struct->ring_size;
3861 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
3862 cmp = &cpr->cp_desc_ring[cons];
3863
3864- if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct))
3865+ if (!bnxt_cpr_cmp_valid(cmp, raw_cons, cp_ring_size))
3866 break;
3867
3868 bnxt_event_hwrm_resp_handler(bp, cmp);
3869diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
3870index 94cf7d3..6834244 100644
3871--- a/drivers/net/bnxt/bnxt_ring.c
3872+++ b/drivers/net/bnxt/bnxt_ring.c
3873@@ -94,7 +94,7 @@ int bnxt_alloc_ring_grps(struct bnxt *bp)
3874 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
3875 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
3876 */
3877-int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
3878+int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
3879 struct bnxt_tx_queue *txq,
3880 struct bnxt_rx_queue *rxq,
3881 struct bnxt_cp_ring_info *cp_ring_info,
3882@@ -203,7 +203,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
3883 mz = rte_memzone_lookup(mz_name);
3884 if (!mz) {
3885 mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
3886- SOCKET_ID_ANY,
3887+ socket_id,
3888 RTE_MEMZONE_2MB |
3889 RTE_MEMZONE_SIZE_HINT_ONLY |
3890 RTE_MEMZONE_IOVA_CONTIG,
3891@@ -422,24 +422,23 @@ int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
3892 struct bnxt_cp_ring_info *nqr;
3893 struct bnxt_ring *ring;
3894 int ring_index = BNXT_NUM_ASYNC_CPR(bp);
3895- unsigned int socket_id;
3896 uint8_t ring_type;
3897 int rc = 0;
3898
3899 if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
3900 return 0;
3901
3902- socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
3903-
3904 nqr = rte_zmalloc_socket("nqr",
3905 sizeof(struct bnxt_cp_ring_info),
3906- RTE_CACHE_LINE_SIZE, socket_id);
3907+ RTE_CACHE_LINE_SIZE,
3908+ bp->eth_dev->device->numa_node);
3909 if (nqr == NULL)
3910 return -ENOMEM;
3911
3912 ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
3913 sizeof(struct bnxt_ring),
3914- RTE_CACHE_LINE_SIZE, socket_id);
3915+ RTE_CACHE_LINE_SIZE,
3916+ bp->eth_dev->device->numa_node);
3917 if (ring == NULL) {
3918 rte_free(nqr);
3919 return -ENOMEM;
3920@@ -454,7 +453,8 @@ int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
3921 ring->fw_ring_id = INVALID_HW_RING_ID;
3922
3923 nqr->cp_ring_struct = ring;
3924- rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr");
3925+ rc = bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
3926+ NULL, nqr, NULL, "l2_nqr");
3927 if (rc) {
3928 rte_free(ring);
3929 rte_free(nqr);
3930@@ -837,22 +837,21 @@ int bnxt_alloc_async_ring_struct(struct bnxt *bp)
3931 {
3932 struct bnxt_cp_ring_info *cpr = NULL;
3933 struct bnxt_ring *ring = NULL;
3934- unsigned int socket_id;
3935
3936 if (BNXT_NUM_ASYNC_CPR(bp) == 0)
3937 return 0;
3938
3939- socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
3940-
3941 cpr = rte_zmalloc_socket("cpr",
3942 sizeof(struct bnxt_cp_ring_info),
3943- RTE_CACHE_LINE_SIZE, socket_id);
3944+ RTE_CACHE_LINE_SIZE,
3945+ bp->eth_dev->device->numa_node);
3946 if (cpr == NULL)
3947 return -ENOMEM;
3948
3949 ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
3950 sizeof(struct bnxt_ring),
3951- RTE_CACHE_LINE_SIZE, socket_id);
3952+ RTE_CACHE_LINE_SIZE,
3953+ bp->eth_dev->device->numa_node);
3954 if (ring == NULL) {
3955 rte_free(cpr);
3956 return -ENOMEM;
3957@@ -868,7 +867,6 @@ int bnxt_alloc_async_ring_struct(struct bnxt *bp)
3958 bp->async_cp_ring = cpr;
3959 cpr->cp_ring_struct = ring;
3960
3961- return bnxt_alloc_rings(bp, 0, NULL, NULL,
3962- bp->async_cp_ring, NULL,
3963- "def_cp");
3964+ return bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
3965+ NULL, bp->async_cp_ring, NULL, "def_cp");
3966 }
3967diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
3968index 0a4685d..201b391 100644
3969--- a/drivers/net/bnxt/bnxt_ring.h
3970+++ b/drivers/net/bnxt/bnxt_ring.h
3971@@ -66,7 +66,7 @@ struct bnxt_rx_ring_info;
3972 struct bnxt_cp_ring_info;
3973 void bnxt_free_ring(struct bnxt_ring *ring);
3974 int bnxt_alloc_ring_grps(struct bnxt *bp);
3975-int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
3976+int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
3977 struct bnxt_tx_queue *txq,
3978 struct bnxt_rx_queue *rxq,
3979 struct bnxt_cp_ring_info *cp_ring_info,
3980diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
3981index ffb7193..a2feb52 100644
3982--- a/drivers/net/bnxt/bnxt_rxq.c
3983+++ b/drivers/net/bnxt/bnxt_rxq.c
3984@@ -335,8 +335,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
3985
3986 eth_dev->data->rx_queues[queue_idx] = rxq;
3987 /* Allocate RX ring hardware descriptors */
3988- rc = bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
3989- "rxr");
3990+ rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
3991+ NULL, "rxr");
3992 if (rc) {
3993 PMD_DRV_LOG(ERR,
3994 "ring_dma_zone_reserve for rx_ring failed!\n");
3995diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
3996index c72105c..980a16a 100644
3997--- a/drivers/net/bnxt/bnxt_rxq.h
3998+++ b/drivers/net/bnxt/bnxt_rxq.h
3999@@ -30,6 +30,7 @@ struct bnxt_rx_queue {
4000 uint8_t rx_deferred_start; /* not in global dev start */
4001 uint8_t rx_started; /* RX queue is started */
4002 uint8_t drop_en; /* Drop when rx desc not available. */
4003+ uint8_t in_reset; /* Rx ring is scheduled for reset */
4004
4005 struct bnxt *bp;
4006 int index;
4007diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
4008index d16340a..41be742 100644
4009--- a/drivers/net/bnxt/bnxt_rxr.c
4010+++ b/drivers/net/bnxt/bnxt_rxr.c
4011@@ -10,6 +10,7 @@
4012 #include <rte_byteorder.h>
4013 #include <rte_malloc.h>
4014 #include <rte_memory.h>
4015+#include <rte_alarm.h>
4016
4017 #include "bnxt.h"
4018 #include "bnxt_reps.h"
4019@@ -17,9 +18,7 @@
4020 #include "bnxt_rxr.h"
4021 #include "bnxt_rxq.h"
4022 #include "hsi_struct_def_dpdk.h"
4023-#ifdef RTE_LIBRTE_IEEE1588
4024 #include "bnxt_hwrm.h"
4025-#endif
4026
4027 #include <bnxt_tf_common.h>
4028 #include <ulp_mark_mgr.h>
4029@@ -127,6 +126,50 @@ struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
4030 return mbuf;
4031 }
4032
4033+static void bnxt_rx_ring_reset(void *arg)
4034+{
4035+ struct bnxt *bp = arg;
4036+ int i, rc = 0;
4037+ struct bnxt_rx_queue *rxq;
4038+
4039+
4040+ for (i = 0; i < (int)bp->rx_nr_rings; i++) {
4041+ struct bnxt_rx_ring_info *rxr;
4042+
4043+ rxq = bp->rx_queues[i];
4044+ if (!rxq || !rxq->in_reset)
4045+ continue;
4046+
4047+ rxr = rxq->rx_ring;
4048+ /* Disable and flush TPA before resetting the RX ring */
4049+ if (rxr->tpa_info)
4050+ bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, false);
4051+ rc = bnxt_hwrm_rx_ring_reset(bp, i);
4052+ if (rc) {
4053+ PMD_DRV_LOG(ERR, "Rx ring%d reset failed\n", i);
4054+ continue;
4055+ }
4056+
4057+ bnxt_rx_queue_release_mbufs(rxq);
4058+ rxr->rx_prod = 0;
4059+ rxr->ag_prod = 0;
4060+ rxr->rx_next_cons = 0;
4061+ bnxt_init_one_rx_ring(rxq);
4062+ bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
4063+ bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
4064+ if (rxr->tpa_info)
4065+ bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, true);
4066+
4067+ rxq->in_reset = 0;
4068+ }
4069+}
4070+
4071+static void bnxt_sched_ring_reset(struct bnxt_rx_queue *rxq)
4072+{
4073+ rxq->in_reset = 1;
4074+ rte_eal_alarm_set(1, bnxt_rx_ring_reset, (void *)rxq->bp);
4075+}
4076+
4077 static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
4078 struct rx_tpa_start_cmpl *tpa_start,
4079 struct rx_tpa_start_cmpl_hi *tpa_start1)
4080@@ -141,6 +184,12 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
4081
4082 data_cons = tpa_start->opaque;
4083 tpa_info = &rxr->tpa_info[agg_id];
4084+ if (unlikely(data_cons != rxr->rx_next_cons)) {
4085+ PMD_DRV_LOG(ERR, "TPA cons %x, expected cons %x\n",
4086+ data_cons, rxr->rx_next_cons);
4087+ bnxt_sched_ring_reset(rxq);
4088+ return;
4089+ }
4090
4091 mbuf = bnxt_consume_rx_buf(rxr, data_cons);
4092
4093@@ -177,6 +226,8 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
4094 /* recycle next mbuf */
4095 data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
4096 bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
4097+
4098+ rxr->rx_next_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
4099 }
4100
4101 static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
4102@@ -191,7 +242,8 @@ static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
4103 cpr->valid = FLIP_VALID(raw_cp_cons,
4104 cpr->cp_ring_struct->ring_mask,
4105 cpr->valid);
4106- return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
4107+ return bnxt_cpr_cmp_valid(agg_cmpl, raw_cp_cons,
4108+ cpr->cp_ring_struct->ring_size);
4109 }
4110
4111 /* TPA consume agg buffer out of order, allocate connected data only */
4112@@ -272,6 +324,34 @@ static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
4113 return 0;
4114 }
4115
4116+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
4117+ uint32_t *raw_cons, void *cmp)
4118+{
4119+ struct rx_pkt_cmpl *rxcmp = cmp;
4120+ uint32_t tmp_raw_cons = *raw_cons;
4121+ uint8_t cmp_type, agg_bufs = 0;
4122+
4123+ cmp_type = CMP_TYPE(rxcmp);
4124+
4125+ if (cmp_type == CMPL_BASE_TYPE_RX_L2) {
4126+ agg_bufs = BNXT_RX_L2_AGG_BUFS(rxcmp);
4127+ } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
4128+ struct rx_tpa_end_cmpl *tpa_end = cmp;
4129+
4130+ if (BNXT_CHIP_THOR(bp))
4131+ return 0;
4132+
4133+ agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
4134+ }
4135+
4136+ if (agg_bufs) {
4137+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, tmp_raw_cons))
4138+ return -EBUSY;
4139+ }
4140+ *raw_cons = tmp_raw_cons;
4141+ return 0;
4142+}
4143+
4144 static inline struct rte_mbuf *bnxt_tpa_end(
4145 struct bnxt_rx_queue *rxq,
4146 uint32_t *raw_cp_cons,
4147@@ -286,6 +366,13 @@ static inline struct rte_mbuf *bnxt_tpa_end(
4148 uint8_t payload_offset;
4149 struct bnxt_tpa_info *tpa_info;
4150
4151+ if (unlikely(rxq->in_reset)) {
4152+ PMD_DRV_LOG(ERR, "rxq->in_reset: raw_cp_cons:%d\n",
4153+ *raw_cp_cons);
4154+ bnxt_discard_rx(rxq->bp, cpr, raw_cp_cons, tpa_end);
4155+ return NULL;
4156+ }
4157+
4158 if (BNXT_CHIP_THOR(rxq->bp)) {
4159 struct rx_tpa_v2_end_cmpl *th_tpa_end;
4160 struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
4161@@ -741,7 +828,8 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
4162 cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
4163 rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
4164
4165- if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
4166+ if (!bnxt_cpr_cmp_valid(rxcmp1, tmp_raw_cons,
4167+ cpr->cp_ring_struct->ring_size))
4168 return -EBUSY;
4169
4170 cpr->valid = FLIP_VALID(cp_cons,
4171@@ -773,6 +861,14 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
4172 prod = rxr->rx_prod;
4173
4174 cons = rxcmp->opaque;
4175+ if (unlikely(cons != rxr->rx_next_cons)) {
4176+ bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
4177+ PMD_DRV_LOG(ERR, "RX cons %x != expected cons %x\n",
4178+ cons, rxr->rx_next_cons);
4179+ bnxt_sched_ring_reset(rxq);
4180+ rc = -EBUSY;
4181+ goto next_rx;
4182+ }
4183 mbuf = bnxt_consume_rx_buf(rxr, cons);
4184 if (mbuf == NULL)
4185 return -EBUSY;
4186@@ -837,6 +933,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
4187 goto rx;
4188 }
4189 rxr->rx_prod = prod;
4190+ rxr->rx_next_cons = RING_NEXT(rxr->rx_ring_struct, cons);
4191
4192 if (BNXT_TRUFLOW_EN(bp) && (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
4193 vfr_flag) {
4194@@ -911,7 +1008,8 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
4195 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
4196 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
4197
4198- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
4199+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons,
4200+ cpr->cp_ring_struct->ring_size))
4201 break;
4202 cpr->valid = FLIP_VALID(cons,
4203 cpr->cp_ring_struct->ring_mask,
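
The recurring change in bnxt_rxr.c (and in the Tx paths further down) replaces the CMP_VALID macro, which compared against a cached cpr->valid flag, with bnxt_cpr_cmp_valid(), which derives the expected phase of the completion's valid bit from the raw consumer index alone. A simplified sketch of that check, assuming the usual bnxt convention that hardware toggles the valid bit once per pass over the ring (the struct layout and valid-bit mask are stand-ins; the real helper also does a byte-order conversion and a read barrier):

#include <stdbool.h>
#include <stdint.h>

#define CMPL_V_BIT (1u << 31)                /* stand-in valid bit */

struct cmpl_sketch { uint32_t info3_v; };

static inline bool
cpr_cmp_valid_sketch(const struct cmpl_sketch *c,
                     uint32_t raw_cons, uint32_t ring_size)
{
        /* raw_cons is never masked, so the bit at "ring_size" flips
         * exactly once per wrap -- the same cadence at which the
         * hardware toggles the valid bit in each completion. */
        bool expected = !(raw_cons & ring_size);

        return !!(c->info3_v & CMPL_V_BIT) == expected;
}

Deriving validity from raw_cons is what lets bnxt_handle_tx_cp() in bnxt_txr.c below drop the CMPL_VALID/NEXT_CMPL bookkeeping and recompute cpr->valid once, after the loop.
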
4204diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
4205index f8eb9b7..46b2840 100644
4206--- a/drivers/net/bnxt/bnxt_rxr.h
4207+++ b/drivers/net/bnxt/bnxt_rxr.h
4208@@ -58,6 +58,7 @@ struct bnxt_rx_ring_info {
4209 uint16_t rx_prod;
4210 uint16_t ag_prod;
4211 uint16_t rx_cons; /* Needed for representor */
4212+ uint16_t rx_next_cons;
4213 struct bnxt_db_info rx_db;
4214 struct bnxt_db_info ag_db;
4215
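
The new rx_next_cons field is what lets bnxt_rx_pkt() and bnxt_tpa_start() above detect an out-of-sequence opaque value; recovery is then handed to bnxt_sched_ring_reset(), which defers the actual HWRM ring reset to an EAL alarm callback, since slow HWRM commands cannot be issued from the Rx burst context. The shape of that deferral, as a sketch with illustrative names:

#include <rte_alarm.h>

struct rxq_sketch { volatile int in_reset; void *bp; };

/* Runs about 1 us later in the EAL interrupt thread, outside the Rx
 * hot path; the real callback walks all queues with in_reset set and
 * issues the HWRM ring-reset commands there. */
static void
ring_reset_cb(void *arg)
{
        (void)arg;
}

static void
sched_ring_reset_sketch(struct rxq_sketch *q)
{
        q->in_reset = 1;     /* burst path discards completions meanwhile */
        rte_eal_alarm_set(1, ring_reset_cb, q->bp);
}
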
4216diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
4217index 54f47a3..3cb9492 100644
4218--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
4219+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
4220@@ -151,9 +151,8 @@ descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
4221 vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp);
4222 }
4223
4224-uint16_t
4225-bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
4226- uint16_t nb_pkts)
4227+static uint16_t
4228+recv_burst_vec_neon(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
4229 {
4230 struct bnxt_rx_queue *rxq = rx_queue;
4231 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
4232@@ -178,9 +177,6 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
4233 if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
4234 bnxt_rxq_rearm(rxq, rxr);
4235
4236- /* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
4237- nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
4238-
4239 cons = raw_cons & (cp_ring_size - 1);
4240 mbcons = (raw_cons / 2) & (rx_ring_size - 1);
4241
4242@@ -314,6 +310,27 @@ out:
4243 return nb_rx_pkts;
4244 }
4245
4246+uint16_t
4247+bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
4248+{
4249+ uint16_t cnt = 0;
4250+
4251+ while (nb_pkts > RTE_BNXT_MAX_RX_BURST) {
4252+ uint16_t burst;
4253+
4254+ burst = recv_burst_vec_neon(rx_queue, rx_pkts + cnt,
4255+ RTE_BNXT_MAX_RX_BURST);
4256+
4257+ cnt += burst;
4258+ nb_pkts -= burst;
4259+
4260+ if (burst < RTE_BNXT_MAX_RX_BURST)
4261+ return cnt;
4262+ }
4263+
4264+ return cnt + recv_burst_vec_neon(rx_queue, rx_pkts + cnt, nb_pkts);
4265+}
4266+
4267 static void
4268 bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
4269 {
4270@@ -330,7 +347,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
4271 cons = RING_CMPL(ring_mask, raw_cons);
4272 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
4273
4274- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
4275+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
4276 break;
4277
4278 if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
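
Both vector Rx paths get the same restructuring: the RTE_BNXT_MAX_RX_BURST clamp moves out of the (now static) inner routine into the exported bnxt_recv_pkts_vec(), which loops over fixed-size chunks so a caller asking for more than the vector burst size is no longer silently capped. The pattern in isolation (generic names, standing in for recv_burst_vec_neon/_sse):

#include <stdint.h>

#define MAX_BURST 32                 /* stand-in for RTE_BNXT_MAX_RX_BURST */

typedef uint16_t (*recv_fn)(void *q, void **pkts, uint16_t n);

static uint16_t
recv_split(recv_fn inner, void *q, void **pkts, uint16_t nb)
{
        uint16_t cnt = 0;

        while (nb > MAX_BURST) {
                uint16_t burst = inner(q, pkts + cnt, MAX_BURST);

                cnt += burst;
                nb -= burst;
                if (burst < MAX_BURST)
                        return cnt;  /* ring drained, stop early */
        }
        return cnt + inner(q, pkts + cnt, nb);
}
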
4279diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
4280index 621f567..5974b47 100644
4281--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
4282+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
4283@@ -143,9 +143,8 @@ descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4],
4284 _mm_store_si128((void *)&mbuf[3]->rx_descriptor_fields1, t0);
4285 }
4286
4287-uint16_t
4288-bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
4289- uint16_t nb_pkts)
4290+static uint16_t
4291+recv_burst_vec_sse(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
4292 {
4293 struct bnxt_rx_queue *rxq = rx_queue;
4294 const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
4295@@ -170,9 +169,6 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
4296 if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
4297 bnxt_rxq_rearm(rxq, rxr);
4298
4299- /* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
4300- nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
4301-
4302 cons = raw_cons & (cp_ring_size - 1);
4303 mbcons = (raw_cons / 2) & (rx_ring_size - 1);
4304
4305@@ -296,6 +292,27 @@ out:
4306 return nb_rx_pkts;
4307 }
4308
4309+uint16_t
4310+bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
4311+{
4312+ uint16_t cnt = 0;
4313+
4314+ while (nb_pkts > RTE_BNXT_MAX_RX_BURST) {
4315+ uint16_t burst;
4316+
4317+ burst = recv_burst_vec_sse(rx_queue, rx_pkts + cnt,
4318+ RTE_BNXT_MAX_RX_BURST);
4319+
4320+ cnt += burst;
4321+ nb_pkts -= burst;
4322+
4323+ if (burst < RTE_BNXT_MAX_RX_BURST)
4324+ return cnt;
4325+ }
4326+
4327+ return cnt + recv_burst_vec_sse(rx_queue, rx_pkts + cnt, nb_pkts);
4328+}
4329+
4330 static void
4331 bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
4332 {
4333@@ -312,7 +329,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
4334 cons = RING_CMPL(ring_mask, raw_cons);
4335 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
4336
4337- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
4338+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
4339 break;
4340
4341 if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
4342diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
4343index 0cf3ee7..47dcf40 100644
4344--- a/drivers/net/bnxt/bnxt_stats.c
4345+++ b/drivers/net/bnxt/bnxt_stats.c
4346@@ -506,8 +506,47 @@ void bnxt_free_stats(struct bnxt *bp)
4347 }
4348 }
4349
4350+static void bnxt_fill_rte_eth_stats(struct rte_eth_stats *stats,
4351+ struct bnxt_ring_stats *ring_stats,
4352+ unsigned int i, bool rx)
4353+{
4354+ if (rx) {
4355+ stats->q_ipackets[i] = ring_stats->rx_ucast_pkts;
4356+ stats->q_ipackets[i] += ring_stats->rx_mcast_pkts;
4357+ stats->q_ipackets[i] += ring_stats->rx_bcast_pkts;
4358+
4359+ stats->ipackets += stats->q_ipackets[i];
4360+
4361+ stats->q_ibytes[i] = ring_stats->rx_ucast_bytes;
4362+ stats->q_ibytes[i] += ring_stats->rx_mcast_bytes;
4363+ stats->q_ibytes[i] += ring_stats->rx_bcast_bytes;
4364+
4365+ stats->ibytes += stats->q_ibytes[i];
4366+
4367+ stats->q_errors[i] = ring_stats->rx_discard_pkts;
4368+ stats->q_errors[i] += ring_stats->rx_error_pkts;
4369+
4370+ stats->imissed += ring_stats->rx_discard_pkts;
4371+ stats->ierrors += ring_stats->rx_error_pkts;
4372+ } else {
4373+ stats->q_opackets[i] = ring_stats->tx_ucast_pkts;
4374+ stats->q_opackets[i] += ring_stats->tx_mcast_pkts;
4375+ stats->q_opackets[i] += ring_stats->tx_bcast_pkts;
4376+
4377+ stats->opackets += stats->q_opackets[i];
4378+
4379+ stats->q_obytes[i] = ring_stats->tx_ucast_bytes;
4380+ stats->q_obytes[i] += ring_stats->tx_mcast_bytes;
4381+ stats->q_obytes[i] += ring_stats->tx_bcast_bytes;
4382+
4383+ stats->obytes += stats->q_obytes[i];
4384+
4385+ stats->oerrors += ring_stats->tx_discard_pkts;
4386+ }
4387+}
4388+
4389 int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
4390- struct rte_eth_stats *bnxt_stats)
4391+ struct rte_eth_stats *bnxt_stats)
4392 {
4393 int rc = 0;
4394 unsigned int i;
4395@@ -527,11 +566,14 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
4396 for (i = 0; i < num_q_stats; i++) {
4397 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
4398 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
4399+ struct bnxt_ring_stats ring_stats = {0};
4400
4401- rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
4402- bnxt_stats, 1);
4403+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
4404+ &ring_stats, true);
4405 if (unlikely(rc))
4406 return rc;
4407+
4408+ bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
4409 bnxt_stats->rx_nombuf +=
4410 rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
4411 }
4412@@ -542,17 +584,29 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
4413 for (i = 0; i < num_q_stats; i++) {
4414 struct bnxt_tx_queue *txq = bp->tx_queues[i];
4415 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
4416+ struct bnxt_ring_stats ring_stats = {0};
4417
4418- rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
4419- bnxt_stats, 0);
4420+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
4421+ &ring_stats, false);
4422 if (unlikely(rc))
4423 return rc;
4424+
4425+ bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, false);
4426 }
4427
4428- rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats, NULL);
4429 return rc;
4430 }
4431
4432+static void bnxt_clear_prev_stat(struct bnxt *bp)
4433+{
4434+ /*
4435+ * Clear the cached values of stats returned by HW in the previous
4436+ * get operation.
4437+ */
4438+ memset(bp->prev_rx_ring_stats, 0, sizeof(struct bnxt_ring_stats) * bp->rx_cp_nr_rings);
4439+ memset(bp->prev_tx_ring_stats, 0, sizeof(struct bnxt_ring_stats) * bp->tx_cp_nr_rings);
4440+}
4441+
4442 int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
4443 {
4444 struct bnxt *bp = eth_dev->data->dev_private;
4445@@ -575,9 +629,45 @@ int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
4446 rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail);
4447 }
4448
4449+ bnxt_clear_prev_stat(bp);
4450+
4451 return ret;
4452 }
4453
4454+static void bnxt_fill_func_qstats(struct hwrm_func_qstats_output *func_qstats,
4455+ struct bnxt_ring_stats *ring_stats,
4456+ bool rx)
4457+{
4458+ if (rx) {
4459+ func_qstats->rx_ucast_pkts += ring_stats->rx_ucast_pkts;
4460+ func_qstats->rx_mcast_pkts += ring_stats->rx_mcast_pkts;
4461+ func_qstats->rx_bcast_pkts += ring_stats->rx_bcast_pkts;
4462+
4463+ func_qstats->rx_ucast_bytes += ring_stats->rx_ucast_bytes;
4464+ func_qstats->rx_mcast_bytes += ring_stats->rx_mcast_bytes;
4465+ func_qstats->rx_bcast_bytes += ring_stats->rx_bcast_bytes;
4466+
4467+ func_qstats->rx_discard_pkts += ring_stats->rx_discard_pkts;
4468+ func_qstats->rx_drop_pkts += ring_stats->rx_error_pkts;
4469+
4470+ func_qstats->rx_agg_pkts += ring_stats->rx_agg_pkts;
4471+ func_qstats->rx_agg_bytes += ring_stats->rx_agg_bytes;
4472+ func_qstats->rx_agg_events += ring_stats->rx_agg_events;
4473+ func_qstats->rx_agg_aborts += ring_stats->rx_agg_aborts;
4474+ } else {
4475+ func_qstats->tx_ucast_pkts += ring_stats->tx_ucast_pkts;
4476+ func_qstats->tx_mcast_pkts += ring_stats->tx_mcast_pkts;
4477+ func_qstats->tx_bcast_pkts += ring_stats->tx_bcast_pkts;
4478+
4479+ func_qstats->tx_ucast_bytes += ring_stats->tx_ucast_bytes;
4480+ func_qstats->tx_mcast_bytes += ring_stats->tx_mcast_bytes;
4481+ func_qstats->tx_bcast_bytes += ring_stats->tx_bcast_bytes;
4482+
4483+ func_qstats->tx_drop_pkts += ring_stats->tx_error_pkts;
4484+ func_qstats->tx_discard_pkts += ring_stats->tx_discard_pkts;
4485+ }
4486+}
4487+
4488 int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
4489 struct rte_eth_xstat *xstats, unsigned int n)
4490 {
4491@@ -604,7 +694,38 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
4492 if (n < stat_count || xstats == NULL)
4493 return stat_count;
4494
4495- bnxt_hwrm_func_qstats(bp, 0xffff, NULL, &func_qstats);
4496+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4497+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
4498+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
4499+ struct bnxt_ring_stats ring_stats = {0};
4500+
4501+ if (!rxq->rx_started)
4502+ continue;
4503+
4504+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
4505+ &ring_stats, true);
4506+ if (unlikely(rc))
4507+ return rc;
4508+
4509+ bnxt_fill_func_qstats(&func_qstats, &ring_stats, true);
4510+ }
4511+
4512+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
4513+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
4514+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
4515+ struct bnxt_ring_stats ring_stats = {0};
4516+
4517+ if (!txq->tx_started)
4518+ continue;
4519+
4520+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
4521+ &ring_stats, false);
4522+ if (unlikely(rc))
4523+ return rc;
4524+
4525+ bnxt_fill_func_qstats(&func_qstats, &ring_stats, false);
4526+ }
4527+
4528 bnxt_hwrm_port_qstats(bp);
4529 bnxt_hwrm_ext_port_qstats(bp);
4530 rx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_rx_ext_stats_strings),
4531@@ -637,13 +758,11 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
4532
4533 for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
4534 xstats[count].id = count;
4535- xstats[count].value =
4536- rte_le_to_cpu_64(*(uint64_t *)((char *)&func_qstats +
4537- bnxt_func_stats_strings[i].offset));
4538+ xstats[count].value = *(uint64_t *)((char *)&func_qstats +
4539+ bnxt_func_stats_strings[i].offset);
4540 count++;
4541 }
4542
4543-
4544 for (i = 0; i < rx_port_stats_ext_cnt; i++) {
4545 uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext;
4546
4547@@ -820,6 +939,8 @@ int bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
4548 PMD_DRV_LOG(ERR, "Failed to reset xstats: %s\n",
4549 strerror(-ret));
4550
4551+ bnxt_clear_prev_stat(bp);
4552+
4553 return ret;
4554 }
4555
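
bnxt_stats_get_op() now pulls raw per-ring counters via bnxt_hwrm_ring_stats() and folds them into the rte_eth_stats layout in software (bnxt_fill_rte_eth_stats), rather than having the old bnxt_hwrm_ctx_qstats()/bnxt_hwrm_func_qstats() helpers write the struct directly; both reset paths additionally clear the cached previous snapshot so post-reset deltas start from zero. The Rx half of the fold, reduced to stand-in types:

#include <stdint.h>

struct ring_stats_sketch {
        uint64_t ucast_pkts, mcast_pkts, bcast_pkts;
        uint64_t ucast_bytes, mcast_bytes, bcast_bytes;
        uint64_t discard_pkts, error_pkts;
};

struct eth_stats_sketch {
        uint64_t ipackets, ibytes, imissed, ierrors;
        uint64_t q_ipackets[16], q_ibytes[16], q_errors[16];
};

/* Per-queue slots are overwritten with this ring's totals; the
 * device-wide counters accumulate across rings. */
static void
fill_rx_stats(struct eth_stats_sketch *s,
              const struct ring_stats_sketch *r, unsigned int i)
{
        s->q_ipackets[i] = r->ucast_pkts + r->mcast_pkts + r->bcast_pkts;
        s->ipackets += s->q_ipackets[i];

        s->q_ibytes[i] = r->ucast_bytes + r->mcast_bytes + r->bcast_bytes;
        s->ibytes += s->q_ibytes[i];

        s->q_errors[i] = r->discard_pkts + r->error_pkts;
        s->imissed += r->discard_pkts;
        s->ierrors += r->error_pkts;
}
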
4556diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
4557index 99a31ce..795056e 100644
4558--- a/drivers/net/bnxt/bnxt_txq.c
4559+++ b/drivers/net/bnxt/bnxt_txq.c
4560@@ -149,8 +149,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
4561 txq->port_id = eth_dev->data->port_id;
4562
4563 /* Allocate TX ring hardware descriptors */
4564- if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, NULL,
4565- "txr")) {
4566+ if (bnxt_alloc_rings(bp, socket_id, queue_idx, txq, NULL, txq->cp_ring,
4567+ NULL, "txr")) {
4568 PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
4569 rc = -ENOMEM;
4570 goto err;
4571diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
4572index 0cf6ece..ad78fa1 100644
4573--- a/drivers/net/bnxt/bnxt_txr.c
4574+++ b/drivers/net/bnxt/bnxt_txr.c
4575@@ -427,30 +427,26 @@ static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
4576
4577 static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
4578 {
4579+ uint32_t nb_tx_pkts = 0, cons, ring_mask, opaque;
4580 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
4581 uint32_t raw_cons = cpr->cp_raw_cons;
4582- uint32_t cons;
4583- uint32_t nb_tx_pkts = 0;
4584+ struct bnxt_ring *cp_ring_struct;
4585 struct tx_cmpl *txcmp;
4586- struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
4587- struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
4588- uint32_t ring_mask = cp_ring_struct->ring_mask;
4589- uint32_t opaque = 0;
4590
4591 if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh)
4592 return 0;
4593
4594+ cp_ring_struct = cpr->cp_ring_struct;
4595+ ring_mask = cp_ring_struct->ring_mask;
4596+
4597 do {
4598 cons = RING_CMPL(ring_mask, raw_cons);
4599 txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
4600- rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
4601- ring_mask]);
4602
4603- if (!CMPL_VALID(txcmp, cpr->valid))
4604+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
4605 break;
4606- opaque = rte_cpu_to_le_32(txcmp->opaque);
4607- NEXT_CMPL(cpr, cons, cpr->valid, 1);
4608- rte_prefetch0(&cp_desc_ring[cons]);
4609+
4610+ opaque = rte_le_to_cpu_32(txcmp->opaque);
4611
4612 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
4613 nb_tx_pkts += opaque;
4614@@ -458,9 +454,11 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
4615 RTE_LOG_DP(ERR, PMD,
4616 "Unhandled CMP type %02x\n",
4617 CMP_TYPE(txcmp));
4618- raw_cons = cons;
4619+ raw_cons = NEXT_RAW_CMP(raw_cons);
4620 } while (nb_tx_pkts < ring_mask);
4621
4622+ cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
4623+
4624 if (nb_tx_pkts) {
4625 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
4626 bnxt_tx_cmp_fast(txq, nb_tx_pkts);
4627diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
4628index 007f7e9..40cad0f 100644
4629--- a/drivers/net/bnxt/bnxt_vnic.c
4630+++ b/drivers/net/bnxt/bnxt_vnic.c
4631@@ -145,7 +145,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4632 mz = rte_memzone_lookup(mz_name);
4633 if (!mz) {
4634 mz = rte_memzone_reserve(mz_name,
4635- entry_length * max_vnics, SOCKET_ID_ANY,
4636+ entry_length * max_vnics,
4637+ bp->eth_dev->device->numa_node,
4638 RTE_MEMZONE_2MB |
4639 RTE_MEMZONE_SIZE_HINT_ONLY |
4640 RTE_MEMZONE_IOVA_CONTIG);
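
The vnic-attributes allocation (like the bnxt_alloc_rings() signature change in bnxt_txq.c above) swaps SOCKET_ID_ANY for the port's own NUMA node, keeping DMA memory on the same socket as the device. The lookup-or-reserve idiom it uses, in a minimal form built on the real rte_memzone API:

#include <rte_ethdev.h>
#include <rte_memzone.h>

/* Reserve an IOVA-contiguous zone on the device's NUMA node; if a
 * zone of that name already exists (e.g. after a restart), reuse it. */
static const struct rte_memzone *
zone_on_dev_node(struct rte_eth_dev *dev, const char *name, size_t len)
{
        const struct rte_memzone *mz = rte_memzone_lookup(name);

        if (mz != NULL)
                return mz;
        return rte_memzone_reserve(name, len, dev->device->numa_node,
                                   RTE_MEMZONE_IOVA_CONTIG);
}
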
4641diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
4642index 2896337..a2fd494 100644
4643--- a/drivers/net/bnxt/meson.build
4644+++ b/drivers/net/bnxt/meson.build
4645@@ -74,6 +74,6 @@ sources = files('bnxt_cpr.c',
4646
4647 if arch_subdir == 'x86'
4648 sources += files('bnxt_rxtx_vec_sse.c')
4649-elif arch_subdir == 'arm' and host_machine.cpu_family().startswith('aarch64')
4650+elif arch_subdir == 'arm' and dpdk_conf.get('RTE_ARCH_64')
4651 sources += files('bnxt_rxtx_vec_neon.c')
4652 endif
4653diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
4654index ec7db3d..a1d3703 100644
4655--- a/drivers/net/bonding/rte_eth_bond_pmd.c
4656+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
4657@@ -1794,12 +1794,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
4658 != 0)
4659 return errval;
4660
4661- if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
4662- slave_eth_dev->data->port_id) != 0) {
4663+ errval = bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
4664+ slave_eth_dev->data->port_id);
4665+ if (errval != 0) {
4666 RTE_BOND_LOG(ERR,
4667- "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
4668- slave_eth_dev->data->port_id, q_id, errval);
4669- return -1;
4670+ "bond_ethdev_8023ad_flow_verify: port=%d, err (%d)",
4671+ slave_eth_dev->data->port_id, errval);
4672+ return errval;
4673 }
4674
4675 if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
4676@@ -1807,8 +1808,14 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
4677 internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
4678 &flow_error);
4679
4680- bond_ethdev_8023ad_flow_set(bonded_eth_dev,
4681+ errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
4682 slave_eth_dev->data->port_id);
4683+ if (errval != 0) {
4684+ RTE_BOND_LOG(ERR,
4685+ "bond_ethdev_8023ad_flow_set: port=%d, err (%d)",
4686+ slave_eth_dev->data->port_id, errval);
4687+ return errval;
4688+ }
4689 }
4690
4691 /* Start device */
4692diff --git a/drivers/net/dpaa/dpaa_flow.c b/drivers/net/dpaa/dpaa_flow.c
4693index a0087df..c5b5ec8 100644
4694--- a/drivers/net/dpaa/dpaa_flow.c
4695+++ b/drivers/net/dpaa/dpaa_flow.c
4696@@ -1,5 +1,5 @@
4697 /* SPDX-License-Identifier: BSD-3-Clause
4698- * Copyright 2017-2019 NXP
4699+ * Copyright 2017-2019,2021 NXP
4700 */
4701
4702 /* System headers */
4703@@ -999,6 +999,9 @@ static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf,
4704 buf_prefix_cont.pass_time_stamp = true;
4705 buf_prefix_cont.pass_hash_result = false;
4706 buf_prefix_cont.pass_all_other_pcdinfo = false;
4707+ buf_prefix_cont.manip_ext_space =
4708+ RTE_PKTMBUF_HEADROOM - DPAA_MBUF_HW_ANNOTATION;
4709+
4710 ret = fm_vsp_config_buffer_prefix_content(dpaa_intf->vsp_handle[vsp_id],
4711 &buf_prefix_cont);
4712 if (ret != E_OK) {
4713diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
4714index 69198a6..3f2c979 100644
4715--- a/drivers/net/ena/ena_ethdev.c
4716+++ b/drivers/net/ena/ena_ethdev.c
4717@@ -1963,6 +1963,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
4718
4719 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
4720 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
4721+ dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
4722
4723 adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
4724 adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
4725@@ -2037,6 +2038,7 @@ static int ena_infos_get(struct rte_eth_dev *dev,
4726 DEV_RX_OFFLOAD_TCP_CKSUM;
4727
4728 rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
4729+ tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
4730
4731 /* Inform framework about available features */
4732 dev_info->rx_offload_capa = rx_feat;
4733@@ -2537,7 +2539,11 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
4734 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
4735 &nb_hw_desc);
4736 if (unlikely(rc)) {
4737+ PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc);
4738 ++tx_ring->tx_stats.prepare_ctx_err;
4739+ tx_ring->adapter->reset_reason =
4740+ ENA_REGS_RESET_DRIVER_INVALID_STATE;
4741+ tx_ring->adapter->trigger_reset = true;
4742 return rc;
4743 }
4744
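
The ena change upgrades a Tx prepare failure from a silently counted error into a request for device recovery: the hot path only records a reason and raises trigger_reset, and the driver's timer service performs the reset later. That deferred-reset pattern, with simplified stand-in types:

/* Datapath side: record why, raise the flag, return. The slow-path
 * service loop (not shown) checks trigger_reset and resets the port. */
enum reset_reason_sketch { RESET_NONE = 0, RESET_INVALID_STATE };

struct adapter_sketch {
        volatile int trigger_reset;
        enum reset_reason_sketch reset_reason;
};

static void
request_reset(struct adapter_sketch *ad, enum reset_reason_sketch why)
{
        ad->reset_reason = why;
        ad->trigger_reset = 1;
}
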
4745diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h b/drivers/net/hinic/base/hinic_pmd_niccfg.h
4746index 04cd374..0d0a670 100644
4747--- a/drivers/net/hinic/base/hinic_pmd_niccfg.h
4748+++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h
4749@@ -116,15 +116,6 @@ enum hinic_link_mode {
4750 #define HINIC_DEFAULT_RX_MODE (HINIC_RX_MODE_UC | HINIC_RX_MODE_MC | \
4751 HINIC_RX_MODE_BC)
4752
4753-#define HINIC_MAX_MTU_SIZE (9600)
4754-#define HINIC_MIN_MTU_SIZE (256)
4755-
4756-/* MIN_MTU + ETH_HLEN + CRC (256+14+4) */
4757-#define HINIC_MIN_FRAME_SIZE 274
4758-
4759-/* MAX_MTU + ETH_HLEN + CRC + VLAN(9600+14+4+4) */
4760-#define HINIC_MAX_JUMBO_FRAME_SIZE (9622)
4761-
4762 #define HINIC_PORT_DISABLE 0x0
4763 #define HINIC_PORT_ENABLE 0x3
4764
4765diff --git a/drivers/net/hinic/base/hinic_pmd_nicio.c b/drivers/net/hinic/base/hinic_pmd_nicio.c
4766index 162308b..ad5db9f 100644
4767--- a/drivers/net/hinic/base/hinic_pmd_nicio.c
4768+++ b/drivers/net/hinic/base/hinic_pmd_nicio.c
4769@@ -230,8 +230,8 @@ static void hinic_rq_prepare_ctxt(struct hinic_rq *rq,
4770 wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
4771 wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
4772
4773- /* must config as ceq enable but do not generate ceq */
4774- rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(1, EN) |
4775+ /* config as ceq disable, but must set msix state disable */
4776+ rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) |
4777 RQ_CTXT_CEQ_ATTR_SET(1, OWNER);
4778
4779 rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |
4780diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
4781index f1b3ba3..1a43597 100644
4782--- a/drivers/net/hinic/hinic_pmd_ethdev.c
4783+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
4784@@ -69,15 +69,6 @@
4785
4786 #define HINIC_VLAN_FILTER_EN (1U << 0)
4787
4788-#define HINIC_MTU_TO_PKTLEN(mtu) \
4789- ((mtu) + ETH_HLEN + ETH_CRC_LEN)
4790-
4791-#define HINIC_PKTLEN_TO_MTU(pktlen) \
4792- ((pktlen) - (ETH_HLEN + ETH_CRC_LEN))
4793-
4794-/* The max frame size with default MTU */
4795-#define HINIC_ETH_MAX_LEN (RTE_ETHER_MTU + ETH_HLEN + ETH_CRC_LEN)
4796-
4797 /* lro numer limit for one packet */
4798 #define HINIC_LRO_WQE_NUM_DEFAULT 8
4799
4800@@ -1617,6 +1608,9 @@ static int hinic_vlan_filter_set(struct rte_eth_dev *dev,
4801 if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
4802 return -EINVAL;
4803
4804+ if (vlan_id == 0)
4805+ return 0;
4806+
4807 func_id = hinic_global_func_id(nic_dev->hwdev);
4808
4809 if (enable) {
4810diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h
4811index c7338d8..fafccb9 100644
4812--- a/drivers/net/hinic/hinic_pmd_ethdev.h
4813+++ b/drivers/net/hinic/hinic_pmd_ethdev.h
4814@@ -32,6 +32,23 @@
4815 #define HINIC_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
4816 #define HINIC_VFTA_SIZE (4096 / HINIC_UINT32_BIT_SIZE)
4817
4818+#define HINIC_MAX_MTU_SIZE 9600
4819+#define HINIC_MIN_MTU_SIZE 256
4820+
4821+#define HINIC_VLAN_TAG_SIZE 4
4822+#define HINIC_ETH_OVERHEAD \
4823+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + HINIC_VLAN_TAG_SIZE * 2)
4824+
4825+#define HINIC_MIN_FRAME_SIZE (HINIC_MIN_MTU_SIZE + HINIC_ETH_OVERHEAD)
4826+#define HINIC_MAX_JUMBO_FRAME_SIZE (HINIC_MAX_MTU_SIZE + HINIC_ETH_OVERHEAD)
4827+
4828+#define HINIC_MTU_TO_PKTLEN(mtu) ((mtu) + HINIC_ETH_OVERHEAD)
4829+
4830+#define HINIC_PKTLEN_TO_MTU(pktlen) ((pktlen) - HINIC_ETH_OVERHEAD)
4831+
4832+/* The max frame size with default MTU */
4833+#define HINIC_ETH_MAX_LEN (RTE_ETHER_MTU + HINIC_ETH_OVERHEAD)
4834+
4835 enum hinic_dev_status {
4836 HINIC_DEV_INIT,
4837 HINIC_DEV_CLOSE,
4838diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
4839index c6552b4..dc65ef8 100644
4840--- a/drivers/net/hns3/hns3_cmd.c
4841+++ b/drivers/net/hns3/hns3_cmd.c
4842@@ -44,10 +44,12 @@ static int
4843 hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
4844 uint64_t size, uint32_t alignment)
4845 {
4846+ static uint64_t hns3_dma_memzone_id;
4847 const struct rte_memzone *mz = NULL;
4848 char z_name[RTE_MEMZONE_NAMESIZE];
4849
4850- snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
4851+ snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
4852+ __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
4853 mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4854 RTE_MEMZONE_IOVA_CONTIG, alignment,
4855 RTE_PGSIZE_2M);
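
hns3_allocate_dma_mem() stops deriving memzone names from rte_rand(), where a repeated value would make rte_memzone_reserve_bounded() fail on a name collision, and instead uses a process-local counter bumped atomically. The naming scheme on its own:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Each call yields a distinct name; the relaxed atomic add keeps the
 * counter safe if several lcores allocate concurrently. */
static void
make_zone_name(char *buf, size_t len)
{
        static uint64_t zone_id;

        snprintf(buf, len, "hns3_dma_%" PRIu64,
                 __atomic_fetch_add(&zone_id, 1, __ATOMIC_RELAXED));
}
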
4856diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
4857index 637e613..abe31c8 100644
4858--- a/drivers/net/hns3/hns3_ethdev.c
4859+++ b/drivers/net/hns3/hns3_ethdev.c
4860@@ -239,6 +239,7 @@ hns3_interrupt_handler(void *param)
4861 hns3_pf_disable_irq0(hw);
4862
4863 event_cause = hns3_check_event_cause(hns, &clearval);
4864+ hns3_clear_event_cause(hw, event_cause, clearval);
4865 /* vector 0 interrupt is shared with reset and mailbox source events. */
4866 if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
4867 hns3_warn(hw, "Received err interrupt");
4868@@ -253,7 +254,6 @@ hns3_interrupt_handler(void *param)
4869 else
4870 hns3_err(hw, "Received unknown event");
4871
4872- hns3_clear_event_cause(hw, event_cause, clearval);
4873 /* Enable interrupt if it is not cause by reset */
4874 hns3_pf_enable_irq0(hw);
4875 }
4876@@ -558,7 +558,8 @@ hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
4877
4878 ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
4879 if (ret) {
4880- hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret);
4881+ hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
4882+ enable ? "enable" : "disable", ret);
4883 return ret;
4884 }
4885
4886@@ -1665,7 +1666,6 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev,
4887 struct rte_ether_addr *oaddr;
4888 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
4889 bool default_addr_setted;
4890- bool rm_succes = false;
4891 int ret, ret_val;
4892
4893 /*
4894@@ -1685,9 +1685,10 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev,
4895 oaddr);
4896 hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
4897 mac_str, ret);
4898- rm_succes = false;
4899- } else
4900- rm_succes = true;
4901+
4902+ rte_spinlock_unlock(&hw->lock);
4903+ return ret;
4904+ }
4905 }
4906
4907 ret = hns3_add_uc_addr_common(hw, mac_addr);
4908@@ -1722,16 +1723,12 @@ err_pause_addr_cfg:
4909 }
4910
4911 err_add_uc_addr:
4912- if (rm_succes) {
4913- ret_val = hns3_add_uc_addr_common(hw, oaddr);
4914- if (ret_val) {
4915- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
4916- oaddr);
4917- hns3_warn(hw,
4918- "Failed to restore old uc mac addr(%s): %d",
4919+ ret_val = hns3_add_uc_addr_common(hw, oaddr);
4920+ if (ret_val) {
4921+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
4922+ hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
4923 mac_str, ret_val);
4924- hw->mac.default_addr_setted = false;
4925- }
4926+ hw->mac.default_addr_setted = false;
4927 }
4928 rte_spinlock_unlock(&hw->lock);
4929
4930@@ -2376,13 +2373,11 @@ hns3_dev_configure(struct rte_eth_dev *dev)
4931 * work as usual. But these fake queues are imperceptible, and can not
4932 * be used by upper applications.
4933 */
4934- if (!hns3_dev_indep_txrx_supported(hw)) {
4935- ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
4936- if (ret) {
4937- hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.",
4938- ret);
4939- return ret;
4940- }
4941+ ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
4942+ if (ret) {
4943+ hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
4944+ hw->cfg_max_queues = 0;
4945+ return ret;
4946 }
4947
4948 hw->adapter_state = HNS3_NIC_CONFIGURING;
4949@@ -2441,6 +2436,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
4950 return 0;
4951
4952 cfg_err:
4953+ hw->cfg_max_queues = 0;
4954 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
4955 hw->adapter_state = HNS3_NIC_INITIALIZED;
4956
4957@@ -4856,6 +4852,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)
4958 hns3_rss_uninit(hns);
4959 (void)hns3_config_gro(hw, false);
4960 hns3_promisc_uninit(hw);
4961+ hns3_flow_uninit(eth_dev);
4962 hns3_fdir_filter_uninit(hns);
4963 (void)hns3_firmware_compat_config(hw, false);
4964 hns3_uninit_umv_space(hw);
4965@@ -5169,7 +5166,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
4966 /* Disable datapath on secondary process. */
4967 hns3_mp_req_stop_rxtx(dev);
4968 /* Prevent crashes when queues are still in use. */
4969- rte_delay_ms(hw->tqps_num);
4970+ rte_delay_ms(hw->cfg_max_queues);
4971
4972 rte_spinlock_lock(&hw->lock);
4973 if (rte_atomic16_read(&hw->reset.resetting) == 0) {
4974@@ -5192,11 +5189,8 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
4975 struct hns3_hw *hw = &hns->hw;
4976 int ret = 0;
4977
4978- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
4979- rte_free(eth_dev->process_private);
4980- eth_dev->process_private = NULL;
4981+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
4982 return 0;
4983- }
4984
4985 if (hw->adapter_state == HNS3_NIC_STARTED)
4986 ret = hns3_dev_stop(eth_dev);
4987@@ -5211,8 +5205,6 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
4988 hns3_uninit_pf(eth_dev);
4989 hns3_free_all_queues(eth_dev);
4990 rte_free(hw->reset.wait_data);
4991- rte_free(eth_dev->process_private);
4992- eth_dev->process_private = NULL;
4993 hns3_mp_uninit_primary();
4994 hns3_warn(hw, "Close port %u finished", hw->data->port_id);
4995
4996@@ -5657,7 +5649,7 @@ hns3_stop_service(struct hns3_adapter *hns)
4997 rte_wmb();
4998 /* Disable datapath on secondary process. */
4999 hns3_mp_req_stop_rxtx(eth_dev);
5000- rte_delay_ms(hw->tqps_num);
The diff has been truncated for viewing.
