Merge ~paelzer/ubuntu/+source/dpdk:merge-17.11.10-BIONIC into ubuntu/+source/dpdk:ubuntu/bionic-devel

Proposed by Christian Ehrhardt 
Status: Merged
Approved by: Christian Ehrhardt 
Approved revision: 11e2c6d6c42d9d99eec31dd7087a89156c83b391
Merged at revision: 11e2c6d6c42d9d99eec31dd7087a89156c83b391
Proposed branch: ~paelzer/ubuntu/+source/dpdk:merge-17.11.10-BIONIC
Merge into: ubuntu/+source/dpdk:ubuntu/bionic-devel
Diff against target: 6185 lines (+1762/-821)
151 files modified
MAINTAINERS (+2/-1)
app/proc_info/main.c (+18/-18)
app/test-crypto-perf/cperf_test_verify.c (+11/-3)
app/test-pmd/cmdline.c (+3/-4)
app/test-pmd/config.c (+8/-2)
app/test-pmd/csumonly.c (+3/-0)
app/test-pmd/parameters.c (+2/-2)
app/test-pmd/testpmd.c (+7/-0)
app/test-pmd/txonly.c (+1/-1)
config/common_base (+0/-1)
debian/changelog (+18/-0)
debian/patches/eal-arm64-define-coherent-I-O-memory-barriers.patch (+34/-0)
debian/patches/series (+1/-1)
dev/null (+0/-32)
doc/guides/conf.py (+3/-0)
doc/guides/contributing/coding_style.rst (+4/-4)
doc/guides/contributing/versioning.rst (+2/-2)
doc/guides/cryptodevs/aesni_gcm.rst (+2/-1)
doc/guides/cryptodevs/zuc.rst (+1/-1)
doc/guides/linux_gsg/nic_perf_intel_platform.rst (+1/-1)
doc/guides/nics/fm10k.rst (+3/-3)
doc/guides/nics/liquidio.rst (+2/-2)
doc/guides/nics/octeontx.rst (+2/-2)
doc/guides/nics/tap.rst (+3/-3)
doc/guides/nics/thunderx.rst (+2/-2)
doc/guides/nics/virtio.rst (+0/-1)
doc/guides/prog_guide/generic_segmentation_offload_lib.rst (+1/-1)
doc/guides/prog_guide/packet_classif_access_ctrl.rst (+2/-2)
doc/guides/prog_guide/rte_security.rst (+2/-2)
doc/guides/rel_notes/release_17_11.rst (+245/-1)
doc/guides/sample_app_ug/ethtool.rst (+1/-1)
doc/guides/sample_app_ug/l2_forward_crypto.rst (+1/-1)
doc/guides/sample_app_ug/performance_thread.rst (+1/-1)
doc/guides/testpmd_app_ug/testpmd_funcs.rst (+11/-2)
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h (+2/-0)
drivers/bus/pci/Makefile (+1/-1)
drivers/bus/pci/linux/pci.c (+7/-6)
drivers/bus/pci/linux/pci_uio.c (+2/-0)
drivers/bus/pci/linux/pci_vfio.c (+9/-4)
drivers/crypto/armv8/rte_armv8_pmd.c (+0/-1)
drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c (+1/-1)
drivers/crypto/mrvl/rte_mrvl_pmd.c (+0/-1)
drivers/crypto/openssl/rte_openssl_pmd.c (+25/-10)
drivers/crypto/qat/qat_crypto.c (+2/-4)
drivers/crypto/qat/qat_crypto_capabilities.h (+2/-2)
drivers/event/dpaa2/dpaa2_eventdev.c (+2/-3)
drivers/event/octeontx/Makefile (+1/-0)
drivers/event/octeontx/ssovf_worker.h (+15/-2)
drivers/event/sw/sw_evdev_xstats.c (+2/-3)
drivers/mempool/dpaa2/dpaa2_hw_mempool.c (+19/-8)
drivers/net/af_packet/rte_eth_af_packet.c (+12/-2)
drivers/net/bnxt/bnxt.h (+45/-0)
drivers/net/bnxt/bnxt_cpr.c (+1/-4)
drivers/net/bnxt/bnxt_ethdev.c (+47/-86)
drivers/net/bnxt/bnxt_hwrm.c (+29/-38)
drivers/net/bnxt/bnxt_irq.c (+1/-2)
drivers/net/bnxt/bnxt_irq.h (+1/-0)
drivers/net/bnxt/bnxt_ring.c (+0/-1)
drivers/net/bnxt/bnxt_rxq.c (+6/-9)
drivers/net/bnxt/bnxt_rxr.c (+10/-7)
drivers/net/bnxt/bnxt_stats.c (+35/-0)
drivers/net/bnxt/bnxt_txq.c (+1/-2)
drivers/net/bnxt/bnxt_txr.c (+0/-1)
drivers/net/bonding/rte_eth_bond_8023ad.c (+34/-24)
drivers/net/bonding/rte_eth_bond_args.c (+2/-3)
drivers/net/bonding/rte_eth_bond_pmd.c (+30/-56)
drivers/net/cxgbe/cxgbe_ethdev.c (+7/-2)
drivers/net/cxgbe/sge.c (+0/-1)
drivers/net/dpaa2/dpaa2_rxtx.c (+38/-13)
drivers/net/dpaa2/mc/dpkg.c (+4/-1)
drivers/net/e1000/e1000_ethdev.h (+5/-5)
drivers/net/e1000/igb_ethdev.c (+8/-4)
drivers/net/e1000/igb_flow.c (+6/-0)
drivers/net/fm10k/base/fm10k_api.c (+16/-4)
drivers/net/fm10k/base/fm10k_pf.c (+2/-2)
drivers/net/fm10k/base/fm10k_pf.h (+6/-0)
drivers/net/fm10k/base/fm10k_vf.c (+2/-2)
drivers/net/fm10k/base/fm10k_vf.h (+5/-0)
drivers/net/fm10k/fm10k_rxtx_vec.c (+9/-2)
drivers/net/i40e/i40e_ethdev_vf.c (+1/-1)
drivers/net/i40e/i40e_rxtx_vec_neon.c (+1/-4)
drivers/net/ixgbe/ixgbe_ethdev.c (+20/-3)
drivers/net/ixgbe/ixgbe_pf.c (+1/-0)
drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c (+67/-3)
drivers/net/mlx4/Makefile (+6/-2)
drivers/net/mlx4/mlx4_utils.h (+10/-0)
drivers/net/mlx5/Makefile (+6/-2)
drivers/net/mlx5/mlx5_rxtx_vec_neon.h (+1/-1)
drivers/net/qede/qede_ethdev.c (+7/-3)
drivers/net/qede/qede_ethdev.h (+1/-0)
drivers/net/qede/qede_rxtx.c (+3/-3)
drivers/net/sfc/sfc_flow.c (+2/-2)
drivers/net/tap/rte_eth_tap.c (+4/-3)
drivers/net/vhost/rte_eth_vhost.c (+4/-0)
drivers/net/virtio/virtio_ethdev.c (+2/-0)
drivers/net/virtio/virtio_rxtx.c (+22/-8)
drivers/net/virtio/virtio_rxtx_simple_neon.c (+3/-2)
drivers/net/virtio/virtio_rxtx_simple_sse.c (+3/-2)
drivers/net/virtio/virtio_user/virtio_user_dev.c (+4/-0)
examples/ethtool/lib/rte_ethtool.c (+0/-2)
examples/ipsec-secgw/ep0.cfg (+4/-4)
examples/ipsec-secgw/ep1.cfg (+6/-6)
examples/ipsec-secgw/sa.c (+2/-2)
examples/kni/main.c (+0/-1)
examples/l3fwd-power/main.c (+2/-0)
examples/multi_process/client_server_mp/mp_client/client.c (+9/-9)
examples/vm_power_manager/channel_monitor.c (+12/-0)
examples/vm_power_manager/guest_cli/vm_power_cli_guest.c (+1/-1)
lib/librte_compat/rte_compat.h (+2/-2)
lib/librte_cryptodev/rte_cryptodev.c (+54/-17)
lib/librte_cryptodev/rte_cryptodev_pmd.h (+0/-1)
lib/librte_distributor/rte_distributor.c (+50/-18)
lib/librte_distributor/rte_distributor_v20.c (+42/-17)
lib/librte_eal/common/eal_common_lcore.c (+0/-11)
lib/librte_eal/common/eal_common_log.c (+1/-1)
lib/librte_eal/common/eal_hugepages.h (+1/-1)
lib/librte_eal/common/include/rte_dev.h (+1/-1)
lib/librte_eal/common/include/rte_version.h (+1/-1)
lib/librte_eal/common/malloc_elem.c (+7/-0)
lib/librte_eal/common/rte_malloc.c (+2/-1)
lib/librte_eal/common/rte_service.c (+10/-10)
lib/librte_eal/linuxapp/kni/kni_net.c (+1/-1)
lib/librte_efd/rte_efd.c (+1/-1)
lib/librte_ether/rte_eth_ctrl.h (+1/-1)
lib/librte_ether/rte_ethdev.c (+7/-1)
lib/librte_ether/rte_ethdev.h (+5/-5)
lib/librte_ether/rte_flow.h (+1/-1)
lib/librte_power/guest_channel.c (+6/-6)
lib/librte_power/rte_power_acpi_cpufreq.c (+4/-3)
lib/librte_ring/rte_ring.h (+6/-3)
lib/librte_security/rte_security.h (+6/-6)
lib/librte_vhost/rte_vhost.h (+1/-1)
lib/librte_vhost/socket.c (+9/-1)
lib/librte_vhost/vhost.c (+212/-8)
lib/librte_vhost/vhost.h (+62/-119)
lib/librte_vhost/vhost_user.c (+76/-40)
lib/librte_vhost/virtio_net.c (+11/-8)
mk/rte.app.mk (+0/-4)
pkg/dpdk.spec (+1/-1)
test/test/process.h (+45/-7)
test/test/test_distributor_perf.c (+1/-1)
test/test/test_efd.c (+1/-1)
test/test/test_efd_perf.c (+3/-3)
test/test/test_hash_perf.c (+6/-6)
test/test/test_interrupts.c (+7/-3)
test/test/test_link_bonding.c (+5/-0)
test/test/test_lpm_perf.c (+2/-2)
test/test/test_mbuf.c (+3/-0)
test/test/test_member_perf.c (+8/-8)
test/test/test_service_cores.c (+1/-1)
usertools/dpdk-pmdinfo.py (+46/-19)
Reviewer Review Type Date Requested Status
Rafael David Tinoco (community) Approve
Canonical Server Pending
Canonical Server packageset reviewers Pending
git-ubuntu developers Pending
Review via email: mp+386721@code.launchpad.net
Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

Intentionally not taking any of the later packaging changes, as they might change the SRU too much.
Instead these are clean cherry-picks from the upstream branches.
https://salsa.debian.org/debian/dpdk/-/tree/upstream-17.11-stable provides gbp-import-based commits to pick for that, which is what I did.

If you look at just the diff you'll see a lot of churn due to the upstream changes, but commit by commit they should be easy to review.

A few changes could be dropped because upstream integrated them.

PPA: https://launchpad.net/~ci-train-ppa-service/+archive/ubuntu/4131/+packages
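
Commit by commit, that review boils down to something like the following (a rough sketch, assuming the MP branches are fetched locally; remote names may differ in your checkout):

 $ git log --oneline ubuntu/bionic-devel..merge-17.11.10-BIONIC
 $ git show <commit>   # inspect each cherry-pick / packaging change on its own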

Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

Interesting arm64 build error:

 /<<PKGBUILDDIR>>/debian/build/static-root/lib/librte_pmd_mlx5.a(mlx5_rxtx_vec.o): In function `rxq_burst_v':
 /<<PKGBUILDDIR>>/drivers/net/mlx5/mlx5_rxtx_vec_neon.h:1035: undefined reference to `rte_cio_wmb'

Due to
https://git.dpdk.org/dpdk-stable/commit/?id=4cd784b02d29f706ff2297b6f65eeb680c0af9ff

Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

The issue is that https://git.dpdk.org/dpdk/commit/?id=52bd60a5 is missing on 17.11.x.

I've let upstream know and added it to our build.
New upload in the PPA.
Changes pushed to this branch.

Revision history for this message
Rafael David Tinoco (rafaeldtinoco) wrote :

I'll review this one...

Revision history for this message
Rafael David Tinoco (rafaeldtinoco) wrote :

Debian (origin/upstream-17.11-stable)

5168a4ac New upstream version 17.11.10
25bc9f91 New upstream version 17.11.9

$ git log -p 25bc9f91..5168a4ac | diffstat -s
 147 files changed, 1709 insertions(+), 788 deletions(-)

Ubuntu (merge-17.11.10-BIONIC)

11e2c6d6c changelog 17.11.10-0ubuntu0.1 for bionic
453349ac6 d/p/eal-arm64-define-coherent-I-O-memory-barriers.patch
2d81cb9aa drop d/p/fix_kernel_53_build.patch being in 17.11.10
3a2dd7836 New upstream version 17.11.10
e1b4486d3 17.11.9-0ubuntu18.04.2 (patches unapplied)

$ git log -p e1b4486d3..3a2dd7836 | diffstat -s
 147 files changed, 1709 insertions(+), 788 deletions(-)

Pretty much the same. Analysing the delta now...
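
(As an additional cross-check, one could compare the two upstream imports tree-to-tree; a sketch, assuming both import commits are present in the same local repository:

 $ git diff --stat 5168a4ac 3a2dd7836

An empty diff would confirm the Debian and Ubuntu 17.11.10 imports carry the same tree.)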

Revision history for this message
Rafael David Tinoco (rafaeldtinoco) wrote :

For commit:

commit 2d81cb9aa
Author: Christian Ehrhardt <email address hidden>
Date: Thu Jul 2 07:16:16 2020

    drop d/p/fix_kernel_53_build.patch being in 17.11.10

    Signed-off-by: Christian Ehrhardt <email address hidden>

Reverse-applying the patch succeeds, which confirms the fix is already contained in 17.11.10:

$ patch -p1 -R < debian/patches/fix_kernel_53_build.patch
patching file lib/librte_eal/linuxapp/kni/kni_net.c
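
(Equivalently, one could check the new orig tree for the quoted include form that the patch introduced; a sketch, assuming an unpacked 17.11.10 source tree:

 $ grep -n 'kni_fifo' lib/librte_eal/linuxapp/kni/kni_net.c

expecting `#include "kni_fifo.h"` rather than the old `<kni_fifo.h>`.)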

Revision history for this message
Rafael David Tinoco (rafaeldtinoco) wrote :

For commit:

commit 453349ac6
Author: Christian Ehrhardt <email address hidden>
Date: Thu Jul 2 12:19:56 2020

    d/p/eal-arm64-define-coherent-I-O-memory-barriers.patch: fix FTBFS on arm64

    Signed-off-by: Christian Ehrhardt <email address hidden>

solves the arm64 build issue:

...mlx5/mlx5_rxtx_vec_neon.h:1035: undefined reference to `rte_cio_wmb'

Revision history for this message
Rafael David Tinoco (rafaeldtinoco) wrote :

# CHECKLIST
----------------------------
 [.] changelog entry correct:
 [.] targeted to correct codename
 [.] version number is correct
 [.] update-maintainer has been run before
 ----
 [-] changes forwarded upstream/debian (if appropriate)
 [.] patches match what was proposed upstream
 ----
 [.] patches correctly included in debian/patches/series?
 [.] patches have correct DEP3 metadata
 ----
 [.] relying on PPA only for build check ?
 [-] if relying on PPA, did it install correctly ?
     - I'll rely on your functional tests, doing logical
       review only. Let me know if you need more, pls.
 ----
 [-] if building locally, was source build good ?
 [-] if building locally, was binary build good ?
 ----
 [-] was autopkgtest tested ?
 ----
 [.] is this a SRU ?
 [.] if a SRU, does the public bug have a template ?
 [.] is this a bundle of fixes ?
 ----
 [.] is this a MERGE ?
 [.] if MERGE, is there a public bug referred ?
 [.] if MERGE, does it add/remove existing packages ?
 [-] if MERGE, does it bump library SONAME ?
----------------------------
 [.] = ok | [x] = not ok | [?] = question | [!] = note | [-] = n/a
----------------------------

# comments:

 dpdk | 2.2.0-0ubuntu7 | xenial | source
 dpdk | 2.2.0-0ubuntu8 | xenial-updates | source
 dpdk | 17.11.1-6 | bionic | source
 dpdk | 17.11.9-0ubuntu18.04.2 | bionic-security | source
 dpdk | 17.11.9-0ubuntu18.04.2 | bionic-updates | source
 dpdk | 18.11.2-4 | eoan | source
 dpdk | 18.11.5-0ubuntu0.19.10.2 | eoan-security | source
 dpdk | 18.11.5-0ubuntu0.19.10.2 | eoan-updates | source
 dpdk | 19.11.1-0ubuntu1 | focal | source
 dpdk | 19.11.1-0ubuntu1.1 | focal-security | source
 dpdk | 19.11.1-0ubuntu1.1 | focal-updates | source
 dpdk | 19.11.3-1 | groovy | source

All good, +1.

review: Approve
Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

To ssh://git.launchpad.net/~usd-import-team/ubuntu/+source/dpdk
 * [new tag] upload/17.11.10-0ubuntu0.1 -> upload/17.11.10-0ubuntu0.1

Uploading to ubuntu (via ftp to upload.ubuntu.com):
  Uploading dpdk_17.11.10-0ubuntu0.1.dsc: done.
  Uploading dpdk_17.11.10.orig.tar.xz: done.
  Uploading dpdk_17.11.10-0ubuntu0.1.debian.tar.xz: done.
  Uploading dpdk_17.11.10-0ubuntu0.1_source.buildinfo: done.
  Uploading dpdk_17.11.10-0ubuntu0.1_source.changes: done.
Successfully uploaded packages.

Preview Diff

diff --git a/MAINTAINERS b/MAINTAINERS
index 1837bb5..4c9dd84 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -28,7 +28,8 @@ M: Ferruh Yigit <ferruh.yigit@intel.com>
 T: git://dpdk.org/dpdk

 Stable Branches
-M: Yuanhan Liu <yliu@fridaylinux.org>
+M: Luca Boccassi <bluca@debian.org>
+M: Kevin Traynor <ktraynor@redhat.com>
 T: git://dpdk.org/dpdk-stable

 Security Issues
diff --git a/app/proc_info/main.c b/app/proc_info/main.c
index 2893bec..caf4e48 100644
--- a/app/proc_info/main.c
+++ b/app/proc_info/main.c
@@ -196,7 +196,7 @@ proc_info_preparse_args(int argc, char **argv)
 int err = gethostname(host_id, MAX_LONG_OPT_SZ-1);

 if (err)
- strcpy(host_id, "unknown");
+ strlcpy(host_id, "unknown", sizeof(host_id));
 }

 return 0;
@@ -363,50 +363,50 @@ static void collectd_resolve_cnt_type(char *cnt_type, size_t cnt_type_len,
 if ((type_end != NULL) &&
 (strncmp(cnt_name, "rx_", strlen("rx_")) == 0)) {
 if (strncmp(type_end, "_errors", strlen("_errors")) == 0)
- strncpy(cnt_type, "if_rx_errors", cnt_type_len);
+ strlcpy(cnt_type, "if_rx_errors", cnt_type_len);
 else if (strncmp(type_end, "_dropped", strlen("_dropped")) == 0)
- strncpy(cnt_type, "if_rx_dropped", cnt_type_len);
+ strlcpy(cnt_type, "if_rx_dropped", cnt_type_len);
 else if (strncmp(type_end, "_bytes", strlen("_bytes")) == 0)
- strncpy(cnt_type, "if_rx_octets", cnt_type_len);
+ strlcpy(cnt_type, "if_rx_octets", cnt_type_len);
 else if (strncmp(type_end, "_packets", strlen("_packets")) == 0)
- strncpy(cnt_type, "if_rx_packets", cnt_type_len);
+ strlcpy(cnt_type, "if_rx_packets", cnt_type_len);
 else if (strncmp(type_end, "_placement",
 strlen("_placement")) == 0)
- strncpy(cnt_type, "if_rx_errors", cnt_type_len);
+ strlcpy(cnt_type, "if_rx_errors", cnt_type_len);
 else if (strncmp(type_end, "_buff", strlen("_buff")) == 0)
- strncpy(cnt_type, "if_rx_errors", cnt_type_len);
+ strlcpy(cnt_type, "if_rx_errors", cnt_type_len);
 else
 /* Does not fit obvious type: use a more generic one */
- strncpy(cnt_type, "derive", cnt_type_len);
+ strlcpy(cnt_type, "derive", cnt_type_len);
 } else if ((type_end != NULL) &&
 (strncmp(cnt_name, "tx_", strlen("tx_"))) == 0) {
 if (strncmp(type_end, "_errors", strlen("_errors")) == 0)
- strncpy(cnt_type, "if_tx_errors", cnt_type_len);
+ strlcpy(cnt_type, "if_tx_errors", cnt_type_len);
 else if (strncmp(type_end, "_dropped", strlen("_dropped")) == 0)
- strncpy(cnt_type, "if_tx_dropped", cnt_type_len);
+ strlcpy(cnt_type, "if_tx_dropped", cnt_type_len);
 else if (strncmp(type_end, "_bytes", strlen("_bytes")) == 0)
- strncpy(cnt_type, "if_tx_octets", cnt_type_len);
+ strlcpy(cnt_type, "if_tx_octets", cnt_type_len);
 else if (strncmp(type_end, "_packets", strlen("_packets")) == 0)
- strncpy(cnt_type, "if_tx_packets", cnt_type_len);
+ strlcpy(cnt_type, "if_tx_packets", cnt_type_len);
 else
 /* Does not fit obvious type: use a more generic one */
- strncpy(cnt_type, "derive", cnt_type_len);
+ strlcpy(cnt_type, "derive", cnt_type_len);
 } else if ((type_end != NULL) &&
 (strncmp(cnt_name, "flow_", strlen("flow_"))) == 0) {
 if (strncmp(type_end, "_filters", strlen("_filters")) == 0)
- strncpy(cnt_type, "operations", cnt_type_len);
+ strlcpy(cnt_type, "operations", cnt_type_len);
 else if (strncmp(type_end, "_errors", strlen("_errors")) == 0)
- strncpy(cnt_type, "errors", cnt_type_len);
+ strlcpy(cnt_type, "errors", cnt_type_len);
 else if (strncmp(type_end, "_filters", strlen("_filters")) == 0)
- strncpy(cnt_type, "filter_result", cnt_type_len);
+ strlcpy(cnt_type, "filter_result", cnt_type_len);
 } else if ((type_end != NULL) &&
 (strncmp(cnt_name, "mac_", strlen("mac_"))) == 0) {
 if (strncmp(type_end, "_errors", strlen("_errors")) == 0)
- strncpy(cnt_type, "errors", cnt_type_len);
+ strlcpy(cnt_type, "errors", cnt_type_len);
 } else {
 /* Does not fit obvious type, or strrchr error: */
 /* use a more generic type */
- strncpy(cnt_type, "derive", cnt_type_len);
+ strlcpy(cnt_type, "derive", cnt_type_len);
 }
 }

diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 6945c8b..2935642 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -230,11 +230,19 @@ cperf_mbuf_set(struct rte_mbuf *mbuf,
 {
 uint32_t segment_sz = options->segment_sz;
 uint8_t *mbuf_data;
- uint8_t *test_data =
- (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ uint8_t *test_data;
+ uint32_t remaining_bytes = options->max_buffer_size;
+
+ if (options->op_type == CPERF_AEAD) {
+ test_data = (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
 test_vector->plaintext.data :
 test_vector->ciphertext.data;
- uint32_t remaining_bytes = options->max_buffer_size;
+ } else {
+ test_data =
+ (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ test_vector->plaintext.data :
+ test_vector->ciphertext.data;
+ }

 while (remaining_bytes) {
 mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 7a3a818..2365249 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -799,6 +799,9 @@ static void cmd_help_long_parsed(void *parsed_result,
 "port close (port_id|all)\n"
 " Close all ports or port_id.\n\n"

+ "port reset (port_id|all)\n"
+ " Reset all ports or port_id.\n\n"
+
 "port attach (ident)\n"
 " Attach physical or virtual dev by pci address or virtual device name\n\n"

@@ -1974,7 +1977,6 @@ cmd_config_rss_hash_key_parsed(void *parsed_result,
 uint8_t hash_key_size;
 uint32_t key_len;

- memset(&dev_info, 0, sizeof(dev_info));
 rte_eth_dev_info_get(res->port_id, &dev_info);
 if (dev_info.hash_key_size > 0 &&
 dev_info.hash_key_size <= sizeof(hash_key))
@@ -2214,7 +2216,6 @@ cmd_set_rss_reta_parsed(void *parsed_result,
 struct rte_eth_rss_reta_entry64 reta_conf[8];
 struct cmd_config_rss_reta *res = parsed_result;

- memset(&dev_info, 0, sizeof(dev_info));
 rte_eth_dev_info_get(res->port_id, &dev_info);
 if (dev_info.reta_size == 0) {
 printf("Redirection table size is 0 which is "
@@ -2334,7 +2335,6 @@ cmd_showport_reta_parsed(void *parsed_result,
 struct rte_eth_dev_info dev_info;
 uint16_t max_reta_size;

- memset(&dev_info, 0, sizeof(dev_info));
 rte_eth_dev_info_get(res->port_id, &dev_info);
 max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
 if (res->size == 0 || res->size > max_reta_size) {
@@ -10046,7 +10046,6 @@ cmd_flow_director_filter_parsed(void *parsed_result,
 else if (!strncmp(res->pf_vf, "vf", 2)) {
 struct rte_eth_dev_info dev_info;

- memset(&dev_info, 0, sizeof(dev_info));
 rte_eth_dev_info_get(res->port_id, &dev_info);
 errno = 0;
 vf_id = strtoul(res->pf_vf + 2, &end, 10);
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 61608d1..4e3531b 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -242,6 +242,10 @@ nic_xstats_display(portid_t port_id)
 int cnt_xstats, idx_xstat;
 struct rte_eth_xstat_name *xstats_names;

+ if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+ print_valid_ports();
+ return;
+ }
 printf("###### NIC extended statistics for port %-2d\n", port_id);
 if (!rte_eth_dev_is_valid_port(port_id)) {
 printf("Error: Invalid port number %i\n", port_id);
@@ -297,6 +301,10 @@ nic_xstats_display(portid_t port_id)
 void
 nic_xstats_clear(portid_t port_id)
 {
+ if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+ print_valid_ports();
+ return;
+ }
 rte_eth_xstats_reset(port_id);
 }

@@ -429,7 +437,6 @@ port_infos_display(portid_t port_id)
 }
 port = &ports[port_id];
 rte_eth_link_get_nowait(port_id, &link);
- memset(&dev_info, 0, sizeof(dev_info));
 rte_eth_dev_info_get(port_id, &dev_info);
 printf("\n%s Infos for port %-2d %s\n",
 info_border, port_id, info_border);
@@ -1558,7 +1565,6 @@ ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
 struct rte_eth_dev_info dev_info;

- memset(&dev_info, 0, sizeof(dev_info));
 rte_eth_dev_info_get(port_id, &dev_info);
 if (strstr(dev_info.driver_name, "i40e") != NULL) {
 /* 32 bytes RX descriptor, i40e only */
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index ae59e76..70f3b39 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -432,6 +432,9 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,

 udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);

+ if (tso_enabled)
+ ol_flags |= PKT_TX_TCP_SEG;
+
 /* outer UDP checksum is done in software as we have no hardware
 * supporting it today, and no API for it. In the other side, for
 * UDP tunneling, like VXLAN or Geneve, outer UDP checksum can be
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 1dfbcc4..8fcd72d 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -168,8 +168,8 @@ usage(char* progname)
 printf(" --disable-hw-vlan-extend: disable hardware vlan extend.\n");
 printf(" --enable-drop-en: enable per queue packet drop.\n");
 printf(" --disable-rss: disable rss.\n");
- printf(" --port-topology=N: set port topology (N: paired (default) or "
- "chained).\n");
+ printf(" --port-topology=<paired|chained|loop>: set port topology (paired "
+ "is default).\n");
 printf(" --forward-mode=N: set forwarding mode (N: %s).\n",
 list_pkt_forwarding_modes());
 printf(" --rss-ip: set RSS functions to IPv4/IPv6 only .\n");
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index f8c76a6..c2d6a75 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -1860,6 +1860,13 @@ reset_port(portid_t pid)
 if (port_id_is_invalid(pid, ENABLED_WARN))
 return;

+ port = &ports[pid];
+ if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
+ (pid != (portid_t)RTE_PORT_ALL && !((port->port_status != RTE_PORT_STOPPED) && (port->slave_flag == 0)))) {
+ printf("Can not reset port(s), please stop port(s) first.\n");
+ return;
+ }
+
 printf("Resetting ports...\n");

 RTE_ETH_FOREACH_DEV(pi) {
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 4ce4d61..c9e6db9 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -234,7 +234,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 pkt->data_len = tx_pkt_seg_lengths[0];
 pkt_seg = pkt;
 if (tx_pkt_split == TX_PKT_SPLIT_RND)
- nb_segs = random() % tx_pkt_nb_segs + 1;
+ nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
 else
 nb_segs = tx_pkt_nb_segs;
 pkt_len = pkt->data_len;
diff --git a/config/common_base b/config/common_base
index 7b47922..4355cad 100644
--- a/config/common_base
+++ b/config/common_base
@@ -99,7 +99,6 @@ CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
 CONFIG_RTE_LOG_HISTORY=256
 CONFIG_RTE_BACKTRACE=y
 CONFIG_RTE_LIBEAL_USE_HPET=n
-CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
 CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
 CONFIG_RTE_EAL_IGB_UIO=n
 CONFIG_RTE_EAL_VFIO=n
diff --git a/debian/changelog b/debian/changelog
index d1e3458..0b80de6 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,21 @@
+dpdk (17.11.10-0ubuntu0.1) bionic; urgency=medium
+
+ * Merge latest stable DPDK release 17.11.10; for a full list of changes see:
+ https://doc.dpdk.org/guides-17.11/rel_notes/release_17_11.html#id13
+ After 17.11.8 being a security release and 17.11.9 a regression fix
+ for 17.11.8 this is the fiirst "real" stable release in a while.
+ (LP: #1885915)
+ * Remaining changes:
+ - SECURITY UPDATE: Integer overflow in vhost_user_set_log_base()
+ - SECURITY UPDATE: Int truncation in vhost_user_check_and_alloc_queue_pair()
+ * Dropped changes:
+ - d/p/fix_kernel_53_build.patch is part of 17.11.10
+ * Added changes
+ - d/p/eal-arm64-define-coherent-I-O-memory-barriers.patch: fix FTBFS of
+ 17.11.10 on arm64
+
+ -- Christian Ehrhardt <christian.ehrhardt@canonical.com> Thu, 02 Jul 2020 09:16:48 +0200
+
 dpdk (17.11.9-0ubuntu18.04.2) bionic-security; urgency=medium

 * SECURITY UPDATE: Integer overflow in vhost_user_set_log_base()
diff --git a/debian/patches/eal-arm64-define-coherent-I-O-memory-barriers.patch b/debian/patches/eal-arm64-define-coherent-I-O-memory-barriers.patch
new file mode 100644
index 0000000..de5da03
--- /dev/null
+++ b/debian/patches/eal-arm64-define-coherent-I-O-memory-barriers.patch
@@ -0,0 +1,34 @@
+From 52bd60a5cf0b05793f66cd507df0388402499f2a Mon Sep 17 00:00:00 2001
+From: Yongseok Koh <yskoh@mellanox.com>
+Date: Thu, 25 Jan 2018 13:02:47 -0800
+Subject: [PATCH] eal/arm64: define coherent I/O memory barriers
+
+Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
+Acked-by: Thomas Speier <tspeier@qti.qualcomm.com>
+Acked-by: Jianbo Liu <jianbo.liu@arm.com>
+
+Origin: upstream, https://git.dpdk.org/dpdk/commit/?id=52bd60a5
+Last-Update: 2020-07-02
+
+---
+ lib/librte_eal/common/include/arch/arm/rte_atomic_64.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
+index b6bbd0b32..ee0d0d15a 100644
+--- a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
++++ b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
+@@ -36,6 +36,10 @@ extern "C" {
+
+ #define rte_io_rmb() rte_rmb()
+
++#define rte_cio_wmb() dmb(oshst)
++
++#define rte_cio_rmb() dmb(oshld)
++
+ #ifdef __cplusplus
+ }
+ #endif
+--
+2.27.0
+
diff --git a/debian/patches/fix_kernel_53_build.patch b/debian/patches/fix_kernel_53_build.patch
deleted file mode 100644
index 3bc3846..0000000
--- a/debian/patches/fix_kernel_53_build.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 61cba437cf3ec7dc186cb3dad27ae9829787d8c6 Mon Sep 17 00:00:00 2001
-From: Christian Ehrhardt <christian.ehrhardt@canonical.com>
-Date: Thu, 24 Oct 2019 08:34:35 +0200
-Subject: [PATCH] kni: fix build with kernel 5.3
-
-The include of kni_fifo should be local as kni provides it.
-With newer kernels build systems this will run into an issue on
-external module compilation.
-
-/var/lib/dkms/dpdk-rte-kni/17.11.6/build/kni_net.c:40:10:
-fatal error: kni_fifo.h: No such file or directory
- #include <kni_fifo.h>
- ^~~~~~~~~~~~
-
-Like the includes to kni_dev.h this should be local as well.
-
-Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
----
- lib/librte_eal/linuxapp/kni/kni_net.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/lib/librte_eal/linuxapp/kni/kni_net.c
-+++ b/lib/librte_eal/linuxapp/kni/kni_net.c
-@@ -37,7 +37,7 @@
- #include <linux/delay.h>
-
- #include <exec-env/rte_kni_common.h>
--#include <kni_fifo.h>
-+#include "kni_fifo.h"
-
- #include "compat.h"
- #include "kni_dev.h"
diff --git a/debian/patches/series b/debian/patches/series
index 28ba70a..c0215af 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,6 +1,6 @@
 fix-vhost-user-socket-permission.patch
 testpmd-link-virtio.patch
 app-testpmd-add-ethernet-peer-command.patch
-fix_kernel_53_build.patch
 0001-vhost-check-log-mmap-offset-and-size-overflow.patch
 0002-vhost-fix-vring-index-check.patch
+eal-arm64-define-coherent-I-O-memory-barriers.patch
diff --git a/doc/guides/conf.py b/doc/guides/conf.py
index 084512d..ed9bb5b 100644
--- a/doc/guides/conf.py
+++ b/doc/guides/conf.py
@@ -94,6 +94,9 @@ custom_latex_preamble = r"""
 \usepackage{helvet}
 \renewcommand{\familydefault}{\sfdefault}
 \RecustomVerbatimEnvironment{Verbatim}{Verbatim}{xleftmargin=5mm}
+\usepackage{etoolbox}
+\robustify\(
+\robustify\)
 """

 # Configuration for the latex/pdf docs.
diff --git a/doc/guides/contributing/coding_style.rst b/doc/guides/contributing/coding_style.rst
index d8e4a0f..38acd5a 100644
--- a/doc/guides/contributing/coding_style.rst
+++ b/doc/guides/contributing/coding_style.rst
@@ -619,10 +619,10 @@ In the DPDK environment, use the logging interface provided:

 /* log in debug level */
 rte_log_set_global_level(RTE_LOG_DEBUG);
- RTE_LOG(DEBUG, my_logtype1, "this is is a debug level message\n");
- RTE_LOG(INFO, my_logtype1, "this is is a info level message\n");
- RTE_LOG(WARNING, my_logtype1, "this is is a warning level message\n");
- RTE_LOG(WARNING, my_logtype2, "this is is a debug level message (not displayed)\n");
+ RTE_LOG(DEBUG, my_logtype1, "this is a debug level message\n");
+ RTE_LOG(INFO, my_logtype1, "this is a info level message\n");
+ RTE_LOG(WARNING, my_logtype1, "this is a warning level message\n");
+ RTE_LOG(WARNING, my_logtype2, "this is a debug level message (not displayed)\n");

 /* log in info level */
 rte_log_set_global_level(RTE_LOG_INFO);
diff --git a/doc/guides/contributing/versioning.rst b/doc/guides/contributing/versioning.rst
index 4000906..fe15e9a 100644
--- a/doc/guides/contributing/versioning.rst
+++ b/doc/guides/contributing/versioning.rst
@@ -149,11 +149,11 @@ library so that older binaries need not be immediately recompiled.
 The macros exported are:

 * ``VERSION_SYMBOL(b, e, n)``: Creates a symbol version table entry binding
- versioned symbol ``b@DPDK_n`` to the internal function ``b_e``.
+ versioned symbol ``b@DPDK_n`` to the internal function ``be``.

 * ``BIND_DEFAULT_SYMBOL(b, e, n)``: Creates a symbol version entry instructing
 the linker to bind references to symbol ``b`` to the internal symbol
- ``b_e``.
+ ``be``.

 * ``MAP_STATIC_SYMBOL(f, p)``: Declare the prototype ``f``, and map it to the
 fully qualified function ``p``, so that if a symbol becomes versioned, it
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index a1f5848..9a03dec 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -48,11 +48,12 @@ AEAD algorithms:

 * RTE_CRYPTO_AEAD_AES_GCM

-
 Limitations
 -----------

 * Chained mbufs are supported but only out-of-place (destination mbuf must be contiguous).
+* Chained mbufs are only supported by RTE_CRYPTO_AEAD_AES_GCM algorithm,
+ not RTE_CRYPTO_AUTH_AES_GMAC.
 * Cipher only is not supported.


diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index 7fcfc07..bf33479 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -54,7 +54,7 @@ Limitations
 * ZUC (EIA3) supported only if hash offset field is byte-aligned.
 * ZUC (EEA3) supported only if cipher length, cipher offset fields are byte-aligned.
 * ZUC PMD cannot be built as a shared library, due to limitations in
- in the underlying library.
+ the underlying library.


 Installation
diff --git a/doc/guides/linux_gsg/nic_perf_intel_platform.rst b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
index 2ef6ed7..dd0c622 100644
--- a/doc/guides/linux_gsg/nic_perf_intel_platform.rst
+++ b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
@@ -152,7 +152,7 @@ Configurations before running DPDK
 # Mount to the specific folder.
 mount -t hugetlbfs nodev /mnt/huge

-2. Check the CPU layout using using the DPDK ``cpu_layout`` utility:
+2. Check the CPU layout using the DPDK ``cpu_layout`` utility:

 .. code-block:: console

diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index b47fc0d..c23f411 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -156,7 +156,7 @@ Switch manager

 The Intel FM10000 family of NICs integrate a hardware switch and multiple host
 interfaces. The FM10000 PMD driver only manages host interfaces. For the
-switch component another switch driver has to be loaded prior to to the
+switch component another switch driver has to be loaded prior to the
 FM10000 PMD driver. The switch driver can be acquired from Intel support.
 Only Testpoint is validated with DPDK, the latest version that has been
 validated with DPDK is 4.1.6.
@@ -172,8 +172,8 @@ the Rx/Tx queues. When switch comes up, a LSC event indicating ``LINK_UP`` is
 sent to the app, which can then restart the FM10000 port to resume network
 processing.

-CRC striping
-~~~~~~~~~~~~
+CRC stripping
+~~~~~~~~~~~~~

 The FM10000 family of NICs strip the CRC for every packets coming into the
 host interface. So, CRC will be stripped even when the
diff --git a/doc/guides/nics/liquidio.rst b/doc/guides/nics/liquidio.rst
index 7bc1604..fffa7b5 100644
--- a/doc/guides/nics/liquidio.rst
+++ b/doc/guides/nics/liquidio.rst
@@ -225,8 +225,8 @@ Ring size

 Number of descriptors for Rx/Tx ring should be in the range 128 to 512.

-CRC striping
-~~~~~~~~~~~~
+CRC stripping
+~~~~~~~~~~~~~

 LiquidIO adapters strip ethernet FCS of every packet coming to the host
 interface. So, CRC will be stripped even when the ``rxmode.hw_strip_crc``
diff --git a/doc/guides/nics/octeontx.rst b/doc/guides/nics/octeontx.rst
index 90bb9e5..8da0867 100644
--- a/doc/guides/nics/octeontx.rst
+++ b/doc/guides/nics/octeontx.rst
@@ -209,8 +209,8 @@ This driver will only work with ``octeontx_fpavf`` external mempool handler
 as it is the most performance effective way for packet allocation and Tx buffer
 recycling on OCTEONTX SoC platform.

-CRC striping
-~~~~~~~~~~~~
+CRC stripping
+~~~~~~~~~~~~~

 The OCTEONTX SoC family NICs strip the CRC for every packets coming into the
 host interface. So, CRC will be stripped even when the
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index 04086b1..a85921a 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -102,7 +102,7 @@ Please change the IP addresses as you see fit.

 If routing is enabled on the host you can also communicate with the DPDK App
 over the internet via a standard socket layer application as long as you
-account for the protocol handing in the application.
+account for the protocol handling in the application.

 If you have a Network Stack in your DPDK application or something like it you
 can utilize that stack to handle the network protocols. Plus you would be able
@@ -146,9 +146,9 @@ As rules are translated to TC, it is possible to show them with something like::
 Examples of testpmd flow rules
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Drop packets for destination IP 192.168.0.1::
+Drop packets for destination IP 192.0.2.1::

- testpmd> flow create 0 priority 1 ingress pattern eth / ipv4 dst is 1.1.1.1 \
+ testpmd> flow create 0 priority 1 ingress pattern eth / ipv4 dst is 192.0.2.1 \
 / end actions drop / end

 Ensure packets from a given MAC address are received on a queue 2::
diff --git a/doc/guides/nics/thunderx.rst b/doc/guides/nics/thunderx.rst
index 45bc690..f889684 100644
--- a/doc/guides/nics/thunderx.rst
+++ b/doc/guides/nics/thunderx.rst
@@ -354,8 +354,8 @@ The nicvf thunderx driver will make use of attached secondary VFs automatically
 Limitations
 -----------

-CRC striping
-~~~~~~~~~~~~
+CRC stripping
+~~~~~~~~~~~~~

 The ThunderX SoC family NICs strip the CRC for every packets coming into the
 host interface. So, CRC will be stripped even when the
diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index 9d05cca..6672d7f 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -34,7 +34,6 @@ Poll Mode Driver for Emulated Virtio NIC
 Virtio is a para-virtualization framework initiated by IBM, and supported by KVM hypervisor.
 In the Data Plane Development Kit (DPDK),
 we provide a virtio Poll Mode Driver (PMD) as a software solution, comparing to SRIOV hardware solution,
-
 for fast guest VM to guest VM communication and guest VM to host communication.

 Vhost is a kernel acceleration module for virtio qemu backend.
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
index ef1de53..008b9dd 100644
--- a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -223,7 +223,7 @@ To segment an outgoing packet, an application must:
 2. Set the appropriate ol_flags in the mbuf.

 - The GSO library use the value of an mbuf's ``ol_flags`` attribute to
- to determine how a packet should be segmented. It is the application's
+ determine how a packet should be segmented. It is the application's
 responsibility to ensure that these flags are set.

 - For example, in order to segment TCP/IPv4 packets, the application should
diff --git a/doc/guides/prog_guide/packet_classif_access_ctrl.rst b/doc/guides/prog_guide/packet_classif_access_ctrl.rst
index a6bee9b..a5748e2 100644
--- a/doc/guides/prog_guide/packet_classif_access_ctrl.rst
+++ b/doc/guides/prog_guide/packet_classif_access_ctrl.rst
@@ -181,7 +181,7 @@ To define classification for the IPv6 2-tuple: <protocol, IPv6 source address> o

 .. code-block:: c

- struct struct ipv6_hdr {
+ struct ipv6_hdr {
 uint32_t vtc_flow; /* IP version, traffic class & flow label. */
 uint16_t payload_len; /* IP packet length - includes sizeof(ip_header). */
 uint8_t proto; /* Protocol, next header. */
@@ -194,7 +194,7 @@ The following array of field definitions can be used:

 .. code-block:: c

- struct struct rte_acl_field_def ipv6_2tuple_defs[5] = {
+ struct rte_acl_field_def ipv6_2tuple_defs[5] = {
 {
 .type = RTE_ACL_FIELD_TYPE_BITMASK,
 .size = sizeof (uint8_t),
diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst
index 71be036..4253ee9 100644
--- a/doc/guides/prog_guide/rte_security.rst
+++ b/doc/guides/prog_guide/rte_security.rst
@@ -76,7 +76,7 @@ however all security protocol related headers are still attached to the
 packet. e.g. In case of IPSec, the IPSec tunnel headers (if any),
 ESP/AH headers will remain in the packet but the received packet
 contains the decrypted data where the encrypted data was when the packet
-arrived. The driver Rx path check the descriptors and and based on the
+arrived. The driver Rx path check the descriptors and based on the
 crypto status sets additional flags in the rte_mbuf.ol_flags field.

 .. note::
@@ -90,7 +90,7 @@ Egress Data path - The software prepares the egress packet by adding
 relevant security protocol headers. Only the data will not be
 encrypted by the software. The driver will accordingly configure the
 tx descriptors. The hardware device will encrypt the data before sending the
-the packet out.
+packet out.

 .. note::

diff --git a/doc/guides/rel_notes/release_17_11.rst b/doc/guides/rel_notes/release_17_11.rst
index bc1d88d..2d8e667 100644
--- a/doc/guides/rel_notes/release_17_11.rst
+++ b/doc/guides/rel_notes/release_17_11.rst
@@ -459,7 +459,7 @@ API Changes
 * **Added mbuf flags PKT_RX_VLAN and PKT_RX_QINQ.**

 Two ``mbuf`` flags have been added to indicate that the VLAN
- identifier has been saved in in the ``mbuf`` structure. For instance:
+ identifier has been saved in the ``mbuf`` structure. For instance:

 - If VLAN is not stripped and TCI is saved: ``PKT_RX_VLAN``
 - If VLAN is stripped and TCI is saved: ``PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED``
@@ -2015,3 +2015,247 @@ Fixes skipped and status unresolved
 * http://doc.dpdk.org/dts/test_plans/virtio_pvp_regression_test_plan.html
 * http://doc.dpdk.org/dts/test_plans/vhost_dequeue_zero_copy_test_plan.html
 * http://doc.dpdk.org/dts/test_plans/vm2vm_virtio_pmd_test_plan.html
+
+17.11.10 Release Notes
+----------------------
+
+17.11.10 Fixes
+~~~~~~~~~~~~~~
+
+* app/crypto-perf: fix input of AEAD decrypt
+* app/proc_info: fix string copying to use strlcpy
+* app/testpmd: block xstats for hidden ports
+* app/testpmd: fix crash on port reset
+* app/testpmd: fix help for loop topology option
+* app/testpmd: fix Tx checksum when TSO enabled
+* app/testpmd: use better randomness for Tx split
+* bus/pci: align next mapping address on page boundary
+* bus/pci: fix Intel IOMMU sysfs access check
+* bus/pci: remove useless link dependency on ethdev
+* cryptodev: fix checks related to device id
+* cryptodev: fix initialization on multi-process
+* cryptodev: fix missing device id range checking
+* crypto/dpaa2_sec: fix length retrieved from hardware
+* crypto/openssl: use local copy for session contexts
+* crypto/qat: fix digest length in XCBC capability
+* crypto/qat: fix null auth issues when using vfio_pci
+* doc: fix a common typo in NIC guides
+* doc: fix AESNI-GCM limitations in crypto guide
+* doc: fix description of versioning macros
+* doc: fix format in virtio guide
+* doc: fix tap guide
+* doc: fix typo in l2fwd-crypto guide
+* doc/guides: clean repeated words
+* doc: robustify PDF build
+* drivers/crypto: remove some invalid comments
+* eal: remove dead code on NUMA node detection
+* ethdev: fix endian annotation for SPI item
+* ethdev: fix include of ethernet header file
+* ethdev: fix typos for ENOTSUP
+* ethdev: limit maximum number of queues
+* ethdev: remove redundant device info cleanup before get
+* event/dpaa2: fix default queue configuration
+* event/octeontx: fix partial Rx packet handling
+* event/sw: fix xstats reset value
+* examples/ipsec-secgw: fix default configuration
+* examples/ipsec-secgw: fix GCM IV length
+* examples/ipsec-secgw: fix SHA256-HMAC digest length
+* examples/l3fwd-power: fix Rx interrupt disabling
+* examples/multi_process: fix client crash with sparse ports
+* examples/vm_power: fix build without i40e
+* examples/vm_power: fix type of cmdline token in cli
+* kni: fix build with kernel 5.3
+* lib/distributor: fix deadlock on aarch64
+* lib: fix doxygen typos
+* lib: fix log typos
+* maintainers: update for stable branches
+* malloc: fix realloc copy size
+* malloc: fix realloc padded element size
+* malloc: set pad to 0 on free
+* mempool/dpaa2: report error on endless loop in mbuf release
+* mk: remove library search path from binary
+* net/af_packet: fix stale sockets
+* net/af_packet: improve Tx statistics accuracy
+* net/bnxt: cleanup comments
+* net/bnxt: enforce IO barrier for doorbell command
+* net/bnxt: expose some missing counters in port stats
+* net/bnxt: fix async link handling and update
+* net/bnxt: fix coding style
+* net/bnxt: fix crash in secondary process
+* net/bnxt: fix crash in xstats get
+* net/bnxt: fix dereference before null check
+* net/bnxt: fix log message level
+* net/bnxt: fix mbuf free when clearing Tx queue
+* net/bnxt: fix memory leak
+* net/bnxt: fix multicast filter programming
+* net/bnxt: fix Rx queue count
+* net/bnxt: fix setting default MAC address
+* net/bnxt: get default HWRM command timeout from FW
+* net/bnxt: move macro definitions to header file
+* net/bnxt: remove commented out code
+* net/bnxt: remove duplicate barrier
+* net/bnxt: remove redundant header file inclusion
+* net/bnxt: remove unnecessary variable assignment
+* net/bnxt: return error if setting link up fails
+* net/bonding: fix LACP fast queue Rx handler
+* net/bonding: fix link speed update in broadcast mode
+* net/bonding: fix OOB access in other aggregator modes
+* net/bonding: fix port ID check
+* net/bonding: fix selection logic
+* net/bonding: fix slave id types
+* net/bonding: fix unicast packets filtering
+* net/bonding: use non deprecated PCI API
+* net/cxgbe: fix prefetch for non-coalesced Tx packets
+* net/dpaa2: add retry and timeout in packet enqueue API
+* net/dpaa2: fix possible use of uninitialized vars
+* net/dpaa2: set port in mbuf
+* net/e1000: fix link status
+* net/fm10k: fix mbuf free in vector Rx
+* net/fm10k: fix stats crash in multi-process
+* net/i40e: downgrade error log
+* net/i40e: fix address of first segment
+* net/i40e: remove compiler barrier from NEON Rx
+* net/i40e: remove memory barrier from NEON Rx
+* net/igb: fix global variable multiple definitions
+* net/igb: fix PHY status if PHY reset is not blocked
+* net/ixgbe: enable new PF host mbox version
+* net/ixgbe: fix address of first segment
+* net/ixgbe: fix link status
+* net/ixgbe: fix link status
+* net/ixgbe: fix queue interrupt for X552/557
+* net/ixgbe: fix VF RSS offloads configuration
+* net/ixgbe: fix X553 speed capability
+* net/ixgbe: remove memory barrier from NEON Rx
+* net/ixgbe: remove redundant assignment
+* net/ixgbe: support packet type with NEON
+* net/mlx4: fix build on ppc64
+* net/mlx5: fix Rx CQ doorbell synchronization on aarch64
+* net/mlx: fix build with make and recent gcc
+* net/mlx: fix debug build with icc
+* net/qede: fix setting MTU
+* net/qede: fix setting VLAN strip mode
+* net/qede: limit Rx ring index read for debug
+* net/sfc: fix adapter lock usage on rule creation
+* net/tap: fix blocked Rx packets
+* net/vhost: fix redundant queue state event
+* net/virtio: fix descriptor addressed in Tx
+* net/virtio: fix mbuf data and packet length mismatch
+* net/virtio: fix Tx checksum offloads
+* net/virtio: get all pending Rx packets in vectorized paths
+* net/virtio: init MTU in case no control channel
+* net/virtio: reject deferred Rx start
+* net/virtio: reject deferred Tx start
+* net/virtio-user: fix setting filters
+* power: fix socket indicator value
+* power: handle frequency increase with turbo disabled
+* ring: enforce reading tail before slots
+* security: fix doxygen fields
+* service: use log for error messages
+* test/bonding: fix LSC related cases
+* test: fix global variable multiple definitions
+* test/interrupt: account for race with callback
+* test/lpm: fix measured cycles for delete
+* test/mbuf: fix forged mbuf in clone test
+* test: optimise fd closing in forks
+* test/service: fix wait for service core
+* test/test: fix test app defining unused variable
+* usertools: fix pmdinfo with python 3 and pyelftools>=0.24
+* version: 17.11.10-rc1
+* vfio: fix truncated BAR offset for 32-bit
+* vhost: convert buffer addresses to GPA for logging
+* vhost: fix IPv4 checksum
+* vhost: fix slave request fd leak
+* vhost: fix virtqueue not accessible
+* vhost: fix vring address handling during live migration
+* vhost: fix vring memory partially mapped
+* vhost: forbid reallocation when running
+* vhost: prevent zero copy mode if IOMMU is on
+* vhost: protect vring access done by application
+* vhost: translate incoming log address to GPA
+* vhost: un-inline dirty pages logging functions
+
+17.11.10 Validation
+~~~~~~~~~~~~~~~~~~~
+
+* Red Hat(R) Testing
+
+ * RHEL 7.8
+ * Functionality
+
+ * PF assignment
+ * VF assignment
+ * vhost single/multi queues and cross-NUMA
+ * vhostclient reconnect
+ * vhost live migration with single/multi queues and cross-NUMA
+ * OVS PVP
+
+* Intel(R) Testing
+
+ * Basic Intel(R) NIC(ixgbe and i40e) testing
+
+ * PF (i40e)
+ * PF (ixgbe)
+ * VF
+ * Compile Testing
+ * Intel NIC single core/NIC performance
+
+ * Basic cryptodev and virtio testing
+
+ * cryptodev
+ * vhost/virtio basic loopback, PVP and performance test
+
+* Mellanox(R) Testing
+
+ * Basic functionality with testpmd
+
+ * Tx/Rx
+ * xstats
+ * Timestamps
+ * Link status
+ * RTE flow and flow_director
+ * RSS
+ * VLAN stripping and insertion
+ * Checksum/TSO
+ * ptype
+ * Multi-process
+
+ * ConnectX-5
+
+ * RHEL 7.4
+ * Kernel 3.10.0-693.el7.x86_64
+ * Driver MLNX_OFED_LINUX-4.7-3.2.9.0
+ * fw 16.26.4012
+
+ * ConnectX-4 Lx
+
+ * RHEL 7.4
+ * Kernel 3.10.0-693.el7.x86_64
+ * Driver MLNX_OFED_LINUX-4.7-3.2.9.0
+ * fw 14.26.4012
+
+* Intel(R) Testing with Open vSwitch
+
+ * OVS testing with OVS branches 2.10 and 2.19 with VSPERF
+
+ * Tested NICs
+
+ * i40e (X710)
+ * ixgbe (82599ES)
+
+ * Functionality
+
+ * P2P
+ * PVP
+ * Hotplug
+ * Multiqueue
+ * Vhostuserclient reconnect
+ * Vhost cross-NUMA awareness
+ * Jumbo frames
+ * Rate limiting
+ * QoS policer
+
+17.11.10 Known Issues
+~~~~~~~~~~~~~~~~~~~~~
+
+* MLX4/5 and rte_flow: count actions and rules with vlan items are not supported, and MLNX_OFED_LINUX 4.6-1.0.1.1 is not compatible
+* DPDK 17.11.10 contains fixes up to DPDK v19.11. Issues identified/fixed in DPDK master branch after DPDK v19.11 may be present in DPDK 17.11.10
diff --git a/doc/guides/sample_app_ug/ethtool.rst b/doc/guides/sample_app_ug/ethtool.rst
index 6dd11dc..4cefc2f 100644
--- a/doc/guides/sample_app_ug/ethtool.rst
+++ b/doc/guides/sample_app_ug/ethtool.rst
@@ -68,7 +68,7 @@ The application is console-driven using the cmdline DPDK interface:
 EthApp>

 From this interface the available commands and descriptions of what
-they do as as follows:
+they do as follows:

 * ``drvinfo``: Print driver info
 * ``eeprom``: Dump EEPROM to file
diff --git a/doc/guides/sample_app_ug/l2_forward_crypto.rst b/doc/guides/sample_app_ug/l2_forward_crypto.rst
index 1e85b4a..2d3c949 100644
--- a/doc/guides/sample_app_ug/l2_forward_crypto.rst
+++ b/doc/guides/sample_app_ug/l2_forward_crypto.rst
@@ -223,7 +223,7 @@ Crypto operation specification
 All the packets received in all the ports get transformed by the crypto device/s
 (ciphering and/or authentication).
 The crypto operation to be performed on the packet is parsed from the command line
-(go to "Running the Application section for all the options).
+(go to "Running the Application" section for all the options).

 If no parameter is passed, the default crypto operation is:

diff --git a/doc/guides/sample_app_ug/performance_thread.rst b/doc/guides/sample_app_ug/performance_thread.rst
index 57391ca..b194c3b 100644
--- a/doc/guides/sample_app_ug/performance_thread.rst
+++ b/doc/guides/sample_app_ug/performance_thread.rst
@@ -308,7 +308,7 @@ functionality into different threads, and the pairs of RX and TX threads are
 interconnected via software rings.

 On initialization an L-thread scheduler is started on every EAL thread. On all
-but the master EAL thread only a a dummy L-thread is initially started.
+but the master EAL thread only a dummy L-thread is initially started.
 The L-thread started on the master EAL thread then spawns other L-threads on
 different L-thread schedulers according the the command line parameters.

diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 285dd56..e2d1715 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -60,7 +60,7 @@ If you type a partial command and hit ``<TAB>`` you get a list of the available

 .. note::

- Some examples in this document are too long to fit on one line are are shown wrapped at `"\\"` for display purposes::
+ Some examples in this document are too long to fit on one line are shown wrapped at `"\\"` for display purposes::

 testpmd> set flow_ctrl rx (on|off) tx (on|off) (high_water) (low_water) \
 (pause_time) (send_xon) (port_id)
@@ -1704,6 +1704,15 @@ Close all ports or a specific port::

 testpmd> port close (port_id|all)

+port reset
+~~~~~~~~~~
+
+Reset all ports or a specific port::
+
+ testpmd> port reset (port_id|all)
+
+User should stop port(s) before resetting and (re-)start after reset.
+
 port start/stop queue
 ~~~~~~~~~~~~~~~~~~~~~

@@ -2173,7 +2182,7 @@ Traffic Management
 ------------------

 The following section shows functions for configuring traffic management on
-on the ethernet device through the use of generic TM API.
+the ethernet device through the use of generic TM API.

 show port traffic management capability
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index ece1a7d..4f7e49c 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -63,6 +63,8 @@
 #define DPAA2_DQRR_RING_SIZE 16
 /** <Maximum number of slots available in RX ring*/

+#define DPAA2_MAX_TX_RETRY_COUNT 10000
+
 #define MC_PORTAL_INDEX 0
 #define NUM_DPIO_REGIONS 2
 #define NUM_DQS_PER_QUEUE 2
diff --git a/drivers/bus/pci/Makefile b/drivers/bus/pci/Makefile
index f3df1c4..37663de 100644
--- a/drivers/bus/pci/Makefile
+++ b/drivers/bus/pci/Makefile
@@ -50,7 +50,7 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal

 LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_ethdev -lrte_pci
+LDLIBS += -lrte_pci

 include $(RTE_SDK)/drivers/bus/pci/$(SYSTEM)/Makefile
 SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) := $(addprefix $(SYSTEM)/,$(SRCS))
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index aabaa63..a51d00a 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -593,18 +593,19 @@ pci_one_device_iommu_support_va(struct rte_pci_device *dev)
 "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap",
 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
 addr->function);
- if (access(filename, F_OK) == -1) {
- /* We don't have an Intel IOMMU, assume VA supported*/
- return true;
- }

- /* We have an intel IOMMU */
 fp = fopen(filename, "r");
 if (fp == NULL) {
- RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename);
+ /* We don't have an Intel IOMMU, assume VA supported */
+ if (errno == ENOENT)
+ return true;
+
+ RTE_LOG(ERR, EAL, "%s(): can't open %s: %s\n",
+ __func__, filename, strerror(errno));
 return false;
 }

+ /* We have an Intel IOMMU */
 if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) {
 RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename);
 fclose(fp);
diff --git a/drivers/bus/pci/linux/pci_uio.c b/drivers/bus/pci/linux/pci_uio.c
index 39176ac..2b2ad77 100644
--- a/drivers/bus/pci/linux/pci_uio.c
+++ b/drivers/bus/pci/linux/pci_uio.c
@@ -358,6 +358,8 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
 pci_map_addr = RTE_PTR_ADD(mapaddr,
 (size_t)dev->mem_resource[res_idx].len);

+ pci_map_addr = RTE_PTR_ALIGN(pci_map_addr, sysconf(_SC_PAGE_SIZE));
+
 maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
 maps[map_idx].size = dev->mem_resource[res_idx].len;
 maps[map_idx].addr = mapaddr;
diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index 5093265..8b485de 100644
--- a/drivers/bus/pci/linux/pci_vfio.c
+++ b/drivers/bus/pci/linux/pci_vfio.c
@@ -356,7 +356,8 @@ pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
 int bar_index, int additional_flags)
 {
 struct memreg {
- unsigned long offset, size;
+ uint64_t offset;
+ size_t size;
 } memreg[2] = {};
 void *bar_addr;
 struct pci_msix_table *msix_table = &vfio_res->msix_table;
@@ -392,7 +393,8 @@ pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
 RTE_LOG(DEBUG, EAL,
 "Trying to map BAR%d that contains the MSI-X "
 "table. Trying offsets: "
- "0x%04lx:0x%04lx, 0x%04lx:0x%04lx\n", bar_index,
+ "0x%04" PRIx64 ":0x%04zx, 0x%04" PRIx64 ":0x%04zx\n",
+ bar_index,
 memreg[0].offset, memreg[0].size,
 memreg[1].offset, memreg[1].size);
 } else {
@@ -417,8 +419,8 @@ pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
 if (map_addr != MAP_FAILED
 && memreg[1].offset && memreg[1].size) {
 void *second_addr = RTE_PTR_ADD(bar_addr,
- memreg[1].offset -
- (uintptr_t)bar->offset);
+ (uintptr_t)(memreg[1].offset -
+ bar->offset));
 map_addr = pci_map_resource(second_addr,
 vfio_dev_fd,
 memreg[1].offset,
@@ -530,6 +532,9 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
 bar_addr = pci_map_addr;
 pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);

+ pci_map_addr = RTE_PTR_ALIGN(pci_map_addr,
+ sysconf(_SC_PAGE_SIZE));
+
 maps[i].addr = bar_addr;
 maps[i].offset = reg.offset;
 maps[i].size = reg.size;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 97719f2..d757ebf 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -803,7 +803,6 @@ cryptodev_armv8_crypto_create(const char *name,
 RTE_CRYPTODEV_FF_CPU_NEON |
 RTE_CRYPTODEV_FF_CPU_ARM_CE;

- /* Set vector instructions mode supported */
 internals = dev->data->dev_private;

 internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 8ccb663..56259ce 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -716,7 +716,7 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
 {
 struct rte_crypto_op *op;
 uint16_t len = DPAA2_GET_FD_LEN(fd);
- uint16_t diff = 0;
+ int16_t diff = 0;
 dpaa2_sec_session *sess_priv;

 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mrvl/rte_mrvl_pmd.c
index 31f3fe5..95276a9 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd.c
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd.c
@@ -743,7 +743,6 @@ cryptodev_mrvl_crypto_create(const char *name,
 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 RTE_CRYPTODEV_FF_HW_ACCELERATED;

- /* Set vector instructions mode supported */
 internals = dev->data->dev_private;

 internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 06e1a6d..24304d5 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
1150@@ -1296,6 +1296,7 @@ process_openssl_combined_op
1151 int srclen, aadlen, status = -1;
1152 uint32_t offset;
1153 uint8_t taglen;
1154+ EVP_CIPHER_CTX *ctx_copy;
1155
1156 /*
1157 * Segmented destination buffer is not supported for
1158@@ -1332,6 +1333,8 @@ process_openssl_combined_op
1159 }
1160
1161 taglen = sess->auth.digest_length;
1162+ ctx_copy = EVP_CIPHER_CTX_new();
1163+ EVP_CIPHER_CTX_copy(ctx_copy, sess->cipher.ctx);
1164
1165 if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1166 if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
1167@@ -1339,12 +1342,12 @@ process_openssl_combined_op
1168 status = process_openssl_auth_encryption_gcm(
1169 mbuf_src, offset, srclen,
1170 aad, aadlen, iv,
1171- dst, tag, sess->cipher.ctx);
1172+ dst, tag, ctx_copy);
1173 else
1174 status = process_openssl_auth_encryption_ccm(
1175 mbuf_src, offset, srclen,
1176 aad, aadlen, iv,
1177- dst, tag, taglen, sess->cipher.ctx);
1178+ dst, tag, taglen, ctx_copy);
1179
1180 } else {
1181 if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
1182@@ -1352,14 +1355,15 @@ process_openssl_combined_op
1183 status = process_openssl_auth_decryption_gcm(
1184 mbuf_src, offset, srclen,
1185 aad, aadlen, iv,
1186- dst, tag, sess->cipher.ctx);
1187+ dst, tag, ctx_copy);
1188 else
1189 status = process_openssl_auth_decryption_ccm(
1190 mbuf_src, offset, srclen,
1191 aad, aadlen, iv,
1192- dst, tag, taglen, sess->cipher.ctx);
1193+ dst, tag, taglen, ctx_copy);
1194 }
1195
1196+ EVP_CIPHER_CTX_free(ctx_copy);
1197 if (status != 0) {
1198 if (status == (-EFAULT) &&
1199 sess->auth.operation ==
1200@@ -1378,6 +1382,7 @@ process_openssl_cipher_op
1201 {
1202 uint8_t *dst, *iv;
1203 int srclen, status;
1204+ EVP_CIPHER_CTX *ctx_copy;
1205
1206 /*
1207 * Segmented destination buffer is not supported for
1208@@ -1394,22 +1399,25 @@ process_openssl_cipher_op
1209
1210 iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1211 sess->iv.offset);
1212+ ctx_copy = EVP_CIPHER_CTX_new();
1213+ EVP_CIPHER_CTX_copy(ctx_copy, sess->cipher.ctx);
1214
1215 if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
1216 if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1217 status = process_openssl_cipher_encrypt(mbuf_src, dst,
1218 op->sym->cipher.data.offset, iv,
1219- srclen, sess->cipher.ctx);
1220+ srclen, ctx_copy);
1221 else
1222 status = process_openssl_cipher_decrypt(mbuf_src, dst,
1223 op->sym->cipher.data.offset, iv,
1224- srclen, sess->cipher.ctx);
1225+ srclen, ctx_copy);
1226 else
1227 status = process_openssl_cipher_des3ctr(mbuf_src, dst,
1228 op->sym->cipher.data.offset, iv,
1229 sess->cipher.key.data, srclen,
1230- sess->cipher.ctx);
1231+ ctx_copy);
1232
1233+ EVP_CIPHER_CTX_free(ctx_copy);
1234 if (status != 0)
1235 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1236 }
1237@@ -1513,6 +1521,8 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
1238 {
1239 uint8_t *dst;
1240 int srclen, status;
1241+ EVP_MD_CTX *ctx_a;
1242+ HMAC_CTX *ctx_h;
1243
1244 srclen = op->sym->auth.data.length;
1245
1246@@ -1528,14 +1538,20 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
1247
1248 switch (sess->auth.mode) {
1249 case OPENSSL_AUTH_AS_AUTH:
1250+ ctx_a = EVP_MD_CTX_create();
1251+ EVP_MD_CTX_copy_ex(ctx_a, sess->auth.auth.ctx);
1252 status = process_openssl_auth(mbuf_src, dst,
1253 op->sym->auth.data.offset, NULL, NULL, srclen,
1254- sess->auth.auth.ctx, sess->auth.auth.evp_algo);
1255+ ctx_a, sess->auth.auth.evp_algo);
1256+ EVP_MD_CTX_destroy(ctx_a);
1257 break;
1258 case OPENSSL_AUTH_AS_HMAC:
1259+ ctx_h = HMAC_CTX_new();
1260+ HMAC_CTX_copy(ctx_h, sess->auth.hmac.ctx);
1261 status = process_openssl_auth_hmac(mbuf_src, dst,
1262 op->sym->auth.data.offset, srclen,
1263- sess->auth.hmac.ctx);
1264+ ctx_h);
1265+ HMAC_CTX_free(ctx_h);
1266 break;
1267 default:
1268 status = -1;
1269@@ -1690,7 +1706,6 @@ cryptodev_openssl_create(const char *name,
1270 RTE_CRYPTODEV_FF_CPU_AESNI |
1271 RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
1272
1273- /* Set vector instructions mode supported */
1274 internals = dev->data->dev_private;
1275
1276 internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
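The recurring shape of the OpenSSL PMD fixes above, sketched in isolation: each operation runs on a freshly copied EVP_CIPHER_CTX (likewise EVP_MD_CTX/HMAC_CTX on the auth paths) and frees the copy afterwards, so parallel operations on the same session no longer share mutable cipher state. Error handling is trimmed for brevity::

    #include <openssl/evp.h>

    /* Run one operation on a private copy of the session context. */
    static int run_on_ctx_copy(EVP_CIPHER_CTX *session_ctx)
    {
        int status = -1;
        EVP_CIPHER_CTX *ctx_copy = EVP_CIPHER_CTX_new();

        if (ctx_copy == NULL)
            return -1;
        if (EVP_CIPHER_CTX_copy(ctx_copy, session_ctx) == 1) {
            /* ... perform the encrypt/decrypt/auth using ctx_copy ... */
            status = 0;
        }
        EVP_CIPHER_CTX_free(ctx_copy); /* session_ctx stays untouched */
        return status;
    }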
1277diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
1278index 85a9ba0..05b37e6 100644
1279--- a/drivers/crypto/qat/qat_crypto.c
1280+++ b/drivers/crypto/qat/qat_crypto.c
1281@@ -1370,10 +1370,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
1282
1283 }
1284 min_ofs = auth_ofs;
1285-
1286- if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
1287- auth_param->auth_res_addr =
1288- op->sym->auth.digest.phys_addr;
1289+ auth_param->auth_res_addr =
1290+ op->sym->auth.digest.phys_addr;
1291
1292 }
1293
1294diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
1295index 89ba27d..9e8bc41 100644
1296--- a/drivers/crypto/qat/qat_crypto_capabilities.h
1297+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
1298@@ -174,8 +174,8 @@
1299 .increment = 0 \
1300 }, \
1301 .digest_size = { \
1302- .min = 16, \
1303- .max = 16, \
1304+ .min = 12, \
1305+ .max = 12, \
1306 .increment = 0 \
1307 }, \
1308 .aad_size = { 0 }, \
1309diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
1310index 56ea124..bf1b493 100644
1311--- a/drivers/event/dpaa2/dpaa2_eventdev.c
1312+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
1313@@ -1,7 +1,7 @@
1314 /*-
1315 * BSD LICENSE
1316 *
1317- * Copyright 2017 NXP.
1318+ * Copyright 2017,2019 NXP.
1319 *
1320 * Redistribution and use in source and binary forms, with or without
1321 * modification, are permitted provided that the following conditions
1322@@ -395,8 +395,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
1323 RTE_SET_USED(queue_conf);
1324
1325 queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
1326- queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
1327- RTE_SCHED_TYPE_PARALLEL;
1328+ queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
1329 queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
1330 }
1331
1332diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
1333index 2604412..2c029cb 100644
1334--- a/drivers/event/octeontx/Makefile
1335+++ b/drivers/event/octeontx/Makefile
1336@@ -42,6 +42,7 @@ CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
1337 CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
1338
1339 LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx -lrte_pmd_octeontx
1340+LDLIBS += -lrte_mempool
1341 LDLIBS += -lrte_bus_pci
1342 LDLIBS += -lrte_bus_vdev
1343
1344diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
1345index 4c9a4c4..7e7d0a7 100644
1346--- a/drivers/event/octeontx/ssovf_worker.h
1347+++ b/drivers/event/octeontx/ssovf_worker.h
1348@@ -60,8 +60,7 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
1349 rte_prefetch_non_temporal(wqe);
1350
1351 /* Get mbuf from wqe */
1352- mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
1353- OCTTX_PACKET_WQE_SKIP);
1354+ mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
1355 mbuf->packet_type =
1356 ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
1357 mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
1358@@ -74,6 +73,16 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
1359 return mbuf;
1360 }
1361
1362+static __rte_always_inline void
1363+ssovf_octeontx_wqe_free(uint64_t work)
1364+{
1365+ octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
1366+ struct rte_mbuf *mbuf;
1367+
1368+ mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
1369+ rte_pktmbuf_free(mbuf);
1370+}
1371+
1372 static __rte_always_inline uint16_t
1373 ssows_get_work(struct ssows *ws, struct rte_event *ev)
1374 {
1375@@ -87,9 +96,13 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
1376 ws->cur_grp = sched_type_queue >> 2;
1377 sched_type_queue = sched_type_queue << 38;
1378 ev->event = sched_type_queue | (get_work0 & 0xffffffff);
1379+
1380 if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
1381 ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
1382 (ev->event >> 20) & 0x7F);
1383+ } else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
1384+ ssovf_octeontx_wqe_free(get_work1);
1385+ return 0;
1386 } else {
1387 ev->u64 = get_work1;
1388 }
1389diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
1390index 61a5c33..bed222f 100644
1391--- a/drivers/event/sw/sw_evdev_xstats.c
1392+++ b/drivers/event/sw/sw_evdev_xstats.c
1393@@ -521,7 +521,7 @@ sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode,
1394 values[xidx] = val;
1395
1396 if (xs->reset_allowed && reset)
1397- xs->reset_value = val;
1398+ xs->reset_value += val;
1399
1400 xidx++;
1401 }
1402@@ -574,8 +574,7 @@ sw_xstats_reset_range(struct sw_evdev *sw, uint32_t start, uint32_t num)
1403 if (!xs->reset_allowed)
1404 continue;
1405
1406- uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
1407- - xs->reset_value;
1408+ uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg);
1409 xs->reset_value = val;
1410 }
1411 }
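A worked example of the xstats bookkeeping change above (numbers illustrative): the reported value is always raw - reset_value, so a read-with-reset must fold the reported value into the baseline (reset_value += val, which leaves reset_value equal to the raw counter) rather than overwrite the baseline with the reported value, which would silently undo any earlier reset::

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t raw, reset_value = 0, reported;

        raw = 100;
        reported = raw - reset_value;   /* 100 */
        reset_value += reported;        /* baseline now 100 (== raw) */

        raw = 130;
        reported = raw - reset_value;   /* 30 since the first reset */
        reset_value += reported;        /* baseline now 130 */

        raw = 150;
        printf("%" PRIu64 "\n", raw - reset_value); /* prints 20 */
        return 0;
    }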
1412diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
1413index 8bcbaa8..81b7e85 100644
1414--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
1415+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
1416@@ -200,7 +200,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
1417 struct qbman_release_desc releasedesc;
1418 struct qbman_swp *swp;
1419 int ret;
1420- int i, n;
1421+ int i, n, retry_count;
1422 uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
1423
1424 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1425@@ -233,9 +233,15 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
1426 }
1427
1428 /* feed them to bman */
1429- do {
1430- ret = qbman_swp_release(swp, &releasedesc, bufs, n);
1431- } while (ret == -EBUSY);
1432+ retry_count = 0;
1433+ while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
1434+ -EBUSY) {
1435+ retry_count++;
1436+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1437+ DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
1438+ return;
1439+ }
1440+ }
1441
1442 aligned:
1443 /* if there are more buffers to free */
1444@@ -251,10 +257,15 @@ aligned:
1445 #endif
1446 }
1447
1448- do {
1449- ret = qbman_swp_release(swp, &releasedesc, bufs,
1450- DPAA2_MBUF_MAX_ACQ_REL);
1451- } while (ret == -EBUSY);
1452+ retry_count = 0;
1453+ while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
1454+ DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
1455+ retry_count++;
1456+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1457+ DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
1458+ return;
1459+ }
1460+ }
1461 n += DPAA2_MBUF_MAX_ACQ_REL;
1462 }
1463 }
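The bounded-retry loop introduced above, standalone: -EBUSY from the portal is retried a fixed number of times (DPAA2_MAX_TX_RETRY_COUNT) instead of forever, so a stuck portal can no longer wedge the caller. hw_release() is a stub standing in for qbman_swp_release()::

    #include <errno.h>

    #define MAX_RETRY_COUNT 10000  /* mirrors DPAA2_MAX_TX_RETRY_COUNT */

    /* Stub for qbman_swp_release(): pretend busy a few times, then ok. */
    static int hw_release(void)
    {
        static int busy_left = 3;
        return busy_left-- > 0 ? -EBUSY : 0;
    }

    /* 0 on success, -EBUSY if the hardware stayed busy too long. */
    static int bounded_release(void)
    {
        int ret, retry_count = 0;

        while ((ret = hw_release()) == -EBUSY) {
            if (++retry_count > MAX_RETRY_COUNT)
                return -EBUSY; /* give up instead of spinning forever */
        }
        return ret;
    }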
1464diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
1465index 5cb348f..ae9b0e2 100644
1466--- a/drivers/net/af_packet/rte_eth_af_packet.c
1467+++ b/drivers/net/af_packet/rte_eth_af_packet.c
1468@@ -263,8 +263,14 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1469 }
1470
1471 /* kick-off transmits */
1472- if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1) {
1473- /* error sending -- no packets transmitted */
1474+ if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1 &&
1475+ errno != ENOBUFS && errno != EAGAIN) {
1476+ /*
477+	 * In case of an ENOBUFS/EAGAIN error, all of the enqueued
478+	 * packets are counted as successful even though only some of
479+	 * them were actually sent.
1480+ */
1481+
1482 num_tx = 0;
1483 num_tx_bytes = 0;
1484 }
1485@@ -982,6 +988,7 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
1486 {
1487 struct rte_eth_dev *eth_dev = NULL;
1488 struct pmd_internals *internals;
1489+ struct tpacket_req *req;
1490 unsigned q;
1491
1492 RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n",
1493@@ -996,7 +1003,10 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
1494 return -1;
1495
1496 internals = eth_dev->data->dev_private;
1497+ req = &internals->req;
1498 for (q = 0; q < internals->nb_queues; q++) {
1499+ munmap(internals->rx_queue[q].map,
1500+ 2 * req->tp_block_size * req->tp_block_nr);
1501 rte_free(internals->rx_queue[q].rd);
1502 rte_free(internals->tx_queue[q].rd);
1503 }
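A sketch of the send-kick error handling above: the zero-length sendto() merely kicks the kernel into draining the PACKET_TX_RING, so ENOBUFS/EAGAIN mean the ring is (partially) busy rather than that nothing was sent, and only other errnos should zero the accounted counters. Socket setup is omitted::

    #include <errno.h>
    #include <stddef.h>
    #include <sys/socket.h>

    /* 0: keep the optimistic tx counters; -1: account the burst as failed. */
    static int kick_tx(int sockfd)
    {
        if (sendto(sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1 &&
            errno != ENOBUFS && errno != EAGAIN)
            return -1;
        return 0;
    }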
1504diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
1505index 3bc2b93..3c29ee5 100644
1506--- a/drivers/net/bnxt/bnxt.h
1507+++ b/drivers/net/bnxt/bnxt.h
1508@@ -47,6 +47,46 @@
1509
1510 #include "bnxt_cpr.h"
1511
1512+/* Vendor ID */
1513+#define PCI_VENDOR_ID_BROADCOM 0x14E4
1514+
1515+/* Device IDs */
1516+#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
1517+#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
1518+#define BROADCOM_DEV_ID_57414_VF 0x16c1
1519+#define BROADCOM_DEV_ID_57301 0x16c8
1520+#define BROADCOM_DEV_ID_57302 0x16c9
1521+#define BROADCOM_DEV_ID_57304_PF 0x16ca
1522+#define BROADCOM_DEV_ID_57304_VF 0x16cb
1523+#define BROADCOM_DEV_ID_57417_MF 0x16cc
1524+#define BROADCOM_DEV_ID_NS2 0x16cd
1525+#define BROADCOM_DEV_ID_57311 0x16ce
1526+#define BROADCOM_DEV_ID_57312 0x16cf
1527+#define BROADCOM_DEV_ID_57402 0x16d0
1528+#define BROADCOM_DEV_ID_57404 0x16d1
1529+#define BROADCOM_DEV_ID_57406_PF 0x16d2
1530+#define BROADCOM_DEV_ID_57406_VF 0x16d3
1531+#define BROADCOM_DEV_ID_57402_MF 0x16d4
1532+#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
1533+#define BROADCOM_DEV_ID_57412 0x16d6
1534+#define BROADCOM_DEV_ID_57414 0x16d7
1535+#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
1536+#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
1537+#define BROADCOM_DEV_ID_5741X_VF 0x16dc
1538+#define BROADCOM_DEV_ID_57412_MF 0x16de
1539+#define BROADCOM_DEV_ID_57314 0x16df
1540+#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
1541+#define BROADCOM_DEV_ID_5731X_VF 0x16e1
1542+#define BROADCOM_DEV_ID_57417_SFP 0x16e2
1543+#define BROADCOM_DEV_ID_57416_SFP 0x16e3
1544+#define BROADCOM_DEV_ID_57317_SFP 0x16e4
1545+#define BROADCOM_DEV_ID_57404_MF 0x16e7
1546+#define BROADCOM_DEV_ID_57406_MF 0x16e8
1547+#define BROADCOM_DEV_ID_57407_SFP 0x16e9
1548+#define BROADCOM_DEV_ID_57407_MF 0x16ea
1549+#define BROADCOM_DEV_ID_57414_MF 0x16ec
1550+#define BROADCOM_DEV_ID_57416_MF 0x16ee
1551+
1552 #define BNXT_MAX_MTU 9500
1553 #define VLAN_TAG_SIZE 4
1554 #define BNXT_MAX_LED 4
1555@@ -246,6 +286,11 @@ struct bnxt {
1556 uint16_t max_req_len;
1557 uint16_t max_resp_len;
1558
1559+ /* default command timeout value of 50ms */
1560+#define HWRM_CMD_TIMEOUT 50000
1561+ /* default HWRM request timeout value */
1562+ uint32_t hwrm_cmd_timeout;
1563+
1564 struct bnxt_link_info link_info;
1565 struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT];
1566
1567diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
1568index cde8adc..f3f0805 100644
1569--- a/drivers/net/bnxt/bnxt_cpr.c
1570+++ b/drivers/net/bnxt/bnxt_cpr.c
1571@@ -34,7 +34,6 @@
1572 #include <rte_malloc.h>
1573
1574 #include "bnxt.h"
1575-#include "bnxt_cpr.h"
1576 #include "bnxt_hwrm.h"
1577 #include "bnxt_ring.h"
1578 #include "hsi_struct_def_dpdk.h"
1579@@ -49,13 +48,11 @@ void bnxt_handle_async_event(struct bnxt *bp,
1580 (struct hwrm_async_event_cmpl *)cmp;
1581 uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
1582
1583- /* TODO: HWRM async events are not defined yet */
1584- /* Needs to handle: link events, error events, etc. */
1585 switch (event_id) {
1586 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1587 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1588 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
1589- bnxt_link_update_op(bp->eth_dev, 1);
1590+ bnxt_link_update_op(bp->eth_dev, 0);
1591 break;
1592 default:
1593 RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
1594diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
1595index bc7b82f..e73a683 100644
1596--- a/drivers/net/bnxt/bnxt_ethdev.c
1597+++ b/drivers/net/bnxt/bnxt_ethdev.c
1598@@ -41,7 +41,6 @@
1599 #include <rte_cycles.h>
1600
1601 #include "bnxt.h"
1602-#include "bnxt_cpr.h"
1603 #include "bnxt_filter.h"
1604 #include "bnxt_hwrm.h"
1605 #include "bnxt_irq.h"
1606@@ -59,44 +58,6 @@
1607 static const char bnxt_version[] =
1608 "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
1609
1610-#define PCI_VENDOR_ID_BROADCOM 0x14E4
1611-
1612-#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
1613-#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
1614-#define BROADCOM_DEV_ID_57414_VF 0x16c1
1615-#define BROADCOM_DEV_ID_57301 0x16c8
1616-#define BROADCOM_DEV_ID_57302 0x16c9
1617-#define BROADCOM_DEV_ID_57304_PF 0x16ca
1618-#define BROADCOM_DEV_ID_57304_VF 0x16cb
1619-#define BROADCOM_DEV_ID_57417_MF 0x16cc
1620-#define BROADCOM_DEV_ID_NS2 0x16cd
1621-#define BROADCOM_DEV_ID_57311 0x16ce
1622-#define BROADCOM_DEV_ID_57312 0x16cf
1623-#define BROADCOM_DEV_ID_57402 0x16d0
1624-#define BROADCOM_DEV_ID_57404 0x16d1
1625-#define BROADCOM_DEV_ID_57406_PF 0x16d2
1626-#define BROADCOM_DEV_ID_57406_VF 0x16d3
1627-#define BROADCOM_DEV_ID_57402_MF 0x16d4
1628-#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
1629-#define BROADCOM_DEV_ID_57412 0x16d6
1630-#define BROADCOM_DEV_ID_57414 0x16d7
1631-#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
1632-#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
1633-#define BROADCOM_DEV_ID_5741X_VF 0x16dc
1634-#define BROADCOM_DEV_ID_57412_MF 0x16de
1635-#define BROADCOM_DEV_ID_57314 0x16df
1636-#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
1637-#define BROADCOM_DEV_ID_5731X_VF 0x16e1
1638-#define BROADCOM_DEV_ID_57417_SFP 0x16e2
1639-#define BROADCOM_DEV_ID_57416_SFP 0x16e3
1640-#define BROADCOM_DEV_ID_57317_SFP 0x16e4
1641-#define BROADCOM_DEV_ID_57404_MF 0x16e7
1642-#define BROADCOM_DEV_ID_57406_MF 0x16e8
1643-#define BROADCOM_DEV_ID_57407_SFP 0x16e9
1644-#define BROADCOM_DEV_ID_57407_MF 0x16ea
1645-#define BROADCOM_DEV_ID_57414_MF 0x16ec
1646-#define BROADCOM_DEV_ID_57416_MF 0x16ee
1647-
1648 static const struct rte_pci_id bnxt_pci_id_map[] = {
1649 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
1650 BROADCOM_DEV_ID_STRATUS_NIC_VF) },
1651@@ -164,6 +125,9 @@ static void bnxt_free_mem(struct bnxt *bp)
1652 bnxt_free_tx_rings(bp);
1653 bnxt_free_rx_rings(bp);
1654 bnxt_free_def_cp_ring(bp);
1655+
1656+ rte_free(bp->grp_info);
1657+ bp->grp_info = NULL;
1658 }
1659
1660 static int bnxt_alloc_mem(struct bnxt *bp)
1661@@ -433,6 +397,7 @@ static int bnxt_init_nic(struct bnxt *bp)
1662 static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
1663 struct rte_eth_dev_info *dev_info)
1664 {
1665+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
1666 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1667 uint16_t max_vnics, i, j, vpool, vrxq;
1668 unsigned int max_rx_rings;
1669@@ -445,7 +410,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
1670
1671 /* PF/VF specifics */
1672 if (BNXT_PF(bp))
1673- dev_info->max_vfs = bp->pdev->max_vfs;
1674+ dev_info->max_vfs = pdev->max_vfs;
1675 max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
1676 RTE_MIN(bp->max_rsscos_ctx,
1677 bp->max_stat_ctx)));
1678@@ -592,6 +557,8 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
1679 }
1680 bp->dev_stopped = 0;
1681
1682+ bnxt_enable_int(bp);
1683+
1684 rc = bnxt_init_chip(bp);
1685 if (rc)
1686 goto error;
1687@@ -626,7 +593,7 @@ static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
1688 eth_dev->data->dev_link.link_status = 1;
1689
1690 bnxt_print_link_info(eth_dev);
1691- return 0;
1692+ return rc;
1693 }
1694
1695 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
1696@@ -644,15 +611,29 @@ static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
1697 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
1698 {
1699 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1700+ struct rte_intr_handle *intr_handle
1701+ = &bp->pdev->intr_handle;
1702
1703 if (bp->eth_dev->data->dev_started) {
1704 /* TBD: STOP HW queues DMA */
1705 eth_dev->data->dev_link.link_status = 0;
1706 }
1707- bnxt_set_hwrm_link_config(bp, false);
1708+ bnxt_dev_set_link_down_op(eth_dev);
1710+	/* Wait for link to be reset and the async notification to be processed. */
1710+ rte_delay_ms(BNXT_LINK_WAIT_INTERVAL * 2);
1711+
1712+ /* Clean queue intr-vector mapping */
1713+ rte_intr_efd_disable(intr_handle);
1714+ if (intr_handle->intr_vec != NULL) {
1715+ rte_free(intr_handle->intr_vec);
1716+ intr_handle->intr_vec = NULL;
1717+ }
1718+
1719 bnxt_hwrm_port_clr_stats(bp);
1720 bnxt_free_tx_mbufs(bp);
1721 bnxt_free_rx_mbufs(bp);
1722+ /* Process any remaining notifications in default completion queue */
1723+ bnxt_int_handler(eth_dev);
1724 bnxt_shutdown_nic(bp);
1725 bp->dev_stopped = 1;
1726 }
1727@@ -734,7 +715,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
1728 /* Attach requested MAC address to the new l2_filter */
1729 STAILQ_FOREACH(filter, &vnic->filter, next) {
1730 if (filter->mac_index == index) {
1731- RTE_LOG(ERR, PMD,
1732+ RTE_LOG(DEBUG, PMD,
1733 "MAC addr already existed for pool %d\n", pool);
1734 return 0;
1735 }
1736@@ -1429,15 +1410,18 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
1737 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1738 if (rc)
1739 break;
1740- memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
1741+ memcpy(filter->l2_addr, addr, ETHER_ADDR_LEN);
1742 memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
1743 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
1744 filter->enables |=
1745 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
1746 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
1747 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1748- if (rc)
1749+ if (rc) {
1750+ memcpy(filter->l2_addr, bp->mac_addr,
1751+ ETHER_ADDR_LEN);
1752 break;
1753+ }
1754 filter->mac_index = 0;
1755 RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
1756 }
1757@@ -1468,6 +1452,10 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
1758 }
1759
1760 vnic->mc_addr_cnt = i;
1761+ if (vnic->mc_addr_cnt)
1762+ vnic->flags |= BNXT_VNIC_INFO_MCAST;
1763+ else
1764+ vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
1765
1766 allmulti:
1767 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1768@@ -1628,39 +1616,22 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1769 struct bnxt_cp_ring_info *cpr;
1770 struct bnxt_rx_queue *rxq;
1771 struct rx_pkt_cmpl *rxcmp;
1772- uint16_t cmp_type;
1773- uint8_t cmp = 1;
1774- bool valid;
1775
1776 rxq = dev->data->rx_queues[rx_queue_id];
1777 cpr = rxq->cp_ring;
1778- valid = cpr->valid;
1779+ raw_cons = cpr->cp_raw_cons;
1780
1781- while (raw_cons < rxq->nb_rx_desc) {
1782+ while (1) {
1783 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
1784+ rte_prefetch0(&cpr->cp_desc_ring[cons]);
1785 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1786
1787- if (!CMPL_VALID(rxcmp, valid))
1788- goto nothing_to_do;
1789- valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
1790- cmp_type = CMP_TYPE(rxcmp);
1791- if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
1792- cmp = (rte_le_to_cpu_32(
1793- ((struct rx_tpa_end_cmpl *)
1794- (rxcmp))->agg_bufs_v1) &
1795- RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
1796- RX_TPA_END_CMPL_AGG_BUFS_SFT;
1797- desc++;
1798- } else if (cmp_type == 0x11) {
1799- desc++;
1800- cmp = (rxcmp->agg_bufs_v1 &
1801- RX_PKT_CMPL_AGG_BUFS_MASK) >>
1802- RX_PKT_CMPL_AGG_BUFS_SFT;
1803+ if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
1804+ break;
1805 } else {
1806- cmp = 1;
1807+ raw_cons++;
1808+ desc++;
1809 }
1810-nothing_to_do:
1811- raw_cons += cmp ? cmp : 2;
1812 }
1813
1814 return desc;
1815@@ -1973,9 +1944,6 @@ parse_ntuple_filter(struct bnxt *bp,
1816 return -EINVAL;
1817 }
1818
1819- //TODO Priority
1820- //nfilter->priority = (uint8_t)filter->priority;
1821-
1822 bfilter->enables = en;
1823 return 0;
1824 }
1825@@ -2342,7 +2310,6 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
1826 return -EINVAL;
1827 }
1828
1829-
1830 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1831 rte_memcpy(filter->dst_macaddr,
1832 fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
1833@@ -2846,6 +2813,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
1834 if (version_printed++ == 0)
1835 RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
1836
1837+ eth_dev->dev_ops = &bnxt_dev_ops;
1838+ eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
1839+ eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
1840+
1841+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1842+ return 0;
1843+
1844 rte_eth_copy_pci_info(eth_dev, pci_dev);
1845
1846 bp = eth_dev->data->dev_private;
1847@@ -2853,9 +2827,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
1848 rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
1849 bp->dev_stopped = 1;
1850
1851- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1852- goto skip_init;
1853-
1854 if (bnxt_vf_pciid(pci_dev->id.device_id))
1855 bp->flags |= BNXT_FLAG_VF;
1856
1857@@ -2865,12 +2836,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
1858 "Board initialization failed rc: %x\n", rc);
1859 goto error;
1860 }
1861-skip_init:
1862- eth_dev->dev_ops = &bnxt_dev_ops;
1863- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1864- return 0;
1865- eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
1866- eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
1867
1868 if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
1869 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
1870@@ -3114,10 +3079,6 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
1871 rte_free(eth_dev->data->mac_addrs);
1872 eth_dev->data->mac_addrs = NULL;
1873 }
1874- if (bp->grp_info != NULL) {
1875- rte_free(bp->grp_info);
1876- bp->grp_info = NULL;
1877- }
1878 rc = bnxt_hwrm_func_driver_unregister(bp, 0);
1879 bnxt_free_hwrm_resources(bp);
1880 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
1881diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
1882index 815bad9..0636c66 100644
1883--- a/drivers/net/bnxt/bnxt_hwrm.c
1884+++ b/drivers/net/bnxt/bnxt_hwrm.c
1885@@ -39,9 +39,9 @@
1886 #include <rte_malloc.h>
1887 #include <rte_memzone.h>
1888 #include <rte_version.h>
1889+#include <rte_io.h>
1890
1891 #include "bnxt.h"
1892-#include "bnxt_cpr.h"
1893 #include "bnxt_filter.h"
1894 #include "bnxt_hwrm.h"
1895 #include "bnxt_rxq.h"
1896@@ -52,10 +52,6 @@
1897 #include "bnxt_vnic.h"
1898 #include "hsi_struct_def_dpdk.h"
1899
1900-#include <rte_io.h>
1901-
1902-#define HWRM_CMD_TIMEOUT 10000
1903-
1904 struct bnxt_plcmodes_cfg {
1905 uint32_t flags;
1906 uint16_t jumbo_thresh;
1907@@ -90,9 +86,9 @@ static int page_roundup(size_t size)
1908
1909 /*
1910 * HWRM Functions (sent to HWRM)
1911- * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
1912- * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
1913- * command was failed by the ChiMP.
1914+ * These are named bnxt_hwrm_*() and return 0 on success or -110 if the
1915+ * HWRM command times out, or a negative error code if the HWRM
1916+ * command is rejected by the FW.
1917 */
1918
1919 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
1920@@ -123,9 +119,6 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
1921 data = (uint32_t *)&short_input;
1922 msg_len = sizeof(short_input);
1923
1924- /* Sync memory write before updating doorbell */
1925- rte_wmb();
1926-
1927 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
1928 }
1929
1930@@ -145,6 +138,12 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
1931 /* Ring channel doorbell */
1932 bar = (uint8_t *)bp->bar0 + 0x100;
1933 rte_write32(1, bar);
1934+ /*
1935+	 * Make sure the channel doorbell ring command completes before
1936+ * reading the response to avoid getting stale or invalid
1937+ * responses.
1938+ */
1939+ rte_io_mb();
1940
1941 /* Poll for the valid bit */
1942 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
1943@@ -172,11 +171,11 @@ err_ret:
1944 }
1945
1946 /*
1947- * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
1948+ * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
1949 * spinlock, and does initial processing.
1950 *
1951 * HWRM_CHECK_RESULT() returns errors on failure and may not be used. It
1952- * releases the spinlock only if it returns. If the regular int return codes
1953+ * releases the spinlock only if it returns. If the regular int return codes
1954 * are not used by the function, HWRM_CHECK_RESULT() should not be used
1955 * directly, rather it should be copied and modified to suit the function.
1956 *
1957@@ -266,20 +265,17 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
1958 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
1959 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1960
1961- /* FIXME add multicast flag, when multicast adding options is supported
1962- * by ethtool.
1963- */
1964 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
1965 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
1966 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
1967 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
1968+
1969 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
1970 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
1971- if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
1972+
1973+ if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
1974 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
1975- if (vnic->flags & BNXT_VNIC_INFO_MCAST)
1976- mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
1977- if (vnic->mc_addr_cnt) {
1978+ } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
1979 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
1980 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
1981 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
1982@@ -614,6 +610,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
1983 fw_version |= resp->hwrm_intf_min << 8;
1984 fw_version |= resp->hwrm_intf_upd;
1985
1986+ /* def_req_timeout value is in milliseconds */
1987+ bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
1988+ /* convert timeout to usec */
1989+ bp->hwrm_cmd_timeout *= 1000;
1990+ if (!bp->hwrm_cmd_timeout)
1991+ bp->hwrm_cmd_timeout = HWRM_CMD_TIMEOUT;
1992+
1993 if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
1994 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
1995 rc = -EINVAL;
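The timeout selection above, in isolation: the FW-advertised def_req_timeout (milliseconds on the wire) is converted to microseconds for the poll loop, falling back to the 50 ms HWRM_CMD_TIMEOUT default when the FW reports 0. The helper itself is illustrative; the driver does this inline::

    #include <stdint.h>

    #define HWRM_CMD_TIMEOUT 50000 /* default, in usec (50 ms) */

    /* def_req_timeout_ms: VER_GET response field, already in host order. */
    static uint32_t hwrm_timeout_usec(uint16_t def_req_timeout_ms)
    {
        uint32_t timeout = (uint32_t)def_req_timeout_ms * 1000;

        return timeout ? timeout : HWRM_CMD_TIMEOUT;
    }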
1996@@ -1001,8 +1004,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1997
1998 HWRM_CHECK_RESULT();
1999
2000- bp->grp_info[idx].fw_grp_id =
2001- rte_le_to_cpu_16(resp->ring_group_id);
2002+ bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
2003
2004 HWRM_UNLOCK();
2005
2006@@ -1060,8 +1062,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2007
2008 req.update_period_ms = rte_cpu_to_le_32(0);
2009
2010- req.stats_dma_addr =
2011- rte_cpu_to_le_64(cpr->hw_stats_map);
2012+ req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
2013
2014 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2015
2016@@ -1562,10 +1563,6 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2017 return rc;
2018 }
2019
2020-/*
2021- * HWRM utility functions
2022- */
2023-
2024 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2025 {
2026 unsigned int i;
2027@@ -1767,6 +1764,10 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2028 return rc;
2029 }
2030
2031+/*
2032+ * HWRM utility functions
2033+ */
2034+
2035 void bnxt_free_hwrm_resources(struct bnxt *bp)
2036 {
2037 /* Release memzone */
2038@@ -1815,8 +1816,6 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2039 else
2040 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2041 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2042- //if (rc)
2043- //break;
2044 }
2045 return rc;
2046 }
2047@@ -1840,8 +1839,6 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2048
2049 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2050 rte_free(flow);
2051- //if (rc)
2052- //break;
2053 }
2054 return rc;
2055 }
2056@@ -2933,7 +2930,6 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2057 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2058 }
2059
2060-
2061 HWRM_UNLOCK();
2062
2063 return rc;
2064@@ -3532,7 +3528,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
2065 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
2066 req.dst_id = rte_cpu_to_le_16(dst_id);
2067
2068-
2069 if (filter->ip_addr_type) {
2070 req.ip_addr_type = filter->ip_addr_type;
2071 enables |=
2072@@ -3545,10 +3540,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
2073 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
2074 memcpy(req.src_macaddr, filter->src_macaddr,
2075 ETHER_ADDR_LEN);
2076- //if (enables &
2077- //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
2078- //memcpy(req.dst_macaddr, filter->dst_macaddr,
2079- //ETHER_ADDR_LEN);
2080 if (enables &
2081 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
2082 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
2083diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
2084index 49436cf..7d69505 100644
2085--- a/drivers/net/bnxt/bnxt_irq.c
2086+++ b/drivers/net/bnxt/bnxt_irq.c
2087@@ -36,7 +36,6 @@
2088 #include <rte_malloc.h>
2089
2090 #include "bnxt.h"
2091-#include "bnxt_cpr.h"
2092 #include "bnxt_irq.h"
2093 #include "bnxt_ring.h"
2094 #include "hsi_struct_def_dpdk.h"
2095@@ -45,7 +44,7 @@
2096 * Interrupts
2097 */
2098
2099-static void bnxt_int_handler(void *param)
2100+void bnxt_int_handler(void *param)
2101 {
2102 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
2103 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
2104diff --git a/drivers/net/bnxt/bnxt_irq.h b/drivers/net/bnxt/bnxt_irq.h
2105index 4d2f7af..3162217 100644
2106--- a/drivers/net/bnxt/bnxt_irq.h
2107+++ b/drivers/net/bnxt/bnxt_irq.h
2108@@ -50,5 +50,6 @@ void bnxt_disable_int(struct bnxt *bp);
2109 void bnxt_enable_int(struct bnxt *bp);
2110 int bnxt_setup_int(struct bnxt *bp);
2111 int bnxt_request_int(struct bnxt *bp);
2112+void bnxt_int_handler(void *param);
2113
2114 #endif
2115diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
2116index 59d1035..b5527d2 100644
2117--- a/drivers/net/bnxt/bnxt_ring.c
2118+++ b/drivers/net/bnxt/bnxt_ring.c
2119@@ -36,7 +36,6 @@
2120 #include <unistd.h>
2121
2122 #include "bnxt.h"
2123-#include "bnxt_cpr.h"
2124 #include "bnxt_hwrm.h"
2125 #include "bnxt_ring.h"
2126 #include "bnxt_rxq.h"
2127diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
2128index 5088e9d..b42562f 100644
2129--- a/drivers/net/bnxt/bnxt_rxq.c
2130+++ b/drivers/net/bnxt/bnxt_rxq.c
2131@@ -36,7 +36,6 @@
2132 #include <rte_malloc.h>
2133
2134 #include "bnxt.h"
2135-#include "bnxt_cpr.h"
2136 #include "bnxt_filter.h"
2137 #include "bnxt_hwrm.h"
2138 #include "bnxt_ring.h"
2139@@ -372,10 +371,9 @@ bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
2140
2141 if (eth_dev->data->rx_queues) {
2142 rxq = eth_dev->data->rx_queues[queue_id];
2143- if (!rxq) {
2144- rc = -EINVAL;
2145- return rc;
2146- }
2147+ if (!rxq)
2148+ return -EINVAL;
2149+
2150 cpr = rxq->cp_ring;
2151 B_CP_DB_ARM(cpr);
2152 }
2153@@ -391,10 +389,9 @@ bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
2154
2155 if (eth_dev->data->rx_queues) {
2156 rxq = eth_dev->data->rx_queues[queue_id];
2157- if (!rxq) {
2158- rc = -EINVAL;
2159- return rc;
2160- }
2161+ if (!rxq)
2162+ return -EINVAL;
2163+
2164 cpr = rxq->cp_ring;
2165 B_CP_DB_DISARM(cpr);
2166 }
2167diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
2168index 4aaad35..6f91675 100644
2169--- a/drivers/net/bnxt/bnxt_rxr.c
2170+++ b/drivers/net/bnxt/bnxt_rxr.c
2171@@ -40,7 +40,6 @@
2172 #include <rte_memory.h>
2173
2174 #include "bnxt.h"
2175-#include "bnxt_cpr.h"
2176 #include "bnxt_ring.h"
2177 #include "bnxt_rxr.h"
2178 #include "bnxt_rxq.h"
2179@@ -88,18 +87,22 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
2180 struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
2181 struct rte_mbuf *data;
2182
2183+ if (rxbd == NULL) {
2184+ RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
2185+ return -EINVAL;
2186+ }
2187+
2188+ if (rx_buf == NULL) {
2189+ RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
2190+ return -EINVAL;
2191+ }
2192+
2193 data = __bnxt_alloc_rx_data(rxq->mb_pool);
2194 if (!data) {
2195 rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
2196 return -ENOMEM;
2197 }
2198
2199- if (rxbd == NULL)
2200- RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
2201- if (rx_buf == NULL)
2202- RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
2203-
2204-
2205 rx_buf->mbuf = data;
2206
2207 rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
2208diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
2209index f8bb4ed..46a3db8 100644
2210--- a/drivers/net/bnxt/bnxt_stats.c
2211+++ b/drivers/net/bnxt/bnxt_stats.c
2212@@ -108,6 +108,22 @@ static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = {
2213 rx_runt_bytes)},
2214 {"rx_runt_frames", offsetof(struct rx_port_stats,
2215 rx_runt_frames)},
2216+ {"rx_pfc_ena_frames_pri0", offsetof(struct rx_port_stats,
2217+ rx_pfc_ena_frames_pri0)},
2218+ {"rx_pfc_ena_frames_pri1", offsetof(struct rx_port_stats,
2219+ rx_pfc_ena_frames_pri1)},
2220+ {"rx_pfc_ena_frames_pri2", offsetof(struct rx_port_stats,
2221+ rx_pfc_ena_frames_pri2)},
2222+ {"rx_pfc_ena_frames_pri3", offsetof(struct rx_port_stats,
2223+ rx_pfc_ena_frames_pri3)},
2224+ {"rx_pfc_ena_frames_pri4", offsetof(struct rx_port_stats,
2225+ rx_pfc_ena_frames_pri4)},
2226+ {"rx_pfc_ena_frames_pri5", offsetof(struct rx_port_stats,
2227+ rx_pfc_ena_frames_pri5)},
2228+ {"rx_pfc_ena_frames_pri6", offsetof(struct rx_port_stats,
2229+ rx_pfc_ena_frames_pri6)},
2230+ {"rx_pfc_ena_frames_pri7", offsetof(struct rx_port_stats,
2231+ rx_pfc_ena_frames_pri7)},
2232 };
2233
2234 static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
2235@@ -163,6 +179,22 @@ static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
2236 tx_total_collisions)},
2237 {"tx_bytes", offsetof(struct tx_port_stats,
2238 tx_bytes)},
2239+ {"tx_pfc_ena_frames_pri0", offsetof(struct tx_port_stats,
2240+ tx_pfc_ena_frames_pri0)},
2241+ {"tx_pfc_ena_frames_pri1", offsetof(struct tx_port_stats,
2242+ tx_pfc_ena_frames_pri1)},
2243+ {"tx_pfc_ena_frames_pri2", offsetof(struct tx_port_stats,
2244+ tx_pfc_ena_frames_pri2)},
2245+ {"tx_pfc_ena_frames_pri3", offsetof(struct tx_port_stats,
2246+ tx_pfc_ena_frames_pri3)},
2247+ {"tx_pfc_ena_frames_pri4", offsetof(struct tx_port_stats,
2248+ tx_pfc_ena_frames_pri4)},
2249+ {"tx_pfc_ena_frames_pri5", offsetof(struct tx_port_stats,
2250+ tx_pfc_ena_frames_pri5)},
2251+ {"tx_pfc_ena_frames_pri6", offsetof(struct tx_port_stats,
2252+ tx_pfc_ena_frames_pri6)},
2253+ {"tx_pfc_ena_frames_pri7", offsetof(struct tx_port_stats,
2254+ tx_pfc_ena_frames_pri7)},
2255 };
2256
2257 static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
2258@@ -284,6 +316,9 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
2259 return 0;
2260 }
2261
2262+ if (xstats == NULL)
2263+ return 0;
2264+
2265 bnxt_hwrm_port_qstats(bp);
2266 bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);
2267
2268diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
2269index 99ddddd..f0098c3 100644
2270--- a/drivers/net/bnxt/bnxt_txq.c
2271+++ b/drivers/net/bnxt/bnxt_txq.c
2272@@ -36,7 +36,6 @@
2273 #include <rte_malloc.h>
2274
2275 #include "bnxt.h"
2276-#include "bnxt_cpr.h"
2277 #include "bnxt_ring.h"
2278 #include "bnxt_txq.h"
2279 #include "bnxt_txr.h"
2280@@ -62,7 +61,7 @@ static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
2281 if (sw_ring) {
2282 for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {
2283 if (sw_ring[i].mbuf) {
2284- rte_pktmbuf_free(sw_ring[i].mbuf);
2285+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
2286 sw_ring[i].mbuf = NULL;
2287 }
2288 }
2289diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
2290index e558413..abb705c 100644
2291--- a/drivers/net/bnxt/bnxt_txr.c
2292+++ b/drivers/net/bnxt/bnxt_txr.c
2293@@ -37,7 +37,6 @@
2294 #include <rte_malloc.h>
2295
2296 #include "bnxt.h"
2297-#include "bnxt_cpr.h"
2298 #include "bnxt_ring.h"
2299 #include "bnxt_txq.h"
2300 #include "bnxt_txr.h"
2301diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
2302index 14b82bb..d369576 100644
2303--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
2304+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
2305@@ -659,7 +659,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
2306 SM_FLAG_CLR(port, NTT);
2307 }
2308
2309-static uint8_t
2310+static uint16_t
2311 max_index(uint64_t *a, int n)
2312 {
2313 if (n <= 0)
2314@@ -690,11 +690,11 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
2315 struct port *agg, *port;
2316 uint16_t slaves_count, new_agg_id, i, j = 0;
2317 uint16_t *slaves;
2318- uint64_t agg_bandwidth[8] = {0};
2319- uint64_t agg_count[8] = {0};
2320+ uint64_t agg_bandwidth[RTE_MAX_ETHPORTS] = {0};
2321+ uint64_t agg_count[RTE_MAX_ETHPORTS] = {0};
2322 uint16_t default_slave = 0;
2323- uint8_t mode_count_id, mode_band_id;
2324 struct rte_eth_link link_info;
2325+ uint16_t agg_new_idx = 0;
2326
2327 slaves = internals->active_slaves;
2328 slaves_count = internals->active_slave_count;
2329@@ -707,9 +707,9 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
2330 if (agg->aggregator_port_id != slaves[i])
2331 continue;
2332
2333- agg_count[agg->aggregator_port_id] += 1;
2334+ agg_count[i] += 1;
2335 rte_eth_link_get_nowait(slaves[i], &link_info);
2336- agg_bandwidth[agg->aggregator_port_id] += link_info.link_speed;
2337+ agg_bandwidth[i] += link_info.link_speed;
2338
2339 /* Actors system ID is not checked since all slave device have the same
2340 * ID (MAC address). */
2341@@ -729,24 +729,22 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
2342
2343 switch (internals->mode4.agg_selection) {
2344 case AGG_COUNT:
2345- mode_count_id = max_index(
2346- (uint64_t *)agg_count, slaves_count);
2347- new_agg_id = mode_count_id;
2348+ agg_new_idx = max_index(agg_count, slaves_count);
2349+ new_agg_id = slaves[agg_new_idx];
2350 break;
2351 case AGG_BANDWIDTH:
2352- mode_band_id = max_index(
2353- (uint64_t *)agg_bandwidth, slaves_count);
2354- new_agg_id = mode_band_id;
2355+ agg_new_idx = max_index(agg_bandwidth, slaves_count);
2356+ new_agg_id = slaves[agg_new_idx];
2357 break;
2358 case AGG_STABLE:
2359 if (default_slave == slaves_count)
2360- new_agg_id = slave_id;
2361+ new_agg_id = slaves[slave_id];
2362 else
2363 new_agg_id = slaves[default_slave];
2364 break;
2365 default:
2366 if (default_slave == slaves_count)
2367- new_agg_id = slave_id;
2368+ new_agg_id = slaves[slave_id];
2369 else
2370 new_agg_id = slaves[default_slave];
2371 break;
2372@@ -1322,11 +1320,12 @@ rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
2373 struct bond_dev_private *internals;
2374 struct mode8023ad_private *mode4;
2375
2376+ if (valid_bonded_port_id(port_id) != 0)
2377+ return -EINVAL;
2378+
2379 bond_dev = &rte_eth_devices[port_id];
2380 internals = bond_dev->data->dev_private;
2381
2382- if (valid_bonded_port_id(port_id) != 0)
2383- return -EINVAL;
2384 if (internals->mode != 4)
2385 return -EINVAL;
2386
2387@@ -1343,11 +1342,12 @@ int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id)
2388 struct bond_dev_private *internals;
2389 struct mode8023ad_private *mode4;
2390
2391+ if (valid_bonded_port_id(port_id) != 0)
2392+ return -EINVAL;
2393+
2394 bond_dev = &rte_eth_devices[port_id];
2395 internals = bond_dev->data->dev_private;
2396
2397- if (valid_bonded_port_id(port_id) != 0)
2398- return -EINVAL;
2399 if (internals->mode != 4)
2400 return -EINVAL;
2401 mode4 = &internals->mode4;
2402@@ -1600,9 +1600,14 @@ int
2403 rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port)
2404 {
2405 int retval = 0;
2406- struct rte_eth_dev *dev = &rte_eth_devices[port];
2407- struct bond_dev_private *internals = (struct bond_dev_private *)
2408- dev->data->dev_private;
2409+ struct rte_eth_dev *dev;
2410+ struct bond_dev_private *internals;
2411+
2412+ if (valid_bonded_port_id(port) != 0)
2413+ return -EINVAL;
2414+
2415+ dev = &rte_eth_devices[port];
2416+ internals = dev->data->dev_private;
2417
2418 if (check_for_bonded_ethdev(dev) != 0)
2419 return -1;
2420@@ -1624,9 +1629,14 @@ int
2421 rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port)
2422 {
2423 int retval = 0;
2424- struct rte_eth_dev *dev = &rte_eth_devices[port];
2425- struct bond_dev_private *internals = (struct bond_dev_private *)
2426- dev->data->dev_private;
2427+ struct rte_eth_dev *dev;
2428+ struct bond_dev_private *internals;
2429+
2430+ if (valid_bonded_port_id(port) != 0)
2431+ return -EINVAL;
2432+
2433+ dev = &rte_eth_devices[port];
2434+ internals = dev->data->dev_private;
2435
2436 if (check_for_bonded_ethdev(dev) != 0)
2437 return -1;
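The ordering fix repeated across the bonding API entry points above, sketched once with local stand-ins: the port id must be validated before rte_eth_devices[port] is indexed, otherwise an invalid id reads out-of-range or uninitialized device state before the check ever runs::

    #include <errno.h>

    #define MAX_PORTS 32

    struct dev { int configured; } devices[MAX_PORTS];

    /* Stand-in for valid_bonded_port_id(): a plain range check here. */
    static int valid_port_id(unsigned int port)
    {
        return port < MAX_PORTS ? 0 : -1;
    }

    static int api_entry(unsigned int port)
    {
        struct dev *d;

        if (valid_port_id(port) != 0) /* validate before any indexing */
            return -EINVAL;

        d = &devices[port];           /* only now safe to dereference */
        return d->configured ? 0 : -EINVAL;
    }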
2438diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
2439index 8c94cc6..e543f53 100644
2440--- a/drivers/net/bonding/rte_eth_bond_args.c
2441+++ b/drivers/net/bonding/rte_eth_bond_args.c
2442@@ -92,11 +92,10 @@ find_port_id_by_dev_name(const char *name)
2443 static inline int
2444 bond_pci_addr_cmp(const struct rte_device *dev, const void *_pci_addr)
2445 {
2446- struct rte_pci_device *pdev;
2447+ const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
2448 const struct rte_pci_addr *paddr = _pci_addr;
2449
2450- pdev = RTE_DEV_TO_PCI(*(struct rte_device **)(void *)&dev);
2451- return rte_eal_compare_pci_addr(&pdev->addr, paddr);
2452+ return rte_pci_addr_cmp(&pdev->addr, paddr);
2453 }
2454
2455 /**
2456diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
2457index 9694cfb..b58873a 100644
2458--- a/drivers/net/bonding/rte_eth_bond_pmd.c
2459+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
2460@@ -31,6 +31,7 @@
2461 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2462 */
2463 #include <stdlib.h>
2464+#include <stdbool.h>
2465 #include <netinet/in.h>
2466
2467 #include <rte_mbuf.h>
2468@@ -283,45 +284,6 @@ bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
2469 }
2470
2471 static uint16_t
2472-bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
2473- uint16_t nb_pkts)
2474-{
2475- struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
2476- struct bond_dev_private *internals = bd_rx_q->dev_private;
2477- uint16_t num_rx_total = 0; /* Total number of received packets */
2478- uint16_t slaves[RTE_MAX_ETHPORTS];
2479- uint16_t slave_count;
2480- uint16_t active_slave;
2481- uint16_t i;
2482-
2483- /* Copy slave list to protect against slave up/down changes during tx
2484- * bursting */
2485- slave_count = internals->active_slave_count;
2486- active_slave = internals->active_slave;
2487- memcpy(slaves, internals->active_slaves,
2488- sizeof(internals->active_slaves[0]) * slave_count);
2489-
2490- for (i = 0; i < slave_count && nb_pkts; i++) {
2491- uint16_t num_rx_slave;
2492-
2493- /* Read packets from this slave */
2494- num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
2495- bd_rx_q->queue_id,
2496- bufs + num_rx_total, nb_pkts);
2497- num_rx_total += num_rx_slave;
2498- nb_pkts -= num_rx_slave;
2499-
2500- if (++active_slave == slave_count)
2501- active_slave = 0;
2502- }
2503-
2504- if (++internals->active_slave >= slave_count)
2505- internals->active_slave = 0;
2506-
2507- return num_rx_total;
2508-}
2509-
2510-static uint16_t
2511 bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
2512 uint16_t nb_pkts)
2513 {
2514@@ -406,10 +368,9 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
2515 return num_tx_total;
2516 }
2517
2518-
2519-static uint16_t
2520-bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
2521- uint16_t nb_pkts)
2522+static inline uint16_t
2523+rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
2524+ bool dedicated_rxq)
2525 {
2526 /* Cast to structure, containing bonded device's port id and queue id */
2527 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
2528@@ -454,23 +415,22 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
2529
2530 /* Handle slow protocol packets. */
2531 while (j < num_rx_total) {
2532-
2533- /* If packet is not pure L2 and is known, skip it */
2534- if ((bufs[j]->packet_type & ~RTE_PTYPE_L2_ETHER) != 0) {
2535- j++;
2536- continue;
2537- }
2538-
2539 if (j + 3 < num_rx_total)
2540 rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
2541
2542 hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
2543 subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
2544
2545- /* Remove packet from array if it is slow packet or slave is not
2546- * in collecting state or bonding interface is not in promiscuous
2547- * mode and packet address does not match. */
2548- if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
2549+ /* Remove packet from array if:
2550+	 * - it is a slow packet but no dedicated rxq is present,
2551+	 * - the slave is not in collecting state,
2552+	 * - the bonding interface is not in promiscuous mode and the
2553+	 *   packet is not multicast and its address does not match.
2554+ */
2555+ if (unlikely(
2556+ (!dedicated_rxq &&
2557+ is_lacp_packets(hdr->ether_type, subtype,
2558+ bufs[j])) ||
2559 !collecting || (!promisc &&
2560 !is_multicast_ether_addr(&hdr->d_addr) &&
2561 !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
2562@@ -500,6 +460,20 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
2563 return num_rx_total;
2564 }
2565
2566+static uint16_t
2567+bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
2568+ uint16_t nb_pkts)
2569+{
2570+ return rx_burst_8023ad(queue, bufs, nb_pkts, false);
2571+}
2572+
2573+static uint16_t
2574+bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
2575+ uint16_t nb_pkts)
2576+{
2577+ return rx_burst_8023ad(queue, bufs, nb_pkts, true);
2578+}
2579+
2580 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
2581 uint32_t burstnumberRX;
2582 uint32_t burstnumberTX;
2583@@ -2352,8 +2326,8 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
2584 * packet loss will occur on this slave if transmission at rates
2585 * greater than this are attempted
2586 */
2587- for (idx = 1; idx < bond_ctx->active_slave_count; idx++) {
2588- link_update(bond_ctx->active_slaves[0], &slave_link);
2589+ for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2590+ link_update(bond_ctx->active_slaves[idx], &slave_link);
2591
2592 if (slave_link.link_speed <
2593 ethdev->data->dev_link.link_speed)
2594diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
2595index dc153c7..8b5b9b4 100644
2596--- a/drivers/net/cxgbe/cxgbe_ethdev.c
2597+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
2598@@ -91,6 +91,7 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2599 struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
2600 uint16_t pkts_sent, pkts_remain;
2601 uint16_t total_sent = 0;
2602+ uint16_t idx = 0;
2603 int ret = 0;
2604
2605 CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
2606@@ -99,12 +100,16 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2607 t4_os_lock(&txq->txq_lock);
2608 /* free up desc from already completed tx */
2609 reclaim_completed_tx(&txq->q);
2610+ rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
2611 while (total_sent < nb_pkts) {
2612 pkts_remain = nb_pkts - total_sent;
2613
2614 for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
2615- ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
2616- nb_pkts);
2617+ idx = total_sent + pkts_sent;
2618+ if ((idx + 1) < nb_pkts)
2619+ rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
2620+ volatile void *));
2621+ ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
2622 if (ret < 0)
2623 break;
2624 }
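The prefetch-ahead pattern introduced above, standalone: while packet i is being queued, the payload of packet i+1 is prefetched so it is warm in cache by the next iteration (the now-redundant prefetch inside the coalescing path in sge.c is dropped below). rte_prefetch0 is modeled with the GCC builtin; xmit_one() stands in for t4_eth_xmit()::

    #include <stdint.h>

    struct pkt { void *data; };

    static int xmit_one(struct pkt *p) { (void)p; return 0; }

    static void xmit_burst(struct pkt **pkts, uint16_t nb_pkts)
    {
        uint16_t i;

        if (nb_pkts == 0)
            return;
        __builtin_prefetch(pkts[0]->data);      /* warm the first payload */
        for (i = 0; i < nb_pkts; i++) {
            if (i + 1 < nb_pkts)
                __builtin_prefetch(pkts[i + 1]->data); /* one step ahead */
            if (xmit_one(pkts[i]) < 0)
                break;
        }
    }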
2625diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
2626index babff0b..9dddea5 100644
2627--- a/drivers/net/cxgbe/sge.c
2628+++ b/drivers/net/cxgbe/sge.c
2629@@ -1130,7 +1130,6 @@ out_free:
2630 txq->stats.mapping_err++;
2631 goto out_free;
2632 }
2633- rte_prefetch0((volatile void *)addr);
2634 return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
2635 pi, addr, nb_pkts);
2636 } else {
2637diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
2638index 8ecd238..4459424 100644
2639--- a/drivers/net/dpaa2/dpaa2_rxtx.c
2640+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
2641@@ -132,7 +132,8 @@ dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
2642 }
2643
2644 static inline struct rte_mbuf *__attribute__((hot))
2645-eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
2646+eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
2647+ int port_id)
2648 {
2649 struct qbman_sge *sgt, *sge;
2650 dma_addr_t sg_addr;
2651@@ -159,6 +160,7 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
2652 first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
2653 first_seg->nb_segs = 1;
2654 first_seg->next = NULL;
2655+ first_seg->port = port_id;
2656
2657 first_seg->packet_type = dpaa2_dev_rx_parse(
2658 (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
2659@@ -192,7 +194,8 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
2660 }
2661
2662 static inline struct rte_mbuf *__attribute__((hot))
2663-eth_fd_to_mbuf(const struct qbman_fd *fd)
2664+eth_fd_to_mbuf(const struct qbman_fd *fd,
2665+ int port_id)
2666 {
2667 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
2668 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
2669@@ -206,6 +209,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
2670 mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
2671 mbuf->data_len = DPAA2_GET_FD_LEN(fd);
2672 mbuf->pkt_len = mbuf->data_len;
2673+ mbuf->port = port_id;
2674
2675 /* Parse the packet */
2676 /* parse results are after the private - sw annotation area */
2677@@ -470,10 +474,9 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
2678 + DPAA2_FD_PTA_SIZE + 16));
2679
2680 if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
2681- bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
2682+ bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx], eth_data->port_id);
2683 else
2684- bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
2685- bufs[num_rx]->port = dev->data->port_id;
2686+ bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx], eth_data->port_id);
2687
2688 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2689 rte_vlan_strip(bufs[num_rx]);
2690@@ -521,7 +524,7 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
2691 struct dpaa2_queue *rxq,
2692 struct rte_event *ev)
2693 {
2694- ev->mbuf = eth_fd_to_mbuf(fd);
2695+ ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
2696
2697 ev->flow_id = rxq->ev.flow_id;
2698 ev->sub_event_type = rxq->ev.sub_event_type;
2699@@ -633,15 +636,28 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
2700 }
2701 bufs++;
2702 }
2703+
2704 loop = 0;
2705+ retry_count = 0;
2706 while (loop < frames_to_send) {
2707- loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
2708+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
2709 &fd_arr[loop], frames_to_send - loop);
2710+ if (unlikely(ret < 0)) {
2711+ retry_count++;
2712+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
2713+ num_tx += loop;
2714+ nb_pkts -= loop;
2715+ goto send_n_return;
2716+ }
2717+ } else {
2718+ loop += ret;
2719+ retry_count = 0;
2720+ }
2721 }
2722
2723- num_tx += frames_to_send;
2724- dpaa2_q->tx_pkts += frames_to_send;
2725- nb_pkts -= frames_to_send;
2726+ num_tx += loop;
2727+ dpaa2_q->tx_pkts += loop;
2728+ nb_pkts -= loop;
2729 }
2730 return num_tx;
2731
2732@@ -650,12 +666,21 @@ send_n_return:
2733 if (loop) {
2734 unsigned int i = 0;
2735
2736+ retry_count = 0;
2737 while (i < loop) {
2738- i += qbman_swp_enqueue_multiple(swp, &eqdesc,
2739+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
2740 &fd_arr[i], loop - i);
2741+ if (unlikely(ret < 0)) {
2742+ retry_count++;
2743+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
2744+ break;
2745+ } else {
2746+ i += ret;
2747+ retry_count = 0;
2748+ }
2749 }
2750- num_tx += loop;
2751- dpaa2_q->tx_pkts += loop;
2752+ num_tx += i;
2753+ dpaa2_q->tx_pkts += i;
2754 }
2755 skip_tx:
2756 return num_tx;
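
In the dpaa2 hunks above, qbman_swp_enqueue_multiple() can return a negative value when the portal is busy; the old code added that return straight to the loop counter, corrupting the frame accounting and risking an endless loop. The fix retries a bounded number of times and counts only frames the hardware actually accepted. A standalone sketch of the bounded-retry accounting; MAX_RETRY and try_enqueue() are illustrative stand-ins:

    #include <stdint.h>

    #define MAX_RETRY 3    /* illustrative bound; the driver defines its own */

    /* Stand-in for a hardware enqueue: returns the number of frames
     * accepted (> 0), or a negative value if the portal is busy. */
    extern int try_enqueue(const void **frames, unsigned int n);

    static unsigned int
    send_all(const void **frames, unsigned int n)
    {
        unsigned int done = 0, retries = 0;

        while (done < n) {
            int ret = try_enqueue(&frames[done], n - done);

            if (ret < 0) {              /* busy: retry, but never forever */
                if (++retries > MAX_RETRY)
                    break;
                continue;
            }
            done += (unsigned int)ret;  /* count only accepted frames */
            retries = 0;
        }
        return done;    /* the caller frees the n - done leftovers */
    }
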
2757diff --git a/drivers/net/dpaa2/mc/dpkg.c b/drivers/net/dpaa2/mc/dpkg.c
2758index 3f98907..34d37bf 100644
2759--- a/drivers/net/dpaa2/mc/dpkg.c
2760+++ b/drivers/net/dpaa2/mc/dpkg.c
2761@@ -96,7 +96,10 @@ dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
2762 dpkg_set_field(extr->extract_type, EXTRACT_TYPE,
2763 cfg->extracts[i].type);
2764
2765- for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
2766+ if (extr->num_of_byte_masks > DPKG_NUM_OF_MASKS)
2767+ return -EINVAL;
2768+
2769+ for (j = 0; j < extr->num_of_byte_masks; j++) {
2770 extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
2771 extr->masks[j].offset =
2772 cfg->extracts[i].masks[j].offset;
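
The dpkg fix above stops iterating over the fixed-size array bound: the loop now copies only the caller-supplied num_of_byte_masks entries, after first rejecting counts larger than DPKG_NUM_OF_MASKS. The validate-then-copy shape, reduced to its essentials (MAX_MASKS stands in for the real constant):

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    #define MAX_MASKS 4    /* stand-in for DPKG_NUM_OF_MASKS */

    static int
    copy_masks(unsigned char *dst, const unsigned char *src, size_t count)
    {
        if (count > MAX_MASKS)    /* reject before touching any buffer */
            return -EINVAL;
        memcpy(dst, src, count);  /* copy only what was supplied */
        return 0;
    }
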
2773diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
2774index 5668910..70a4798 100644
2775--- a/drivers/net/e1000/e1000_ethdev.h
2776+++ b/drivers/net/e1000/e1000_ethdev.h
2777@@ -350,15 +350,15 @@ struct igb_flow_mem {
2778 };
2779
2780 TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
2781-struct igb_ntuple_filter_list igb_filter_ntuple_list;
2782+extern struct igb_ntuple_filter_list igb_filter_ntuple_list;
2783 TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
2784-struct igb_ethertype_filter_list igb_filter_ethertype_list;
2785+extern struct igb_ethertype_filter_list igb_filter_ethertype_list;
2786 TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
2787-struct igb_syn_filter_list igb_filter_syn_list;
2788+extern struct igb_syn_filter_list igb_filter_syn_list;
2789 TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
2790-struct igb_flex_filter_list igb_filter_flex_list;
2791+extern struct igb_flex_filter_list igb_filter_flex_list;
2792 TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
2793-struct igb_flow_mem_list igb_flow_list;
2794+extern struct igb_flow_mem_list igb_flow_list;
2795
2796 extern const struct rte_flow_ops igb_flow_ops;
2797
2798diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
2799index 4378f08..daf9d2f 100644
2800--- a/drivers/net/e1000/igb_ethdev.c
2801+++ b/drivers/net/e1000/igb_ethdev.c
2802@@ -956,6 +956,8 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
2803 /* enable support intr */
2804 igb_intr_enable(eth_dev);
2805
2806+ eth_igb_dev_set_link_down(eth_dev);
2807+
2808 /* initialize filter info */
2809 memset(filter_info, 0,
2810 sizeof(struct e1000_filter_info));
2811@@ -1557,8 +1559,9 @@ eth_igb_stop(struct rte_eth_dev *dev)
2812 igb_pf_reset_hw(hw);
2813 E1000_WRITE_REG(hw, E1000_WUC, 0);
2814
2815- /* Set bit for Go Link disconnect */
2816- if (hw->mac.type >= e1000_82580) {
2817+ /* Set bit for Go Link disconnect if PHY reset is not blocked */
2818+ if (hw->mac.type >= e1000_82580 &&
2819+ (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
2820 uint32_t phpm_reg;
2821
2822 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
2823@@ -1632,8 +1635,9 @@ eth_igb_close(struct rte_eth_dev *dev)
2824 igb_release_manageability(hw);
2825 igb_hw_control_release(hw);
2826
2827- /* Clear bit for Go Link disconnect */
2828- if (hw->mac.type >= e1000_82580) {
2829+ /* Clear bit for Go Link disconnect if PHY reset is not blocked */
2830+ if (hw->mac.type >= e1000_82580 &&
2831+ (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
2832 uint32_t phpm_reg;
2833
2834 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
2835diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
2836index 057579b..894f8a1 100644
2837--- a/drivers/net/e1000/igb_flow.c
2838+++ b/drivers/net/e1000/igb_flow.c
2839@@ -78,6 +78,12 @@
2840
2841 #define IGB_FLEX_RAW_NUM 12
2842
2843+struct igb_flow_mem_list igb_flow_list;
2844+struct igb_ntuple_filter_list igb_filter_ntuple_list;
2845+struct igb_ethertype_filter_list igb_filter_ethertype_list;
2846+struct igb_syn_filter_list igb_filter_syn_list;
2847+struct igb_flex_filter_list igb_filter_flex_list;
2848+
2849 /**
2850 * Please aware there's an asumption for all the parsers.
2851 * rte_flow_item is using big endian, rte_flow_attr and
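
The e1000 hunks above fix a classic C linkage bug: the header *defined* the five filter lists, so every translation unit including e1000_ethdev.h emitted its own tentative definition. That links only while the toolchain merges common symbols; under -fno-common (the GCC 10 default) it becomes a multiple-definition error. The standard fix, as applied here, is extern in the header and exactly one definition in a .c file:

    /* filter.h — declaration only, no storage is allocated */
    extern int filter_count;

    /* filter.c — the single definition that provides the storage */
    int filter_count;
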
2852diff --git a/drivers/net/fm10k/base/fm10k_api.c b/drivers/net/fm10k/base/fm10k_api.c
2853index c49d20d..e7b2fe7 100644
2854--- a/drivers/net/fm10k/base/fm10k_api.c
2855+++ b/drivers/net/fm10k/base/fm10k_api.c
2856@@ -234,8 +234,14 @@ s32 fm10k_read_mac_addr(struct fm10k_hw *hw)
2857 * */
2858 void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
2859 {
2860- if (hw->mac.ops.update_hw_stats)
2861- hw->mac.ops.update_hw_stats(hw, stats);
2862+ switch (hw->mac.type) {
2863+ case fm10k_mac_pf:
2864+ return fm10k_update_hw_stats_pf(hw, stats);
2865+ case fm10k_mac_vf:
2866+ return fm10k_update_hw_stats_vf(hw, stats);
2867+ default:
2868+ break;
2869+ }
2870 }
2871
2872 /**
2873@@ -246,8 +252,14 @@ void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
2874 * */
2875 void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
2876 {
2877- if (hw->mac.ops.rebind_hw_stats)
2878- hw->mac.ops.rebind_hw_stats(hw, stats);
2879+ switch (hw->mac.type) {
2880+ case fm10k_mac_pf:
2881+ return fm10k_rebind_hw_stats_pf(hw, stats);
2882+ case fm10k_mac_vf:
2883+ return fm10k_rebind_hw_stats_vf(hw, stats);
2884+ default:
2885+ break;
2886+ }
2887 }
2888
2889 /**
2890diff --git a/drivers/net/fm10k/base/fm10k_pf.c b/drivers/net/fm10k/base/fm10k_pf.c
2891index db5f491..f5b6a9e 100644
2892--- a/drivers/net/fm10k/base/fm10k_pf.c
2893+++ b/drivers/net/fm10k/base/fm10k_pf.c
2894@@ -1511,7 +1511,7 @@ const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
2895 * This function collects and aggregates global and per queue hardware
2896 * statistics.
2897 **/
2898-STATIC void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
2899+void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
2900 struct fm10k_hw_stats *stats)
2901 {
2902 u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
2903@@ -1584,7 +1584,7 @@ STATIC void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
2904 * This function resets the base for global and per queue hardware
2905 * statistics.
2906 **/
2907-STATIC void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
2908+void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
2909 struct fm10k_hw_stats *stats)
2910 {
2911 DEBUGFUNC("fm10k_rebind_hw_stats_pf");
2912diff --git a/drivers/net/fm10k/base/fm10k_pf.h b/drivers/net/fm10k/base/fm10k_pf.h
2913index ca125c2..2c22bdd 100644
2914--- a/drivers/net/fm10k/base/fm10k_pf.h
2915+++ b/drivers/net/fm10k/base/fm10k_pf.h
2916@@ -184,4 +184,10 @@ extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[];
2917 #endif
2918
2919 s32 fm10k_init_ops_pf(struct fm10k_hw *hw);
2920+
2921+void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
2922+ struct fm10k_hw_stats *stats);
2923+
2924+void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
2925+ struct fm10k_hw_stats *stats);
2926 #endif /* _FM10K_PF_H */
2927diff --git a/drivers/net/fm10k/base/fm10k_vf.c b/drivers/net/fm10k/base/fm10k_vf.c
2928index bd44977..2f4b5f5 100644
2929--- a/drivers/net/fm10k/base/fm10k_vf.c
2930+++ b/drivers/net/fm10k/base/fm10k_vf.c
2931@@ -526,7 +526,7 @@ const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
2932 *
2933 * This function collects and aggregates per queue hardware statistics.
2934 **/
2935-STATIC void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
2936+void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
2937 struct fm10k_hw_stats *stats)
2938 {
2939 DEBUGFUNC("fm10k_update_hw_stats_vf");
2940@@ -541,7 +541,7 @@ STATIC void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
2941 *
2942 * This function resets the base for queue hardware statistics.
2943 **/
2944-STATIC void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
2945+void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
2946 struct fm10k_hw_stats *stats)
2947 {
2948 DEBUGFUNC("fm10k_rebind_hw_stats_vf");
2949diff --git a/drivers/net/fm10k/base/fm10k_vf.h b/drivers/net/fm10k/base/fm10k_vf.h
2950index 116c56f..d4edd33 100644
2951--- a/drivers/net/fm10k/base/fm10k_vf.h
2952+++ b/drivers/net/fm10k/base/fm10k_vf.h
2953@@ -89,4 +89,9 @@ extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
2954 FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
2955
2956 s32 fm10k_init_ops_vf(struct fm10k_hw *hw);
2957+
2958+void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
2959+ struct fm10k_hw_stats *stats);
2960+void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
2961+ struct fm10k_hw_stats *stats);
2962 #endif /* _FM10K_VF_H */
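
The fm10k series above replaces indirect calls through hw->mac.ops with a switch on hw->mac.type. In a multi-process deployment the ops structure lives in shared memory, so the stored function pointers hold the primary process's addresses and are garbage when a secondary dereferences them; an enum is valid in every address space. De-STATIC'ing the PF/VF implementations merely exposes them for the direct calls. A generic sketch of enum dispatch replacing shared-memory function pointers:

    #include <stdio.h>

    enum mac_type { MAC_PF, MAC_VF, MAC_UNKNOWN };

    static void update_stats_pf(void) { puts("PF stats"); }
    static void update_stats_vf(void) { puts("VF stats"); }

    /* Dispatch on a plain enum, which is meaningful in any process,
     * instead of a function pointer whose value is only valid in the
     * address space that stored it. */
    static void
    update_stats(enum mac_type type)
    {
        switch (type) {
        case MAC_PF: update_stats_pf(); break;
        case MAC_VF: update_stats_vf(); break;
        default: break;
        }
    }

    int main(void) { update_stats(MAC_PF); return 0; }
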
2963diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
2964index 1a66dc3..9e86c28 100644
2965--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
2966+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
2967@@ -388,8 +388,15 @@ fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
2968 return;
2969
2970 /* free all mbufs that are valid in the ring */
2971- for (i = rxq->next_dd; i != rxq->rxrearm_start; i = (i + 1) & mask)
2972- rte_pktmbuf_free_seg(rxq->sw_ring[i]);
2973+ if (rxq->rxrearm_nb == 0) {
2974+ for (i = 0; i < rxq->nb_desc; i++)
2975+ if (rxq->sw_ring[i] != NULL)
2976+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
2977+ } else {
2978+ for (i = rxq->next_dd; i != rxq->rxrearm_start;
2979+ i = (i + 1) & mask)
2980+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
2981+ }
2982 rxq->rxrearm_nb = rxq->nb_desc;
2983
2984 /* set all entries to NULL */
2985diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
2986index b36ba9f..2830ffe 100644
2987--- a/drivers/net/i40e/i40e_ethdev_vf.c
2988+++ b/drivers/net/i40e/i40e_ethdev_vf.c
2989@@ -1384,7 +1384,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
2990 }
2991 break;
2992 default:
2993- PMD_DRV_LOG(ERR, "Request %u is not supported yet",
2994+ PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
2995 aq_opc);
2996 break;
2997 }
2998diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
2999index b5685e2..4b77e8c 100644
3000--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
3001+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
3002@@ -314,7 +314,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
3003 /* Read desc statuses backwards to avoid race condition */
3004 /* A.1 load 4 pkts desc */
3005 descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
3006- rte_rmb();
3007
3008 /* B.2 copy 2 mbuf point into rx_pkts */
3009 vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
3010@@ -337,9 +336,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
3011 rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
3012 }
3013
3014- /* avoid compiler reorder optimization */
3015- rte_compiler_barrier();
3016-
3017 /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
3018 uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
3019 len_shl);
3020@@ -507,6 +503,7 @@ i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
3021 i++;
3022 if (i == nb_bufs)
3023 return nb_bufs;
3024+ rxq->pkt_first_seg = rx_pkts[i];
3025 }
3026 return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
3027 &split_flags[i]);
3028diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
3029index c186ed6..0532bf6 100644
3030--- a/drivers/net/ixgbe/ixgbe_ethdev.c
3031+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
3032@@ -1232,6 +1232,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
3033 diag = ixgbe_bypass_init_hw(hw);
3034 #else
3035 diag = ixgbe_init_hw(hw);
3036+ hw->mac.autotry_restart = false;
3037 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
3038
3039 /*
3040@@ -1337,6 +1338,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
3041 /* enable support intr */
3042 ixgbe_enable_intr(eth_dev);
3043
3044+ ixgbe_dev_set_link_down(eth_dev);
3045+
3046 /* initialize filter info */
3047 memset(filter_info, 0,
3048 sizeof(struct ixgbe_filter_info));
3049@@ -2852,6 +2855,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
3050 } else {
3051 /* Turn on the laser */
3052 ixgbe_enable_tx_laser(hw);
3053+ ixgbe_dev_link_update(dev, 0);
3054 }
3055
3056 return 0;
3057@@ -2882,6 +2886,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
3058 } else {
3059 /* Turn off the laser */
3060 ixgbe_disable_tx_laser(hw);
3061+ ixgbe_dev_link_update(dev, 0);
3062 }
3063
3064 return 0;
3065@@ -3774,6 +3779,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3066 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3067
3068 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3069+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
3070+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
3071+ dev_info->speed_capa = ETH_LINK_SPEED_10M |
3072+ ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
3073+
3074 if (hw->mac.type == ixgbe_mac_X540 ||
3075 hw->mac.type == ixgbe_mac_X540_vf ||
3076 hw->mac.type == ixgbe_mac_X550 ||
3077@@ -3817,7 +3827,7 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3078 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3079 return ptypes;
3080
3081-#if defined(RTE_ARCH_X86)
3082+#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_NEON)
3083 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3084 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3085 return ptypes;
3086@@ -3854,6 +3864,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3087 DEV_TX_OFFLOAD_TCP_CKSUM |
3088 DEV_TX_OFFLOAD_SCTP_CKSUM |
3089 DEV_TX_OFFLOAD_TCP_TSO;
3090+ dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3091
3092 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3093 .rx_thresh = {
3094@@ -4006,6 +4017,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3095 int link_up;
3096 int diag;
3097 int wait = 1;
3098+ u32 esdp_reg;
3099
3100 link.link_status = ETH_LINK_DOWN;
3101 link.link_speed = 0;
3102@@ -4041,6 +4053,10 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3103 return 0;
3104 }
3105
3106+ esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
3107+ if ((esdp_reg & IXGBE_ESDP_SDP3))
3108+ link_up = 0;
3109+
3110 if (link_up == 0) {
3111 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3112 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
3113@@ -4059,7 +4075,6 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3114 switch (link_speed) {
3115 default:
3116 case IXGBE_LINK_SPEED_UNKNOWN:
3117- link.link_duplex = ETH_LINK_FULL_DUPLEX;
3118 link.link_speed = ETH_SPEED_NUM_100M;
3119 break;
3120
3121@@ -5796,7 +5811,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
3122 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
3123 } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
3124 (hw->mac.type == ixgbe_mac_X540) ||
3125- (hw->mac.type == ixgbe_mac_X550)) {
3126+ (hw->mac.type == ixgbe_mac_X550) ||
3127+ (hw->mac.type == ixgbe_mac_X550EM_x)) {
3128 if (direction == -1) {
3129 /* other causes */
3130 idx = ((queue & 1) * 8);
3131@@ -5919,6 +5935,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
3132 case ixgbe_mac_82599EB:
3133 case ixgbe_mac_X540:
3134 case ixgbe_mac_X550:
3135+ case ixgbe_mac_X550EM_x:
3136 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
3137 break;
3138 default:
3139diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
3140index 22ecbad..29a6f3b 100644
3141--- a/drivers/net/ixgbe/ixgbe_pf.c
3142+++ b/drivers/net/ixgbe/ixgbe_pf.c
3143@@ -655,6 +655,7 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
3144 case ixgbe_mbox_api_20:
3145 case ixgbe_mbox_api_11:
3146 case ixgbe_mbox_api_12:
3147+ case ixgbe_mbox_api_13:
3148 break;
3149 default:
3150 return -1;
3151diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
3152index 2e87ffa..fd01d97 100644
3153--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
3154+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
3155@@ -174,6 +174,68 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
3156 #define IXGBE_VPMD_DESC_DD_MASK 0x01010101
3157 #define IXGBE_VPMD_DESC_EOP_MASK 0x02020202
3158
3159+static inline uint32_t
3160+get_packet_type(uint32_t pkt_info,
3161+ uint32_t etqf_check,
3162+ uint32_t tunnel_check)
3163+{
3164+ if (etqf_check)
3165+ return RTE_PTYPE_UNKNOWN;
3166+
3167+ if (tunnel_check) {
3168+ pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
3169+ return ptype_table_tn[pkt_info];
3170+ }
3171+
3172+ pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
3173+ return ptype_table[pkt_info];
3174+}
3175+
3176+static inline void
3177+desc_to_ptype_v(uint64x2_t descs[4], uint16_t pkt_type_mask,
3178+ struct rte_mbuf **rx_pkts)
3179+{
3180+ uint32x4_t etqf_check, tunnel_check;
3181+ uint32x4_t etqf_mask = vdupq_n_u32(0x8000);
3182+ uint32x4_t tunnel_mask = vdupq_n_u32(0x10000);
3183+ uint32x4_t ptype_mask = vdupq_n_u32((uint32_t)pkt_type_mask);
3184+ uint32x4_t ptype0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
3185+ vreinterpretq_u32_u64(descs[2])).val[0];
3186+ uint32x4_t ptype1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
3187+ vreinterpretq_u32_u64(descs[3])).val[0];
3188+
3189+ /* interleave low 32 bits,
3190+ * now we have 4 ptypes in a NEON register
3191+ */
3192+ ptype0 = vzipq_u32(ptype0, ptype1).val[0];
3193+
3194+ /* mask etqf bits */
3195+ etqf_check = vandq_u32(ptype0, etqf_mask);
3196+ /* mask tunnel bits */
3197+ tunnel_check = vandq_u32(ptype0, tunnel_mask);
3198+
3199+ /* shift right by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */
3200+ ptype0 = vandq_u32(vshrq_n_u32(ptype0, IXGBE_PACKET_TYPE_SHIFT),
3201+ ptype_mask);
3202+
3203+ rx_pkts[0]->packet_type =
3204+ get_packet_type(vgetq_lane_u32(ptype0, 0),
3205+ vgetq_lane_u32(etqf_check, 0),
3206+ vgetq_lane_u32(tunnel_check, 0));
3207+ rx_pkts[1]->packet_type =
3208+ get_packet_type(vgetq_lane_u32(ptype0, 1),
3209+ vgetq_lane_u32(etqf_check, 1),
3210+ vgetq_lane_u32(tunnel_check, 1));
3211+ rx_pkts[2]->packet_type =
3212+ get_packet_type(vgetq_lane_u32(ptype0, 2),
3213+ vgetq_lane_u32(etqf_check, 2),
3214+ vgetq_lane_u32(tunnel_check, 2));
3215+ rx_pkts[3]->packet_type =
3216+ get_packet_type(vgetq_lane_u32(ptype0, 3),
3217+ vgetq_lane_u32(etqf_check, 3),
3218+ vgetq_lane_u32(tunnel_check, 3));
3219+}
3220+
3221 static inline uint16_t
3222 _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
3223 uint16_t nb_pkts, uint8_t *split_packet)
3224@@ -243,13 +305,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
3225 uint32_t var = 0;
3226 uint32_t stat;
3227
3228- /* B.1 load 1 mbuf point */
3229+ /* B.1 load 2 mbuf point */
3230 mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
3231
3232 /* B.2 copy 2 mbuf point into rx_pkts */
3233 vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
3234
3235- /* B.1 load 1 mbuf point */
3236+ /* B.1 load 2 mbuf point */
3237 mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
3238
3239 /* A. load 4 pkts descs */
3240@@ -257,7 +319,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
3241 descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
3242 descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
3243 descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
3244- rte_smp_rmb();
3245
3246 /* B.2 copy 2 mbuf point into rx_pkts */
3247 vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
3248@@ -326,6 +387,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
3249 vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
3250 pkt_mb1);
3251
3252+ desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);
3253+
3254 stat &= IXGBE_VPMD_DESC_DD_MASK;
3255
3256 /* C.4 calc avaialbe number of desc */
3257@@ -404,6 +467,7 @@ ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
3258 i++;
3259 if (i == nb_bufs)
3260 return nb_bufs;
3261+ rxq->pkt_first_seg = rx_pkts[i];
3262 }
3263 return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
3264 &split_flags[i]);
3265diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
3266index 1f95e0d..0bde672 100644
3267--- a/drivers/net/mlx4/Makefile
3268+++ b/drivers/net/mlx4/Makefile
3269@@ -73,7 +73,11 @@ endif
3270
3271 # User-defined CFLAGS.
3272 ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG),y)
3273-CFLAGS += -pedantic -UNDEBUG -DPEDANTIC
3274+CFLAGS += -pedantic -UNDEBUG
3275+ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
3276+CFLAGS += -DPEDANTIC
3277+endif
3278+AUTO_CONFIG_CFLAGS += -Wno-pedantic
3279 else
3280 CFLAGS += -DNDEBUG -UPEDANTIC
3281 endif
3282@@ -87,7 +91,7 @@ include $(RTE_SDK)/mk/rte.lib.mk
3283 # Generate and clean-up mlx4_autoconf.h.
3284
3285 export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
3286-export AUTO_CONFIG_CFLAGS = -Wno-error
3287+export AUTO_CONFIG_CFLAGS += -Wno-error
3288
3289 ifndef V
3290 AUTOCONF_OUTPUT := >/dev/null
3291diff --git a/drivers/net/mlx4/mlx4_utils.h b/drivers/net/mlx4/mlx4_utils.h
3292index 4f11405..02e6b7b 100644
3293--- a/drivers/net/mlx4/mlx4_utils.h
3294+++ b/drivers/net/mlx4/mlx4_utils.h
3295@@ -43,6 +43,16 @@
3296
3297 #include "mlx4.h"
3298
3299+/*
3300+ * Compilation workaround for PPC64 when AltiVec is fully enabled, e.g. std=c11.
3301+ * Otherwise there would be a type conflict between stdbool and altivec.
3302+ */
3303+#if defined(__PPC64__) && !defined(__APPLE_ALTIVEC__)
3304+#undef bool
3305+/* redefine as in stdbool.h */
3306+#define bool _Bool
3307+#endif
3308+
3309 #ifndef NDEBUG
3310
3311 /*
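
Context for the mlx4_utils.h hunk above: with AltiVec enabled on PPC64, GCC's <altivec.h> (outside the __APPLE_ALTIVEC__ variant) defines bool as a macro for use in vector types, which collides with the <stdbool.h> meaning assumed everywhere else. The workaround undefines the macro and restores _Bool. Roughly the conflict being neutralized, with the altivec.h line paraphrased rather than quoted:

    /* <altivec.h> does approximately: #define bool __bool           */
    /* which breaks later code expecting <stdbool.h> semantics, so:  */
    #if defined(__PPC64__) && !defined(__APPLE_ALTIVEC__)
    #undef bool
    #define bool _Bool    /* restore the <stdbool.h> definition */
    #endif
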
3312diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
3313index c62ad11..e6fe1c6 100644
3314--- a/drivers/net/mlx5/Makefile
3315+++ b/drivers/net/mlx5/Makefile
3316@@ -83,7 +83,11 @@ endif
3317
3318 # User-defined CFLAGS.
3319 ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DEBUG),y)
3320-CFLAGS += -pedantic -UNDEBUG -DPEDANTIC
3321+CFLAGS += -pedantic -UNDEBUG
3322+ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
3323+CFLAGS += -DPEDANTIC
3324+endif
3325+AUTO_CONFIG_CFLAGS += -Wno-pedantic
3326 else
3327 CFLAGS += -DNDEBUG -UPEDANTIC
3328 endif
3329@@ -97,7 +101,7 @@ include $(RTE_SDK)/mk/rte.lib.mk
3330 # Generate and clean-up mlx5_autoconf.h.
3331
3332 export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
3333-export AUTO_CONFIG_CFLAGS = -Wno-error
3334+export AUTO_CONFIG_CFLAGS += -Wno-error
3335
3336 ifndef V
3337 AUTOCONF_OUTPUT := >/dev/null
3338diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
3339index ae37c2b..950d27d 100644
3340--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
3341+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
3342@@ -1032,7 +1032,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
3343 rcvd_pkt += n;
3344 }
3345 }
3346- rte_compiler_barrier();
3347+ rte_cio_wmb();
3348 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
3349 return rcvd_pkt;
3350 }
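
The mlx5 one-liner above matters on weakly ordered CPUs: rte_compiler_barrier() only constrains the compiler, so on arm64/ppc64 the CQ doorbell store could become visible to the NIC before the preceding completion-queue writes; rte_cio_wmb() emits a real store fence for coherent I/O memory. A portable C11 model of the same publish pattern, using a release store where the driver uses the barrier plus a plain store:

    #include <stdatomic.h>
    #include <stdint.h>

    #define RING_SZ 256

    static uint64_t ring[RING_SZ];
    static _Atomic uint32_t prod_idx;

    /* Every write to ring[] must happen-before the index update becomes
     * visible to the consumer. A compiler barrier alone cannot enforce
     * that against CPU store reordering; a release store (or a write
     * barrier such as rte_cio_wmb()) can. */
    static void
    produce(uint64_t val)
    {
        uint32_t i = atomic_load_explicit(&prod_idx, memory_order_relaxed);

        ring[i % RING_SZ] = val;
        atomic_store_explicit(&prod_idx, i + 1, memory_order_release);
    }
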
3351diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
3352index 43403e6..1fa64e1 100644
3353--- a/drivers/net/qede/qede_ethdev.c
3354+++ b/drivers/net/qede/qede_ethdev.c
3355@@ -970,6 +970,8 @@ static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
3356 }
3357 }
3358
3359+ qdev->vlan_strip_flg = flg;
3360+
3361 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
3362 return 0;
3363 }
3364@@ -1178,9 +1180,11 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
3365 PMD_INIT_FUNC_TRACE(edev);
3366
3367 /* Update MTU only if it has changed */
3368- if (eth_dev->data->mtu != qdev->mtu) {
3369- if (qede_update_mtu(eth_dev, qdev->mtu))
3370+ if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) {
3371+ if (qede_update_mtu(eth_dev, qdev->new_mtu))
3372 goto err;
3373+ qdev->mtu = qdev->new_mtu;
3374+ qdev->new_mtu = 0;
3375 }
3376
3377 /* Configure TPA parameters */
3378@@ -2408,7 +2412,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
3379 restart = true;
3380 }
3381 rte_delay_ms(1000);
3382- qdev->mtu = mtu;
3383+ qdev->new_mtu = mtu;
3384
3385 /* Fix up RX buf size for all queues of the port */
3386 for_each_rss(i) {
3387diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
3388index cc1a409..39d5822 100644
3389--- a/drivers/net/qede/qede_ethdev.h
3390+++ b/drivers/net/qede/qede_ethdev.h
3391@@ -185,6 +185,7 @@ struct qede_dev {
3392 struct ecore_sb_info *sb_array;
3393 struct qede_fastpath *fp_array;
3394 uint16_t mtu;
3395+ uint16_t new_mtu;
3396 bool enable_tx_switching;
3397 bool rss_enable;
3398 struct rte_eth_rss_conf rss_conf;
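
The qede MTU fix above works by staging: qede_set_mtu() used to write straight into qdev->mtu, so the changed-MTU test in qede_dev_start() never fired and the hardware kept the old MTU across the restart. The new new_mtu field holds the request until start commits it. A generic staged-configuration sketch; hw_program_mtu() is an illustrative hook:

    #include <stdint.h>

    struct dev_cfg {
        uint16_t mtu;       /* value currently programmed in hardware */
        uint16_t new_mtu;   /* pending request; 0 means nothing pending */
    };

    extern int hw_program_mtu(uint16_t mtu);    /* illustrative HW hook */

    static int
    dev_start(struct dev_cfg *cfg)
    {
        /* Commit the staged value only if it differs from what the
         * hardware already carries; clear the request once applied. */
        if (cfg->new_mtu && cfg->new_mtu != cfg->mtu) {
            int ret = hw_program_mtu(cfg->new_mtu);

            if (ret)
                return ret;
            cfg->mtu = cfg->new_mtu;
            cfg->new_mtu = 0;
        }
        return 0;
    }
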
3399diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
3400index 358a8ef..dd88c50 100644
3401--- a/drivers/net/qede/qede_rxtx.c
3402+++ b/drivers/net/qede/qede_rxtx.c
3403@@ -48,8 +48,6 @@ static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
3404 int i, ret = 0;
3405 uint16_t idx;
3406
3407- idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
3408-
3409 if (count > QEDE_MAX_BULK_ALLOC_COUNT)
3410 count = QEDE_MAX_BULK_ALLOC_COUNT;
3411
3412@@ -58,7 +56,9 @@ static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
3413 PMD_RX_LOG(ERR, rxq,
3414 "Failed to allocate %d rx buffers "
3415 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
3416- count, idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
3417+ count,
3418+ rxq->sw_rx_prod & NUM_RX_BDS(rxq),
3419+ rxq->sw_rx_cons & NUM_RX_BDS(rxq),
3420 rte_mempool_avail_count(rxq->mb_pool),
3421 rte_mempool_in_use_count(rxq->mb_pool));
3422 return -ENOMEM;
3423diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
3424index 90ef5bf..9d46abe 100644
3425--- a/drivers/net/sfc/sfc_flow.c
3426+++ b/drivers/net/sfc/sfc_flow.c
3427@@ -1185,10 +1185,10 @@ sfc_flow_create(struct rte_eth_dev *dev,
3428 if (rc != 0)
3429 goto fail_bad_value;
3430
3431- TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
3432-
3433 sfc_adapter_lock(sa);
3434
3435+ TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
3436+
3437 if (sa->state == SFC_ADAPTER_STARTED) {
3438 rc = sfc_flow_filter_insert(sa, flow);
3439 if (rc != 0) {
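
The sfc reordering above closes a window: the flow was linked into filter.flow_list before sfc_adapter_lock() was taken, so a concurrent adapter operation holding the lock could walk the list and see an entry whose filter had not been inserted yet. Publishing under the same lock every reader takes removes the window. A pthread sketch of publish-under-lock:

    #include <pthread.h>

    struct node { struct node *next; int value; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *list_head;

    /* Finish initializing the node first, then link it in while holding
     * the lock that all readers take: no reader can observe it half-built. */
    static void
    list_insert(struct node *n, int value)
    {
        n->value = value;

        pthread_mutex_lock(&list_lock);
        n->next = list_head;
        list_head = n;
        pthread_mutex_unlock(&list_lock);
    }
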
3440diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
3441index 2204216..341a5e0 100644
3442--- a/drivers/net/tap/rte_eth_tap.c
3443+++ b/drivers/net/tap/rte_eth_tap.c
3444@@ -308,9 +308,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
3445
3446 if (trigger == rxq->trigger_seen)
3447 return 0;
3448- if (trigger)
3449- rxq->trigger_seen = trigger;
3450- rte_compiler_barrier();
3451+
3452 for (num_rx = 0; num_rx < nb_pkts; ) {
3453 struct rte_mbuf *mbuf = rxq->pool;
3454 struct rte_mbuf *seg = NULL;
3455@@ -386,6 +384,9 @@ end:
3456 rxq->stats.ipackets += num_rx;
3457 rxq->stats.ibytes += num_rx_bytes;
3458
3459+ if (trigger && num_rx < nb_pkts)
3460+ rxq->trigger_seen = trigger;
3461+
3462 return num_rx;
3463 }
3464
3465diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
3466index 7b7780c..1f3b271 100644
3467--- a/drivers/net/vhost/rte_eth_vhost.c
3468+++ b/drivers/net/vhost/rte_eth_vhost.c
3469@@ -705,6 +705,10 @@ vring_state_changed(int vid, uint16_t vring, int enable)
3470 /* won't be NULL */
3471 state = vring_states[eth_dev->data->port_id];
3472 rte_spinlock_lock(&state->lock);
3473+ if (state->cur[vring] == enable) {
3474+ rte_spinlock_unlock(&state->lock);
3475+ return 0;
3476+ }
3477 state->cur[vring] = enable;
3478 state->max_vring = RTE_MAX(vring, state->max_vring);
3479 rte_spinlock_unlock(&state->lock);
3480diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
3481index 1bdf37e..a4089f7 100644
3482--- a/drivers/net/virtio/virtio_ethdev.c
3483+++ b/drivers/net/virtio/virtio_ethdev.c
3484@@ -1502,6 +1502,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
3485 } else {
3486 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
3487 hw->max_queue_pairs = 1;
3488+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
3489+ VLAN_TAG_LEN - hw->vtnet_hdr_size;
3490 }
3491
3492 ret = virtio_alloc_queues(eth_dev);
3493diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
3494index b37a186..cea097a 100644
3495--- a/drivers/net/virtio/virtio_rxtx.c
3496+++ b/drivers/net/virtio/virtio_rxtx.c
3497@@ -36,6 +36,7 @@
3498 #include <stdlib.h>
3499 #include <string.h>
3500 #include <errno.h>
3501+#include <stdbool.h>
3502
3503 #include <rte_cycles.h>
3504 #include <rte_memory.h>
3505@@ -282,9 +283,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
3506 struct vring_desc *start_dp;
3507 uint16_t seg_num = cookie->nb_segs;
3508 uint16_t head_idx, idx;
3509- uint16_t head_size = vq->hw->vtnet_hdr_size;
3510+ int16_t head_size = vq->hw->vtnet_hdr_size;
3511 struct virtio_net_hdr *hdr;
3512 int offload;
3513+ bool prepend_header = false;
3514
3515 offload = tx_offload_enabled(vq->hw);
3516 head_idx = vq->vq_desc_head_idx;
3517@@ -297,12 +299,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
3518
3519 if (can_push) {
3520 /* prepend cannot fail, checked by caller */
3521- hdr = (struct virtio_net_hdr *)
3522- rte_pktmbuf_prepend(cookie, head_size);
3523- /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
3524- * which is wrong. Below subtract restores correct pkt size.
3525- */
3526- cookie->pkt_len -= head_size;
3527+ hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
3528+ -head_size);
3529+ prepend_header = true;
3530 /* if offload disabled, it is not zeroed below, do it now */
3531 if (offload == 0) {
3532 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
3533@@ -388,6 +387,11 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
3534 do {
3535 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
3536 start_dp[idx].len = cookie->data_len;
3537+ if (prepend_header) {
3538+ start_dp[idx].addr -= head_size;
3539+ start_dp[idx].len += head_size;
3540+ prepend_header = false;
3541+ }
3542 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
3543 idx = start_dp[idx].next;
3544 } while ((cookie = cookie->next) != NULL);
3545@@ -417,7 +421,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
3546 uint16_t queue_idx,
3547 uint16_t nb_desc,
3548 unsigned int socket_id __rte_unused,
3549- __rte_unused const struct rte_eth_rxconf *rx_conf,
3550+ const struct rte_eth_rxconf *rx_conf,
3551 struct rte_mempool *mp)
3552 {
3553 uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
3554@@ -427,6 +431,11 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
3555
3556 PMD_INIT_FUNC_TRACE();
3557
3558+ if (rx_conf->rx_deferred_start) {
3559+ PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
3560+ return -EINVAL;
3561+ }
3562+
3563 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
3564 nb_desc = vq->vq_nentries;
3565 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
3566@@ -529,6 +538,11 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
3567
3568 PMD_INIT_FUNC_TRACE();
3569
3570+ if (tx_conf->tx_deferred_start) {
3571+ PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
3572+ return -EINVAL;
3573+ }
3574+
3575 /* cannot use simple rxtx funcs with multisegs or offloads */
3576 if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) != VIRTIO_SIMPLE_FLAGS)
3577 hw->use_simple_tx = 0;
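
The virtio transmit change above avoids rte_pktmbuf_prepend() entirely: prepending grew pkt_len (which then had to be subtracted back) and mutated the mbuf for what is really a DMA-layout concern. Instead the header is written into the headroom via rte_pktmbuf_mtod_offset(..., -head_size) and the first ring descriptor is widened to cover it, leaving mbuf accounting untouched. A sketch of widening a descriptor over a header placed in headroom; struct desc is illustrative:

    #include <stdint.h>

    struct desc { uint64_t addr; uint32_t len; };

    /* A header of hdr_len bytes has been written immediately before the
     * packet data (in the buffer headroom). Rather than prepending to
     * the packet structure, widen the DMA descriptor so the device sees
     * header + payload as one contiguous region. */
    static void
    desc_cover_header(struct desc *d, uint64_t data_addr, uint32_t data_len,
                      uint32_t hdr_len)
    {
        d->addr = data_addr - hdr_len;
        d->len  = data_len + hdr_len;
    }
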
3578diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
3579index b8b9355..a9d5cb6 100644
3580--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
3581+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
3582@@ -72,7 +72,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
3583 struct virtnet_rx *rxvq = rx_queue;
3584 struct virtqueue *vq = rxvq->vq;
3585 struct virtio_hw *hw = vq->hw;
3586- uint16_t nb_used;
3587+ uint16_t nb_used, nb_total;
3588 uint16_t desc_idx;
3589 struct vring_used_elem *rused;
3590 struct rte_mbuf **sw_ring;
3591@@ -135,8 +135,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
3592 virtqueue_notify(vq);
3593 }
3594
3595+ nb_total = nb_used;
3596 for (nb_pkts_received = 0;
3597- nb_pkts_received < nb_used;) {
3598+ nb_pkts_received < nb_total;) {
3599 uint64x2_t desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
3600 uint64x2_t mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
3601 uint64x2_t pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
3602diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
3603index 94f6514..74ca28d 100644
3604--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
3605+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
3606@@ -74,7 +74,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
3607 struct virtnet_rx *rxvq = rx_queue;
3608 struct virtqueue *vq = rxvq->vq;
3609 struct virtio_hw *hw = vq->hw;
3610- uint16_t nb_used;
3611+ uint16_t nb_used, nb_total;
3612 uint16_t desc_idx;
3613 struct vring_used_elem *rused;
3614 struct rte_mbuf **sw_ring;
3615@@ -138,8 +138,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
3616 virtqueue_notify(vq);
3617 }
3618
3619+ nb_total = nb_used;
3620 for (nb_pkts_received = 0;
3621- nb_pkts_received < nb_used;) {
3622+ nb_pkts_received < nb_total;) {
3623 __m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
3624 __m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
3625 __m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
3626diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
3627index 23510a9..95433f7 100644
3628--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
3629+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
3630@@ -484,6 +484,10 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
3631
3632 queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
3633 status = virtio_user_handle_mq(dev, queues);
3634+ } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
3635+ hdr->class == VIRTIO_NET_CTRL_MAC ||
3636+ hdr->class == VIRTIO_NET_CTRL_VLAN) {
3637+ status = 0;
3638 }
3639
3640 /* Update status */
3641diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
3642index c70c547..ebdaed4 100644
3643--- a/examples/ethtool/lib/rte_ethtool.c
3644+++ b/examples/ethtool/lib/rte_ethtool.c
3645@@ -67,7 +67,6 @@ rte_ethtool_get_drvinfo(uint16_t port_id, struct ethtool_drvinfo *drvinfo)
3646 printf("Insufficient fw version buffer size, "
3647 "the minimum size should be %d\n", ret);
3648
3649- memset(&dev_info, 0, sizeof(dev_info));
3650 rte_eth_dev_info_get(port_id, &dev_info);
3651
3652 snprintf(drvinfo->driver, sizeof(drvinfo->driver), "%s",
3653@@ -367,7 +366,6 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)
3654 struct rte_eth_dev_info dev_info;
3655 uint16_t vf;
3656
3657- memset(&dev_info, 0, sizeof(dev_info));
3658 rte_eth_dev_info_get(port_id, &dev_info);
3659 num_vfs = dev_info.max_vfs;
3660
3661diff --git a/examples/ipsec-secgw/ep0.cfg b/examples/ipsec-secgw/ep0.cfg
3662index 299aa9e..dfd4aca 100644
3663--- a/examples/ipsec-secgw/ep0.cfg
3664+++ b/examples/ipsec-secgw/ep0.cfg
3665@@ -49,14 +49,14 @@ sport 0:65535 dport 0:65535
3666 sp ipv6 out esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \
3667 sport 0:65535 dport 0:65535
3668
3669-sp ipv6 in esp protect 15 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \
3670-sport 0:65535 dport 0:65535
3671-sp ipv6 in esp protect 16 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \
3672-sport 0:65535 dport 0:65535
3673 sp ipv6 in esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \
3674 sport 0:65535 dport 0:65535
3675 sp ipv6 in esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \
3676 sport 0:65535 dport 0:65535
3677+sp ipv6 in esp protect 115 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \
3678+sport 0:65535 dport 0:65535
3679+sp ipv6 in esp protect 116 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \
3680+sport 0:65535 dport 0:65535
3681 sp ipv6 in esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \
3682 sport 0:65535 dport 0:65535
3683 sp ipv6 in esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \
3684diff --git a/examples/ipsec-secgw/ep1.cfg b/examples/ipsec-secgw/ep1.cfg
3685index 3f6ff81..19bdc68 100644
3686--- a/examples/ipsec-secgw/ep1.cfg
3687+++ b/examples/ipsec-secgw/ep1.cfg
3688@@ -19,8 +19,8 @@ sp ipv4 in esp protect 15 pri 1 dst 192.168.200.0/24 sport 0:65535 dport 0:65535
3689 sp ipv4 in esp protect 16 pri 1 dst 192.168.201.0/24 sport 0:65535 dport 0:65535
3690 sp ipv4 in esp protect 25 pri 1 dst 192.168.55.0/24 sport 0:65535 dport 0:65535
3691 sp ipv4 in esp protect 26 pri 1 dst 192.168.56.0/24 sport 0:65535 dport 0:65535
3692-sp ipv4 in esp bypass dst 192.168.240.0/24 sport 0:65535 dport 0:65535
3693-sp ipv4 in esp bypass dst 192.168.241.0/24 sport 0:65535 dport 0:65535
3694+sp ipv4 in esp bypass pri 1 dst 192.168.240.0/24 sport 0:65535 dport 0:65535
3695+sp ipv4 in esp bypass pri 1 dst 192.168.241.0/24 sport 0:65535 dport 0:65535
3696
3697 sp ipv4 out esp protect 105 pri 1 dst 192.168.115.0/24 sport 0:65535 dport 0:65535
3698 sp ipv4 out esp protect 106 pri 1 dst 192.168.116.0/24 sport 0:65535 dport 0:65535
3699@@ -49,14 +49,14 @@ sport 0:65535 dport 0:65535
3700 sp ipv6 in esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \
3701 sport 0:65535 dport 0:65535
3702
3703-sp ipv6 out esp protect 15 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \
3704-sport 0:65535 dport 0:65535
3705-sp ipv6 out esp protect 16 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \
3706-sport 0:65535 dport 0:65535
3707 sp ipv6 out esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \
3708 sport 0:65535 dport 0:65535
3709 sp ipv6 out esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \
3710 sport 0:65535 dport 0:65535
3711+sp ipv6 out esp protect 115 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \
3712+sport 0:65535 dport 0:65535
3713+sp ipv6 out esp protect 116 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \
3714+sport 0:65535 dport 0:65535
3715 sp ipv6 out esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \
3716 sport 0:65535 dport 0:65535
3717 sp ipv6 out esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \
3718diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
3719index eb83d94..f01a531 100644
3720--- a/examples/ipsec-secgw/sa.c
3721+++ b/examples/ipsec-secgw/sa.c
3722@@ -123,7 +123,7 @@ const struct supported_auth_algo auth_algos[] = {
3723 {
3724 .keyword = "sha256-hmac",
3725 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
3726- .digest_len = 12,
3727+ .digest_len = 16,
3728 .key_len = 32
3729 }
3730 };
3731@@ -822,7 +822,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
3732 }
3733
3734 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
3735- iv_length = 16;
3736+ iv_length = 12;
3737
3738 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
3739 sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
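
Both ipsec-secgw constants above now match the ESP RFCs: AES-GCM in ESP (RFC 4106) uses a 12-byte nonce, 4 bytes of salt from the key material plus the 8-byte per-packet IV, not 16; and sha256-hmac in ESP is HMAC-SHA-256-128 (RFC 4868), i.e. the digest is truncated to 16 bytes, not 12. Peers following the RFCs would fail to interoperate with the old values. The corrected values, annotated:

    /* RFC 4106: AES-GCM ESP nonce = 4-byte salt || 8-byte IV = 12 bytes. */
    /* RFC 4868: HMAC-SHA-256-128 truncates the 32-byte digest to 16.     */
    enum {
        ESP_GCM_IV_LEN     = 12,  /* iv_length for the AEAD transform */
        ESP_SHA256_ICV_LEN = 16,  /* digest_len for sha256-hmac       */
    };
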
3740diff --git a/examples/kni/main.c b/examples/kni/main.c
3741index 3f17385..e272e84 100644
3742--- a/examples/kni/main.c
3743+++ b/examples/kni/main.c
3744@@ -802,7 +802,6 @@ kni_alloc(uint16_t port_id)
3745 struct rte_kni_ops ops;
3746 struct rte_eth_dev_info dev_info;
3747
3748- memset(&dev_info, 0, sizeof(dev_info));
3749 rte_eth_dev_info_get(port_id, &dev_info);
3750
3751 if (dev_info.pci_dev) {
3752diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
3753index 50c3702..c669f6a 100644
3754--- a/examples/l3fwd-power/main.c
3755+++ b/examples/l3fwd-power/main.c
3756@@ -813,7 +813,9 @@ sleep_until_rx_interrupt(int num)
3757 port_id = ((uintptr_t)data) >> CHAR_BIT;
3758 queue_id = ((uintptr_t)data) &
3759 RTE_LEN2MASK(CHAR_BIT, uint8_t);
3760+ rte_spinlock_lock(&(locks[port_id]));
3761 rte_eth_dev_rx_intr_disable(port_id, queue_id);
3762+ rte_spinlock_unlock(&(locks[port_id]));
3763 RTE_LOG(INFO, L3FWD_POWER,
3764 "lcore %u is waked up from rx interrupt on"
3765 " port %d queue %d\n",
3766diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
3767index 30ce4b3..a9076d1 100644
3768--- a/examples/multi_process/client_server_mp/mp_client/client.c
3769+++ b/examples/multi_process/client_server_mp/mp_client/client.c
3770@@ -275,19 +275,19 @@ main(int argc, char *argv[])
3771
3772 for (;;) {
3773 uint16_t i, rx_pkts;
3774- uint16_t port;
3775
3776 rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
3777 PKT_READ_SIZE, NULL);
3778
3779- if (unlikely(rx_pkts == 0)){
3780- if (need_flush)
3781- for (port = 0; port < ports->num_ports; port++) {
3782- sent = rte_eth_tx_buffer_flush(ports->id[port], client_id,
3783- tx_buffer[port]);
3784- if (unlikely(sent))
3785- tx_stats->tx[port] += sent;
3786- }
3787+ if (rx_pkts == 0 && need_flush) {
3788+ for (i = 0; i < ports->num_ports; i++) {
3789+ uint16_t port = ports->id[i];
3790+
3791+ sent = rte_eth_tx_buffer_flush(port,
3792+ client_id,
3793+ tx_buffer[port]);
3794+ tx_stats->tx[port] += sent;
3795+ }
3796 need_flush = 0;
3797 continue;
3798 }
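
The client rewrite above untangles slot index from port id: the old loop indexed tx_buffer[] and the stats by the bare slot counter while passing the real port id to the flush, two schemes that diverge whenever port ids are not 0..num_ports-1; the rewrite resolves ports->id[i] once and uses it consistently. A sketch of the slot-to-id resolution; flush_port() is an illustrative stand-in:

    #include <stdint.h>

    struct port_set {
        uint16_t num_ports;
        uint16_t id[8];    /* id[i] is the ethdev port id in slot i */
    };

    extern uint16_t flush_port(uint16_t port_id);    /* illustrative */

    static void
    flush_all(const struct port_set *ports, uint64_t *tx_stats)
    {
        uint16_t i;

        for (i = 0; i < ports->num_ports; i++) {
            uint16_t port = ports->id[i];    /* resolve slot -> port id */

            tx_stats[port] += flush_port(port);
        }
    }
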
3799diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
3800index 37e71ed..6e73d54 100644
3801--- a/examples/vm_power_manager/channel_monitor.c
3802+++ b/examples/vm_power_manager/channel_monitor.c
3803@@ -49,7 +49,9 @@
3804 #include <rte_atomic.h>
3805 #include <rte_cycles.h>
3806 #include <rte_ethdev.h>
3807+#ifdef RTE_LIBRTE_I40E_PMD
3808 #include <rte_pmd_i40e.h>
3809+#endif
3810
3811 #include <libvirt/libvirt.h>
3812 #include "channel_monitor.h"
3813@@ -145,8 +147,12 @@ get_pfid(struct policy *pol)
3814 for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {
3815
3816 for (x = 0; x < nb_ports; x++) {
3817+#ifdef RTE_LIBRTE_I40E_PMD
3818 ret = rte_pmd_i40e_query_vfid_by_mac(x,
3819 (struct ether_addr *)&(pol->pkt.vfid[i]));
3820+#else
3821+ ret = -ENOTSUP;
3822+#endif
3823 if (ret != -EINVAL) {
3824 pol->port[i] = x;
3825 break;
3826@@ -209,15 +215,21 @@ get_pkt_diff(struct policy *pol)
3827 vsi_pkt_count_prev_total = 0;
3828 double rdtsc_curr, rdtsc_diff, diff;
3829 int x;
3830+#ifdef RTE_LIBRTE_I40E_PMD
3831 struct rte_eth_stats vf_stats;
3832+#endif
3833
3834 for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {
3835
3836+#ifdef RTE_LIBRTE_I40E_PMD
3837 /*Read vsi stats*/
3838 if (rte_pmd_i40e_get_vf_stats(x, pol->pfid[x], &vf_stats) == 0)
3839 vsi_pkt_count = vf_stats.ipackets;
3840 else
3841 vsi_pkt_count = -1;
3842+#else
3843+ vsi_pkt_count = -1;
3844+#endif
3845
3846 vsi_pkt_total += vsi_pkt_count;
3847
3848diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
3849index 63f711e..61a3dd6 100644
3850--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
3851+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
3852@@ -121,7 +121,7 @@ cmd_set_cpu_freq_parsed(void *parsed_result, struct cmdline *cl,
3853 cmdline_parse_token_string_t cmd_set_cpu_freq =
3854 TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
3855 set_cpu_freq, "set_cpu_freq");
3856-cmdline_parse_token_string_t cmd_set_cpu_freq_core_num =
3857+cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
3858 TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
3859 lcore_id, UINT8);
3860 cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
3861diff --git a/lib/librte_compat/rte_compat.h b/lib/librte_compat/rte_compat.h
3862index 41e8032..f45b434 100644
3863--- a/lib/librte_compat/rte_compat.h
3864+++ b/lib/librte_compat/rte_compat.h
3865@@ -63,14 +63,14 @@
3866 /*
3867 * VERSION_SYMBOL
3868 * Creates a symbol version table entry binding symbol <b>@DPDK_<n> to the internal
3869- * function name <b>_<e>
3870+ * function name <b><e>
3871 */
3872 #define VERSION_SYMBOL(b, e, n) __asm__(".symver " RTE_STR(b) RTE_STR(e) ", " RTE_STR(b) "@DPDK_" RTE_STR(n))
3873
3874 /*
3875 * BIND_DEFAULT_SYMBOL
3876 * Creates a symbol version entry instructing the linker to bind references to
3877- * symbol <b> to the internal symbol <b>_<e>
3878+ * symbol <b> to the internal symbol <b><e>
3879 */
3880 #define BIND_DEFAULT_SYMBOL(b, e, n) __asm__(".symver " RTE_STR(b) RTE_STR(e) ", " RTE_STR(b) "@@DPDK_" RTE_STR(n))
3881 #define __vsym __attribute__((used))
3882diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
3883index c9f2b62..150ef9a 100644
3884--- a/lib/librte_cryptodev/rte_cryptodev.c
3885+++ b/lib/librte_cryptodev/rte_cryptodev.c
3886@@ -78,8 +78,7 @@ struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];
3887 static struct rte_cryptodev_global cryptodev_globals = {
3888 .devs = &rte_crypto_devices[0],
3889 .data = { NULL },
3890- .nb_devs = 0,
3891- .max_devs = RTE_CRYPTO_MAX_DEVS
3892+ .nb_devs = 0
3893 };
3894
3895 struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;
3896@@ -415,7 +414,7 @@ rte_cryptodev_pmd_get_named_dev(const char *name)
3897 if (name == NULL)
3898 return NULL;
3899
3900- for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
3901+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
3902 dev = &rte_cryptodev_globals->devs[i];
3903
3904 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
3905@@ -426,12 +425,22 @@ rte_cryptodev_pmd_get_named_dev(const char *name)
3906 return NULL;
3907 }
3908
3909+static inline uint8_t
3910+rte_cryptodev_is_valid_device_data(uint8_t dev_id)
3911+{
3912+ if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
3913+ rte_crypto_devices[dev_id].data == NULL)
3914+ return 0;
3915+
3916+ return 1;
3917+}
3918+
3919 unsigned int
3920 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
3921 {
3922 struct rte_cryptodev *dev = NULL;
3923
3924- if (dev_id >= rte_cryptodev_globals->nb_devs)
3925+ if (!rte_cryptodev_is_valid_device_data(dev_id))
3926 return 0;
3927
3928 dev = rte_cryptodev_pmd_get_dev(dev_id);
3929@@ -450,12 +459,15 @@ rte_cryptodev_get_dev_id(const char *name)
3930 if (name == NULL)
3931 return -1;
3932
3933- for (i = 0; i < rte_cryptodev_globals->nb_devs; i++)
3934+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
3935+ if (!rte_cryptodev_is_valid_device_data(i))
3936+ continue;
3937 if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
3938 == 0) &&
3939 (rte_cryptodev_globals->devs[i].attached ==
3940 RTE_CRYPTODEV_ATTACHED))
3941 return i;
3942+ }
3943
3944 return -1;
3945 }
3946@@ -471,7 +483,7 @@ rte_cryptodev_device_count_by_driver(uint8_t driver_id)
3947 {
3948 uint8_t i, dev_count = 0;
3949
3950- for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
3951+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
3952 if (rte_cryptodev_globals->devs[i].driver_id == driver_id &&
3953 rte_cryptodev_globals->devs[i].attached ==
3954 RTE_CRYPTODEV_ATTACHED)
3955@@ -486,9 +498,10 @@ rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
3956 {
3957 uint8_t i, count = 0;
3958 struct rte_cryptodev *devs = rte_cryptodev_globals->devs;
3959- uint8_t max_devs = rte_cryptodev_globals->max_devs;
3960
3961- for (i = 0; i < max_devs && count < nb_devices; i++) {
3962+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
3963+ if (!rte_cryptodev_is_valid_device_data(i))
3964+ continue;
3965
3966 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
3967 int cmp;
3968@@ -508,8 +521,9 @@ rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
3969 void *
3970 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
3971 {
3972- if (rte_crypto_devices[dev_id].feature_flags &
3973- RTE_CRYPTODEV_FF_SECURITY)
3974+ if (dev_id < RTE_CRYPTO_MAX_DEVS &&
3975+ (rte_crypto_devices[dev_id].feature_flags &
3976+ RTE_CRYPTODEV_FF_SECURITY))
3977 return rte_crypto_devices[dev_id].security_ctx;
3978
3979 return NULL;
3980@@ -603,12 +617,14 @@ rte_cryptodev_pmd_allocate(const char *name, int socket_id)
3981
3982 cryptodev->data = cryptodev_data;
3983
3984- snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
3985- "%s", name);
3986+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3987+ snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
3988+ "%s", name);
3989
3990- cryptodev->data->dev_id = dev_id;
3991- cryptodev->data->socket_id = socket_id;
3992- cryptodev->data->dev_started = 0;
3993+ cryptodev->data->dev_id = dev_id;
3994+ cryptodev->data->socket_id = socket_id;
3995+ cryptodev->data->dev_started = 0;
3996+ }
3997
3998 /* init user callbacks */
3999 TAILQ_INIT(&(cryptodev->link_intr_cbs));
4000@@ -646,6 +662,11 @@ rte_cryptodev_queue_pair_count(uint8_t dev_id)
4001 {
4002 struct rte_cryptodev *dev;
4003
4004+ if (!rte_cryptodev_is_valid_device_data(dev_id)) {
4005+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
4006+ return 0;
4007+ }
4008+
4009 dev = &rte_crypto_devices[dev_id];
4010 return dev->data->nb_queue_pairs;
4011 }
4012@@ -979,7 +1000,7 @@ rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
4013 {
4014 struct rte_cryptodev *dev;
4015
4016- if (dev_id >= cryptodev_globals.nb_devs) {
4017+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
4018 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
4019 return;
4020 }
4021@@ -1117,6 +1138,11 @@ rte_cryptodev_sym_session_init(uint8_t dev_id,
4022 uint8_t index;
4023 int ret;
4024
4025+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
4026+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
4027+ return -EINVAL;
4028+ }
4029+
4030 dev = rte_cryptodev_pmd_get_dev(dev_id);
4031
4032 if (sess == NULL || xforms == NULL || dev == NULL)
4033@@ -1214,6 +1240,11 @@ rte_cryptodev_sym_session_clear(uint8_t dev_id,
4034 {
4035 struct rte_cryptodev *dev;
4036
4037+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
4038+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
4039+ return -EINVAL;
4040+ }
4041+
4042 dev = rte_cryptodev_pmd_get_dev(dev_id);
4043
4044 if (dev == NULL || sess == NULL)
4045@@ -1414,8 +1445,14 @@ rte_cryptodev_driver_id_get(const char *name)
4046 const char *
4047 rte_cryptodev_name_get(uint8_t dev_id)
4048 {
4049- struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);
4050+ struct rte_cryptodev *dev;
4051+
4052+ if (!rte_cryptodev_is_valid_device_data(dev_id)) {
4053+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
4054+ return NULL;
4055+ }
4056
4057+ dev = rte_cryptodev_pmd_get_dev(dev_id);
4058 if (dev == NULL)
4059 return NULL;
4060
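The cryptodev hardening above replaces range checks against nb_devs, which counts live devices but says nothing about which slots are populated once the id space has holes, with a per-slot test: id in range and the slot's data pointer non-NULL. The same helper then guards the entry points, and the new primary-process check keeps a secondary from clobbering names already initialized in shared memory. A sketch of slot-based validation:

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_DEVS 64

    struct dev { void *data; };
    static struct dev devices[MAX_DEVS];

    /* A valid id is one whose slot is populated. A count of live
     * devices cannot tell you that once the id space has holes. */
    static int
    dev_id_is_valid(uint8_t id)
    {
        return id < MAX_DEVS && devices[id].data != NULL;
    }
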
4061diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
4062index 089848e..2b40717 100644
4063--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
4064+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
4065@@ -92,7 +92,6 @@ struct rte_cryptodev_global {
4066 struct rte_cryptodev_data *data[RTE_CRYPTO_MAX_DEVS];
4067 /**< Device private data */
4068 uint8_t nb_devs; /**< Number of devices found */
4069- uint8_t max_devs; /**< Max number of devices */
4070 };
4071
4072 /* Cryptodev driver, containing the driver ID */
4073diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
4074index 6ad2301..00fc003 100644
4075--- a/lib/librte_distributor/rte_distributor.c
4076+++ b/lib/librte_distributor/rte_distributor.c
4077@@ -76,8 +76,11 @@ rte_distributor_request_pkt_v1705(struct rte_distributor *d,
4078 }
4079
4080 retptr64 = &(buf->retptr64[0]);
4081- /* Spin while handshake bits are set (scheduler clears it) */
4082- while (unlikely(*retptr64 & RTE_DISTRIB_GET_BUF)) {
4083+ /* Spin while handshake bits are set (scheduler clears it).
4084+ * Sync with worker on GET_BUF flag.
4085+ */
4086+ while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
4087+ & RTE_DISTRIB_GET_BUF)) {
4088 rte_pause();
4089 uint64_t t = rte_rdtsc()+100;
4090
4091@@ -102,8 +105,10 @@ rte_distributor_request_pkt_v1705(struct rte_distributor *d,
4092 /*
4093 * Finally, set the GET_BUF to signal to distributor that cache
4094 * line is ready for processing
4095+ * Sync with distributor to release retptrs
4096 */
4097- *retptr64 |= RTE_DISTRIB_GET_BUF;
4098+ __atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
4099+ __ATOMIC_RELEASE);
4100 }
4101 BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
4102 MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
4103@@ -125,8 +130,11 @@ rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
4104 return (pkts[0]) ? 1 : 0;
4105 }
4106
4107- /* If bit is set, return */
4108- if (buf->bufptr64[0] & RTE_DISTRIB_GET_BUF)
4109+ /* If bit is set, return
4110+ * Sync with distributor to acquire bufptrs
4111+ */
4112+ if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
4113+ & RTE_DISTRIB_GET_BUF)
4114 return -1;
4115
4116 /* since bufptr64 is signed, this should be an arithmetic shift */
4117@@ -141,8 +149,10 @@ rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
4118 * so now we've got the contents of the cacheline into an array of
4119 * mbuf pointers, so toggle the bit so scheduler can start working
4120 * on the next cacheline while we're working.
4121+ * Sync with distributor on GET_BUF flag. Release bufptrs.
4122 */
4123- buf->bufptr64[0] |= RTE_DISTRIB_GET_BUF;
4124+ __atomic_store_n(&(buf->bufptr64[0]),
4125+ buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
4126
4127 return count;
4128 }
4129@@ -201,6 +211,8 @@ rte_distributor_return_pkt_v1705(struct rte_distributor *d,
4130 return -EINVAL;
4131 }
4132
4133+ /* Sync with distributor to acquire retptrs */
4134+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
4135 for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
4136 /* Switch off the return bit first */
4137 buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
4138@@ -209,8 +221,11 @@ rte_distributor_return_pkt_v1705(struct rte_distributor *d,
4139 buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
4140 RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;
4141
4142- /* set the GET_BUF but even if we got no returns */
4143- buf->retptr64[0] |= RTE_DISTRIB_GET_BUF;
4144+ /* set the GET_BUF but even if we got no returns.
4145+ * Sync with distributor on GET_BUF flag. Release retptrs.
4146+ */
4147+ __atomic_store_n(&(buf->retptr64[0]),
4148+ buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
4149
4150 return 0;
4151 }
4152@@ -300,7 +315,9 @@ handle_returns(struct rte_distributor *d, unsigned int wkr)
4153 unsigned int count = 0;
4154 unsigned int i;
4155
4156- if (buf->retptr64[0] & RTE_DISTRIB_GET_BUF) {
4157+ /* Sync on GET_BUF flag. Acquire retptrs. */
4158+ if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
4159+ & RTE_DISTRIB_GET_BUF) {
4160 for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
4161 if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
4162 oldbuf = ((uintptr_t)(buf->retptr64[i] >>
4163@@ -313,8 +330,10 @@ handle_returns(struct rte_distributor *d, unsigned int wkr)
4164 }
4165 d->returns.start = ret_start;
4166 d->returns.count = ret_count;
4167- /* Clear for the worker to populate with more returns */
4168- buf->retptr64[0] = 0;
4169+ /* Clear for the worker to populate with more returns.
4170+ * Sync with distributor on GET_BUF flag. Release retptrs.
4171+ */
4172+ __atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
4173 }
4174 return count;
4175 }
4176@@ -334,7 +353,9 @@ release(struct rte_distributor *d, unsigned int wkr)
4177 struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
4178 unsigned int i;
4179
4180- while (!(d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF))
4181+ /* Sync with worker on GET_BUF flag */
4182+ while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
4183+ & RTE_DISTRIB_GET_BUF))
4184 rte_pause();
4185
4186 handle_returns(d, wkr);
4187@@ -354,8 +375,11 @@ release(struct rte_distributor *d, unsigned int wkr)
4188
4189 d->backlog[wkr].count = 0;
4190
4191- /* Clear the GET bit */
4192- buf->bufptr64[0] &= ~RTE_DISTRIB_GET_BUF;
4193+ /* Clear the GET bit.
4194+ * Sync with worker on GET_BUF flag. Release bufptrs.
4195+ */
4196+ __atomic_store_n(&(buf->bufptr64[0]),
4197+ buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
4198 return buf->count;
4199
4200 }
4201@@ -382,7 +406,9 @@ rte_distributor_process_v1705(struct rte_distributor *d,
4202 if (unlikely(num_mbufs == 0)) {
4203 /* Flush out all non-full cache-lines to workers. */
4204 for (wid = 0 ; wid < d->num_workers; wid++) {
4205- if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF)) {
4206+ /* Sync with worker on GET_BUF flag. */
4207+ if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
4208+ __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
4209 release(d, wid);
4210 handle_returns(d, wid);
4211 }
4212@@ -394,7 +420,9 @@ rte_distributor_process_v1705(struct rte_distributor *d,
4213 uint16_t matches[RTE_DIST_BURST_SIZE];
4214 unsigned int pkts;
4215
4216- if (d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF)
4217+ /* Sync with worker on GET_BUF flag. */
4218+ if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
4219+ __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
4220 d->bufs[wkr].count = 0;
4221
4222 if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
4223@@ -492,7 +520,9 @@ rte_distributor_process_v1705(struct rte_distributor *d,
4224
4225 /* Flush out all non-full cache-lines to workers. */
4226 for (wid = 0 ; wid < d->num_workers; wid++)
4227- if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF))
4228+ /* Sync with worker on GET_BUF flag. */
4229+ if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
4230+ __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
4231 release(d, wid);
4232
4233 return num_mbufs;
4234@@ -598,7 +628,9 @@ rte_distributor_clear_returns_v1705(struct rte_distributor *d)
4235
4236 /* throw away returns, so workers can exit */
4237 for (wkr = 0; wkr < d->num_workers; wkr++)
4238- d->bufs[wkr].retptr64[0] = 0;
4239+ /* Sync with worker. Release retptrs. */
4240+ __atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
4241+ __ATOMIC_RELEASE);
4242 }
4243 BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
4244 MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
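The rte_distributor hunks above convert the GET_BUF handshake from plain flag reads and writes to C11-style acquire/release atomics: the release store publishes the cache line's payload together with the flag, and the matching acquire load on the other side guarantees that payload is visible as soon as the flag is observed. A minimal two-thread sketch of that pairing, using hypothetical names (GET_BUF, slot) rather than the distributor's real structures:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GET_BUF 1ULL            /* hypothetical handshake flag, lowest bit */

    static uint64_t slot;           /* flag bit plus payload, like bufptr64 */

    static void *worker(void *arg)
    {
        uint64_t v;

        (void)arg;
        /* Acquire load pairs with the release store in main(): once the
         * flag is seen, the payload stored with it is visible too. */
        while (!((v = __atomic_load_n(&slot, __ATOMIC_ACQUIRE)) & GET_BUF))
            ;
        printf("payload = %llu\n", (unsigned long long)(v >> 1));
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        /* Publish payload 42 and the flag in one release store. */
        __atomic_store_n(&slot, (42ULL << 1) | GET_BUF, __ATOMIC_RELEASE);
        pthread_join(t, NULL);
        return 0;
    }

Build with -pthread; these are the same __atomic built-ins the patch uses, so on the usual 64-bit targets no libatomic helpers are required.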
4245diff --git a/lib/librte_distributor/rte_distributor_v20.c b/lib/librte_distributor/rte_distributor_v20.c
4246index 5be6efd..6fede5c 100644
4247--- a/lib/librte_distributor/rte_distributor_v20.c
4248+++ b/lib/librte_distributor/rte_distributor_v20.c
4249@@ -62,9 +62,12 @@ rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
4250 union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
4251 int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
4252 | RTE_DISTRIB_GET_BUF;
4253- while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
4254+ while (unlikely(__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED)
4255+ & RTE_DISTRIB_FLAGS_MASK))
4256 rte_pause();
4257- buf->bufptr64 = req;
4258+
4259+ /* Sync with distributor on GET_BUF flag. */
4260+ __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
4261 }
4262 VERSION_SYMBOL(rte_distributor_request_pkt, _v20, 2.0);
4263
4264@@ -73,7 +76,9 @@ rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
4265 unsigned worker_id)
4266 {
4267 union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
4268- if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
4269+ /* Sync with distributor. Acquire bufptr64. */
4270+ if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
4271+ & RTE_DISTRIB_GET_BUF)
4272 return NULL;
4273
4274 /* since bufptr64 is signed, this should be an arithmetic shift */
4275@@ -101,7 +106,8 @@ rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
4276 union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
4277 uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
4278 | RTE_DISTRIB_RETURN_BUF;
4279- buf->bufptr64 = req;
4280+ /* Sync with distributor on RETURN_BUF flag. */
4281+ __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
4282 return 0;
4283 }
4284 VERSION_SYMBOL(rte_distributor_return_pkt, _v20, 2.0);
4285@@ -145,7 +151,8 @@ handle_worker_shutdown(struct rte_distributor_v20 *d, unsigned int wkr)
4286 {
4287 d->in_flight_tags[wkr] = 0;
4288 d->in_flight_bitmask &= ~(1UL << wkr);
4289- d->bufs[wkr].bufptr64 = 0;
4290+ /* Sync with worker. Release bufptr64. */
4291+ __atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);
4292 if (unlikely(d->backlog[wkr].count != 0)) {
4293 /* On return of a packet, we need to move the
4294 * queued packets for this core elsewhere.
4295@@ -189,17 +196,23 @@ process_returns(struct rte_distributor_v20 *d)
4296 ret_count = d->returns.count;
4297
4298 for (wkr = 0; wkr < d->num_workers; wkr++) {
4299-
4300- const int64_t data = d->bufs[wkr].bufptr64;
4301 uintptr_t oldbuf = 0;
4302+ /* Sync with worker. Acquire bufptr64. */
4303+ const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
4304+ __ATOMIC_ACQUIRE);
4305
4306 if (data & RTE_DISTRIB_GET_BUF) {
4307 flushed++;
4308 if (d->backlog[wkr].count)
4309- d->bufs[wkr].bufptr64 =
4310- backlog_pop(&d->backlog[wkr]);
4311+ /* Sync with worker. Release bufptr64. */
4312+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
4313+ backlog_pop(&d->backlog[wkr]),
4314+ __ATOMIC_RELEASE);
4315 else {
4316- d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
4317+ /* Sync with worker on GET_BUF flag. */
4318+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
4319+ RTE_DISTRIB_GET_BUF,
4320+ __ATOMIC_RELEASE);
4321 d->in_flight_tags[wkr] = 0;
4322 d->in_flight_bitmask &= ~(1UL << wkr);
4323 }
4324@@ -235,9 +248,10 @@ rte_distributor_process_v20(struct rte_distributor_v20 *d,
4325 return process_returns(d);
4326
4327 while (next_idx < num_mbufs || next_mb != NULL) {
4328-
4329- int64_t data = d->bufs[wkr].bufptr64;
4330 uintptr_t oldbuf = 0;
4331+ /* Sync with worker. Acquire bufptr64. */
4332+ int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
4333+ __ATOMIC_ACQUIRE);
4334
4335 if (!next_mb) {
4336 next_mb = mbufs[next_idx++];
4337@@ -283,11 +297,16 @@ rte_distributor_process_v20(struct rte_distributor_v20 *d,
4338 (d->backlog[wkr].count || next_mb)) {
4339
4340 if (d->backlog[wkr].count)
4341- d->bufs[wkr].bufptr64 =
4342- backlog_pop(&d->backlog[wkr]);
4343+ /* Sync with worker. Release bufptr64. */
4344+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
4345+ backlog_pop(&d->backlog[wkr]),
4346+ __ATOMIC_RELEASE);
4347
4348 else {
4349- d->bufs[wkr].bufptr64 = next_value;
4350+ /* Sync with worker. Release bufptr64. */
4351+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
4352+ next_value,
4353+ __ATOMIC_RELEASE);
4354 d->in_flight_tags[wkr] = new_tag;
4355 d->in_flight_bitmask |= (1UL << wkr);
4356 next_mb = NULL;
4357@@ -308,13 +327,19 @@ rte_distributor_process_v20(struct rte_distributor_v20 *d,
4358 * if they are ready */
4359 for (wkr = 0; wkr < d->num_workers; wkr++)
4360 if (d->backlog[wkr].count &&
4361- (d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {
4362+ /* Sync with worker. Acquire bufptr64. */
4363+ (__atomic_load_n(&(d->bufs[wkr].bufptr64),
4364+ __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
4365
4366 int64_t oldbuf = d->bufs[wkr].bufptr64 >>
4367 RTE_DISTRIB_FLAG_BITS;
4368+
4369 store_return(oldbuf, d, &ret_start, &ret_count);
4370
4371- d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
4372+ /* Sync with worker. Release bufptr64. */
4373+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
4374+ backlog_pop(&d->backlog[wkr]),
4375+ __ATOMIC_RELEASE);
4376 }
4377
4378 d->returns.start = ret_start;
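One nuance in the v20 conversion above: the worker's spin-wait loads bufptr64 with __ATOMIC_RELAXED rather than acquire, because the worker consumes no data at that point; it only polls until the flag bits clear, and the ordering it does need is supplied by the release store that hands the request over. A sketch of that pattern, again with stand-in names:

    #include <stdint.h>

    #define FLAGS_MASK 0x3           /* hypothetical flag bits */
    #define GET_BUF    0x1

    static int64_t bufptr64;         /* shared slot, as in the v20 code */

    static void request(int64_t req)
    {
        /* Relaxed is enough here: nothing loaded in the loop body is
         * dereferenced; we only poll until the flags clear. */
        while (__atomic_load_n(&bufptr64, __ATOMIC_RELAXED) & FLAGS_MASK)
            ;
        /* The release store is what orders the request hand-over. */
        __atomic_store_n(&bufptr64, req | GET_BUF, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        request(42 << 2);            /* payload sits above the flag bits */
        return 0;
    }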
4379diff --git a/lib/librte_eal/common/eal_common_lcore.c b/lib/librte_eal/common/eal_common_lcore.c
4380index 0db1555..723f2b2 100644
4381--- a/lib/librte_eal/common/eal_common_lcore.c
4382+++ b/lib/librte_eal/common/eal_common_lcore.c
4383@@ -84,17 +84,6 @@ rte_eal_cpu_init(void)
4384 lcore_config[lcore_id].core_role = ROLE_RTE;
4385 lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
4386 lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
4387- if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES) {
4388-#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
4389- lcore_config[lcore_id].socket_id = 0;
4390-#else
4391- RTE_LOG(ERR, EAL, "Socket ID (%u) is greater than "
4392- "RTE_MAX_NUMA_NODES (%d)\n",
4393- lcore_config[lcore_id].socket_id,
4394- RTE_MAX_NUMA_NODES);
4395- return -1;
4396-#endif
4397- }
4398 RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
4399 "core %u on socket %u\n",
4400 lcore_id, lcore_config[lcore_id].core_id,
4401diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
4402index 4b2409a..88a3e10 100644
4403--- a/lib/librte_eal/common/eal_common_log.c
4404+++ b/lib/librte_eal/common/eal_common_log.c
4405@@ -56,7 +56,7 @@ struct rte_logs rte_logs = {
4406 static FILE *default_log_stream;
4407
4408 /**
4409- * This global structure stores some informations about the message
4410+ * This global structure stores some information about the message
4411 * that is currently being processed by one lcore
4412 */
4413 struct log_cur_msg {
4414diff --git a/lib/librte_eal/common/eal_hugepages.h b/lib/librte_eal/common/eal_hugepages.h
4415index 68369f2..31d745d 100644
4416--- a/lib/librte_eal/common/eal_hugepages.h
4417+++ b/lib/librte_eal/common/eal_hugepages.h
4418@@ -41,7 +41,7 @@
4419 #define MAX_HUGEPAGE_PATH PATH_MAX
4420
4421 /**
4422- * Structure used to store informations about hugepages that we mapped
4423+ * Structure used to store information about hugepages that we mapped
4424 * through the files in hugetlbfs.
4425 */
4426 struct hugepage_file {
4427diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h
4428index bc6e592..6f21fde 100644
4429--- a/lib/librte_eal/common/include/rte_dev.h
4430+++ b/lib/librte_eal/common/include/rte_dev.h
4431@@ -281,7 +281,7 @@ __attribute__((used)) = str
4432 * "pci:v8086:d*:sv*:sd*" all PCI devices supported by this driver
4433 * whose vendor id is 0x8086.
4434 *
4435- * The format of the kernel modules list is a parenthesed expression
4436+ * The format of the kernel modules list is a parenthesized expression
4437 * containing logical-and (&) and logical-or (|).
4438 *
4439 * The device pattern and the kmod expression are separated by a space.
4440diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
4441index 27137a0..c3242e0 100644
4442--- a/lib/librte_eal/common/include/rte_version.h
4443+++ b/lib/librte_eal/common/include/rte_version.h
4444@@ -66,7 +66,7 @@ extern "C" {
4445 /**
4446 * Patch level number i.e. the z in yy.mm.z
4447 */
4448-#define RTE_VER_MINOR 9
4449+#define RTE_VER_MINOR 10
4450
4451 /**
4452 * Extra string to be appended to version number
4453diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
4454index f6cbc42..02bd958 100644
4455--- a/lib/librte_eal/common/malloc_elem.c
4456+++ b/lib/librte_eal/common/malloc_elem.c
4457@@ -136,6 +136,11 @@ split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
4458 next_elem->prev = split_pt;
4459 elem->size = old_elem_size;
4460 set_trailer(elem);
4461+ if (elem->pad) {
4462+ /* Update inner padding inner element size. */
4463+ elem = RTE_PTR_ADD(elem, elem->pad);
4464+ elem->size = old_elem_size - elem->pad;
4465+ }
4466 }
4467
4468 /*
4469@@ -298,6 +303,8 @@ malloc_elem_free(struct malloc_elem *elem)
4470 }
4471 malloc_elem_free_list_insert(elem);
4472
4473+ elem->pad = 0;
4474+
4475 /* decrease heap's count of allocated elements */
4476 elem->heap->alloc_count--;
4477
4478diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
4479index fe2278b..1d3ab5a 100644
4480--- a/lib/librte_eal/common/rte_malloc.c
4481+++ b/lib/librte_eal/common/rte_malloc.c
4482@@ -177,7 +177,8 @@ rte_realloc(void *ptr, size_t size, unsigned align)
4483 void *new_ptr = rte_malloc(NULL, size, align);
4484 if (new_ptr == NULL)
4485 return NULL;
4486- const unsigned old_size = elem->size - MALLOC_ELEM_OVERHEAD;
4487+ /* elem: |pad|data_elem|data|trailer| */
4488+ const size_t old_size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
4489 rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
4490 rte_free(ptr);
4491
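The rte_realloc() fix above follows from the element layout given in the patch's own comment: for an aligned allocation the element is |pad|data_elem|data|trailer|, and elem->size counts the pad, so the usable payload is the size minus the pad minus the header/trailer overhead. A toy computation with made-up sizes (a 64-byte stand-in for MALLOC_ELEM_OVERHEAD):

    #include <stddef.h>
    #include <stdio.h>

    struct elem {                    /* stand-in for DPDK's bookkeeping */
        size_t size;                 /* total element size, pad included */
        size_t pad;                  /* alignment padding before the data */
    };

    #define OVERHEAD 64              /* assumed header + trailer cost */

    int main(void)
    {
        struct elem e = { .size = 1024, .pad = 128 };
        size_t buggy = e.size - OVERHEAD;           /* old code: 960 */
        size_t fixed = e.size - e.pad - OVERHEAD;   /* patched:  832 */
        printf("buggy=%zu fixed=%zu\n", buggy, fixed);
        return 0;
    }

With the old arithmetic, reallocating a padded element copied past the end of the payload, which is why the fix lands together with resetting elem->pad on free in malloc_elem.c.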
4492diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
4493index 1f92294..71a07dd 100644
4494--- a/lib/librte_eal/common/rte_service.c
4495+++ b/lib/librte_eal/common/rte_service.c
4496@@ -98,10 +98,12 @@ static struct rte_service_spec_impl *rte_services;
4497 static struct core_state *lcore_states;
4498 static uint32_t rte_service_library_initialized;
4499
4500-int32_t rte_service_init(void)
4501+int32_t
4502+rte_service_init(void)
4503 {
4504 if (rte_service_library_initialized) {
4505- printf("service library init() called, init flag %d\n",
4506+ RTE_LOG(NOTICE, EAL,
4507+ "service library init() called, init flag %d\n",
4508 rte_service_library_initialized);
4509 return -EALREADY;
4510 }
4511@@ -110,14 +112,14 @@ int32_t rte_service_init(void)
4512 sizeof(struct rte_service_spec_impl),
4513 RTE_CACHE_LINE_SIZE);
4514 if (!rte_services) {
4515- printf("error allocating rte services array\n");
4516+ RTE_LOG(ERR, EAL, "error allocating rte services array\n");
4517 goto fail_mem;
4518 }
4519
4520 lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
4521 sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
4522 if (!lcore_states) {
4523- printf("error allocating core states array\n");
4524+ RTE_LOG(ERR, EAL, "error allocating core states array\n");
4525 goto fail_mem;
4526 }
4527
4528@@ -136,10 +138,8 @@ int32_t rte_service_init(void)
4529 rte_service_library_initialized = 1;
4530 return 0;
4531 fail_mem:
4532- if (rte_services)
4533- rte_free(rte_services);
4534- if (lcore_states)
4535- rte_free(lcore_states);
4536+ rte_free(rte_services);
4537+ rte_free(lcore_states);
4538 return -ENOMEM;
4539 }
4540
4541@@ -384,8 +384,8 @@ service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
4542 return 0;
4543 }
4544
4545-int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
4546- uint32_t serialize_mt_unsafe)
4547+int32_t
4548+rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
4549 {
4550 /* run service on calling core, using all-ones as the service mask */
4551 if (!service_valid(id))
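The error-path cleanup in rte_service_init() above relies on rte_free() being a no-op when passed NULL, the same contract as free(3), which makes the NULL guards redundant. A sketch of the pattern using the standard allocator:

    #include <stdlib.h>

    static int init(void)
    {
        int *a = NULL, *b = NULL;

        a = malloc(64);
        if (a == NULL)
            goto fail;
        b = malloc(64);
        if (b == NULL)
            goto fail;
        free(a);
        free(b);
        return 0;
    fail:
        free(a);                     /* safe even when still NULL */
        free(b);
        return -1;
    }

    int main(void)
    {
        return init();
    }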
4552diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/lib/librte_eal/linuxapp/kni/kni_net.c
4553index db9f489..1802f9d 100644
4554--- a/lib/librte_eal/linuxapp/kni/kni_net.c
4555+++ b/lib/librte_eal/linuxapp/kni/kni_net.c
4556@@ -37,7 +37,7 @@
4557 #include <linux/delay.h>
4558
4559 #include <exec-env/rte_kni_common.h>
4560-#include <kni_fifo.h>
4561+#include "kni_fifo.h"
4562
4563 #include "compat.h"
4564 #include "kni_dev.h"
4565diff --git a/lib/librte_efd/rte_efd.c b/lib/librte_efd/rte_efd.c
4566index 34e0920..f449180 100644
4567--- a/lib/librte_efd/rte_efd.c
4568+++ b/lib/librte_efd/rte_efd.c
4569@@ -208,7 +208,7 @@ struct efd_offline_group_rules {
4570 /**< Array with all values of the keys of the group. */
4571
4572 uint8_t bin_id[EFD_MAX_GROUP_NUM_RULES];
4573- /**< Stores the bin for each correspending key to
4574+ /**< Stores the bin for each corresponding key to
4575 * avoid having to recompute it
4576 */
4577 };
4578diff --git a/lib/librte_ether/rte_eth_ctrl.h b/lib/librte_ether/rte_eth_ctrl.h
4579index 8386904..c1c97bd 100644
4580--- a/lib/librte_ether/rte_eth_ctrl.h
4581+++ b/lib/librte_ether/rte_eth_ctrl.h
4582@@ -36,7 +36,7 @@
4583
4584 #include <stdint.h>
4585 #include <rte_common.h>
4586-#include "rte_ether.h"
4587+#include <rte_ether.h>
4588
4589 /**
4590 * @file
4591diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
4592index 7998a3a..f116707 100644
4593--- a/lib/librte_ether/rte_ethdev.c
4594+++ b/lib/librte_ether/rte_ethdev.c
4595@@ -63,8 +63,8 @@
4596 #include <rte_errno.h>
4597 #include <rte_spinlock.h>
4598 #include <rte_string_fns.h>
4599+#include <rte_ether.h>
4600
4601-#include "rte_ether.h"
4602 #include "rte_ethdev.h"
4603 #include "ethdev_profile.h"
4604
4605@@ -2051,6 +2051,12 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
4606 dev_info->rx_desc_lim = lim;
4607 dev_info->tx_desc_lim = lim;
4608
4609+ /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
4610+ dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
4611+ RTE_MAX_QUEUES_PER_PORT);
4612+ dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
4613+ RTE_MAX_QUEUES_PER_PORT);
4614+
4615 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
4616 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
4617 dev_info->driver_name = dev->device->driver->name;
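The rte_ethdev.c hunk above caps the advertised queue counts at RTE_MAX_QUEUES_PER_PORT because applications routinely size their per-port bookkeeping with that build-time constant; a device advertising more queues could steer callers into out-of-bounds indexing. A stand-in sketch of the clamp (the names and the limit value are assumptions, not the real configuration):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_QUEUES_PER_PORT 128          /* assumed build-time limit */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint16_t dev_max_rxq = 512;          /* hypothetical device value */
        uint16_t usable = MIN(dev_max_rxq, MAX_QUEUES_PER_PORT);

        printf("usable rx queues: %u\n", usable);
        return 0;
    }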
4618diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
4619index 1a730d3..16a1508 100644
4620--- a/lib/librte_ether/rte_ethdev.h
4621+++ b/lib/librte_ether/rte_ethdev.h
4622@@ -182,8 +182,8 @@ extern "C" {
4623 #include <rte_errno.h>
4624 #include <rte_common.h>
4625 #include <rte_config.h>
4626+#include <rte_ether.h>
4627
4628-#include "rte_ether.h"
4629 #include "rte_eth_ctrl.h"
4630 #include "rte_dev_info.h"
4631
4632@@ -2696,7 +2696,7 @@ int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
4633 * Otherwise, disable VLAN filtering of VLAN packets tagged with *vlan_id*.
4634 * @return
4635 * - (0) if successful.
4636- * - (-ENOSUP) if hardware-assisted VLAN filtering not configured.
4637+ * - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
4638 * - (-ENODEV) if *port_id* invalid.
4639 * - (-ENOSYS) if VLAN filtering on *port_id* disabled.
4640 * - (-EINVAL) if *vlan_id* > 4095.
4641@@ -2718,7 +2718,7 @@ int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
4642 * If 0, Disable VLAN Stripping of the receive queue of the Ethernet port.
4643 * @return
4644 * - (0) if successful.
4645- * - (-ENOSUP) if hardware-assisted VLAN stripping not configured.
4646+ * - (-ENOTSUP) if hardware-assisted VLAN stripping not configured.
4647 * - (-ENODEV) if *port_id* invalid.
4648 * - (-EINVAL) if *rx_queue_id* invalid.
4649 */
4650@@ -2738,7 +2738,7 @@ int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
4651 * The Tag Protocol ID
4652 * @return
4653 * - (0) if successful.
4654- * - (-ENOSUP) if hardware-assisted VLAN TPID setup is not supported.
4655+ * - (-ENOTSUP) if hardware-assisted VLAN TPID setup is not supported.
4656 * - (-ENODEV) if *port_id* invalid.
4657 */
4658 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
4659@@ -2762,7 +2762,7 @@ int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
4660 * ETH_VLAN_EXTEND_OFFLOAD
4661 * @return
4662 * - (0) if successful.
4663- * - (-ENOSUP) if hardware-assisted VLAN filtering not configured.
4664+ * - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
4665 * - (-ENODEV) if *port_id* invalid.
4666 */
4667 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
4668diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
4669index 47c88ea..8cbf38d 100644
4670--- a/lib/librte_ether/rte_flow.h
4671+++ b/lib/librte_ether/rte_flow.h
4672@@ -807,7 +807,7 @@ struct rte_flow_item_esp {
4673 #ifndef __cplusplus
4674 static const struct rte_flow_item_esp rte_flow_item_esp_mask = {
4675 .hdr = {
4676- .spi = 0xffffffff,
4677+ .spi = RTE_BE32(0xffffffff),
4678 },
4679 };
4680 #endif
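The rte_flow.h change above wraps the default ESP SPI mask in RTE_BE32, since the SPI field is carried in network byte order. An all-ones literal happens to be byte-order invariant, so this reads as an annotation/consistency fix, but the conversion matters as soon as a mask is partial. A sketch using htonl() as a runtime stand-in for the compile-time RTE_BE32():

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t host_mask = 0xffff0000;     /* hypothetical partial mask */
        uint32_t wire_mask = htonl(host_mask);

        /* On little-endian hosts the two differ; matching the on-wire
         * SPI against host_mask instead of wire_mask would test the
         * wrong bytes. */
        printf("host=%08x wire=%08x\n", host_mask, wire_mask);
        return 0;
    }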
4681diff --git a/lib/librte_power/guest_channel.c b/lib/librte_power/guest_channel.c
4682index fa5de0f..2c3ee7d 100644
4683--- a/lib/librte_power/guest_channel.c
4684+++ b/lib/librte_power/guest_channel.c
4685@@ -48,7 +48,7 @@
4686
4687 #define RTE_LOGTYPE_GUEST_CHANNEL RTE_LOGTYPE_USER1
4688
4689-static int global_fds[RTE_MAX_LCORE];
4690+static int global_fds[RTE_MAX_LCORE] = { [0 ... RTE_MAX_LCORE-1] = -1 };
4691
4692 int
4693 guest_channel_host_connect(const char *path, unsigned lcore_id)
4694@@ -64,7 +64,7 @@ guest_channel_host_connect(const char *path, unsigned lcore_id)
4695 return -1;
4696 }
4697 /* check if path is already open */
4698- if (global_fds[lcore_id] != 0) {
4699+ if (global_fds[lcore_id] != -1) {
4700 RTE_LOG(ERR, GUEST_CHANNEL, "Channel(%u) is already open with fd %d\n",
4701 lcore_id, global_fds[lcore_id]);
4702 return -1;
4703@@ -113,7 +113,7 @@ guest_channel_host_connect(const char *path, unsigned lcore_id)
4704 return 0;
4705 error:
4706 close(fd);
4707- global_fds[lcore_id] = 0;
4708+ global_fds[lcore_id] = -1;
4709 return -1;
4710 }
4711
4712@@ -129,7 +129,7 @@ guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id)
4713 return -1;
4714 }
4715
4716- if (global_fds[lcore_id] == 0) {
4717+ if (global_fds[lcore_id] < 0) {
4718 RTE_LOG(ERR, GUEST_CHANNEL, "Channel is not connected\n");
4719 return -1;
4720 }
4721@@ -163,8 +163,8 @@ guest_channel_host_disconnect(unsigned lcore_id)
4722 lcore_id, RTE_MAX_LCORE-1);
4723 return;
4724 }
4725- if (global_fds[lcore_id] == 0)
4726+ if (global_fds[lcore_id] < 0)
4727 return;
4728 close(global_fds[lcore_id]);
4729- global_fds[lcore_id] = 0;
4730+ global_fds[lcore_id] = -1;
4731 }
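The guest_channel.c fix above exists because 0 is a valid file descriptor (stdin), so a zero-initialized table cannot distinguish "slot unused" from "connected on fd 0"; -1 is the conventional sentinel. The patch fills the table with a range designator, sketched here with a small stand-in size:

    #include <stdio.h>

    #define MAX_LCORE 4                      /* stand-in for RTE_MAX_LCORE */

    /* [first ... last] designators are a GCC/Clang extension. */
    static int fds[MAX_LCORE] = { [0 ... MAX_LCORE - 1] = -1 };

    int main(void)
    {
        for (int i = 0; i < MAX_LCORE; i++)
            if (fds[i] < 0)
                printf("slot %d: not connected\n", i);
        return 0;
    }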
4732diff --git a/lib/librte_power/rte_power_acpi_cpufreq.c b/lib/librte_power/rte_power_acpi_cpufreq.c
4733index 9a2fa81..63ad029 100644
4734--- a/lib/librte_power/rte_power_acpi_cpufreq.c
4735+++ b/lib/librte_power/rte_power_acpi_cpufreq.c
4736@@ -57,7 +57,7 @@
4737
4738 #define FOPEN_OR_ERR_RET(f, retval) do { \
4739 if ((f) == NULL) { \
4740- RTE_LOG(ERR, POWER, "File not openned\n"); \
4741+ RTE_LOG(ERR, POWER, "File not opened\n"); \
4742 return retval; \
4743 } \
4744 } while (0)
4745@@ -136,7 +136,7 @@ set_freq_internal(struct rte_power_info *pi, uint32_t idx)
4746 if (idx == pi->curr_idx)
4747 return 0;
4748
4749- POWER_DEBUG_TRACE("Freqency[%u] %u to be set for lcore %u\n",
4750+ POWER_DEBUG_TRACE("Frequency[%u] %u to be set for lcore %u\n",
4751 idx, pi->freqs[idx], pi->lcore_id);
4752 if (fseek(pi->f, 0, SEEK_SET) < 0) {
4753 RTE_LOG(ERR, POWER, "Fail to set file position indicator to 0 "
4754@@ -531,7 +531,8 @@ rte_power_acpi_cpufreq_freq_up(unsigned lcore_id)
4755 }
4756
4757 pi = &lcore_power_info[lcore_id];
4758- if (pi->curr_idx == 0)
4759+ if (pi->curr_idx == 0 ||
4760+ (pi->curr_idx == 1 && pi->turbo_available && !pi->turbo_enable))
4761 return 0;
4762
4763 /* Frequencies in the array are from high to low. */
4764diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
4765index 935bfac..11e26c5 100644
4766--- a/lib/librte_ring/rte_ring.h
4767+++ b/lib/librte_ring/rte_ring.h
4768@@ -543,11 +543,14 @@ __rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
4769 return 0;
4770
4771 *new_head = *old_head + n;
4772- if (is_sc)
4773- r->cons.head = *new_head, success = 1;
4774- else
4775+ if (is_sc) {
4776+ r->cons.head = *new_head;
4777+ rte_smp_rmb();
4778+ success = 1;
4779+ } else {
4780 success = rte_atomic32_cmpset(&r->cons.head, *old_head,
4781 *new_head);
4782+ }
4783 } while (unlikely(success == 0));
4784 return n;
4785 }
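The rte_ring.h hunk above adds a read barrier to the single-consumer path (and untangles a comma operator into braces). On weakly ordered CPUs such as arm64, the later loads of the ring slots could otherwise be satisfied before the load of the producer's tail, handing the consumer objects the producer has not yet published; rte_smp_rmb() forbids that load/load reordering. A self-contained sketch of the idea, with an acquire fence standing in for rte_smp_rmb():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t prod_tail;               /* producer publish index */
    static const char *ring[8];              /* hypothetical ring storage */

    static const char *consume(uint32_t cons_head)
    {
        uint32_t tail = __atomic_load_n(&prod_tail, __ATOMIC_RELAXED);

        if (tail == cons_head)
            return NULL;                     /* ring empty */
        /* Load/load barrier: the slot read below must not be hoisted
         * above the tail read on weakly ordered CPUs. */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        return ring[cons_head & 7];          /* mask = ring size - 1 */
    }

    int main(void)
    {
        ring[0] = "obj";
        __atomic_store_n(&prod_tail, 1, __ATOMIC_RELEASE);
        printf("%s\n", consume(0));
        return 0;
    }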
4786diff --git a/lib/librte_security/rte_security.h b/lib/librte_security/rte_security.h
4787index 2b609cb..2c8cf6b 100644
4788--- a/lib/librte_security/rte_security.h
4789+++ b/lib/librte_security/rte_security.h
4790@@ -143,14 +143,14 @@ struct rte_security_ipsec_tunnel_param {
4791 * IPsec Security Association option flags
4792 */
4793 struct rte_security_ipsec_sa_options {
4794- /**< Extended Sequence Numbers (ESN)
4795+ /** Extended Sequence Numbers (ESN)
4796 *
4797 * * 1: Use extended (64 bit) sequence numbers
4798 * * 0: Use normal sequence numbers
4799 */
4800 uint32_t esn : 1;
4801
4802- /**< UDP encapsulation
4803+ /** UDP encapsulation
4804 *
4805 * * 1: Do UDP encapsulation/decapsulation so that IPSEC packets can
4806 * traverse through NAT boxes.
4807@@ -158,7 +158,7 @@ struct rte_security_ipsec_sa_options {
4808 */
4809 uint32_t udp_encap : 1;
4810
4811- /**< Copy DSCP bits
4812+ /** Copy DSCP bits
4813 *
4814 * * 1: Copy IPv4 or IPv6 DSCP bits from inner IP header to
4815 * the outer IP header in encapsulation, and vice versa in
4816@@ -167,7 +167,7 @@ struct rte_security_ipsec_sa_options {
4817 */
4818 uint32_t copy_dscp : 1;
4819
4820- /**< Copy IPv6 Flow Label
4821+ /** Copy IPv6 Flow Label
4822 *
4823 * * 1: Copy IPv6 flow label from inner IPv6 header to the
4824 * outer IPv6 header.
4825@@ -175,7 +175,7 @@ struct rte_security_ipsec_sa_options {
4826 */
4827 uint32_t copy_flabel : 1;
4828
4829- /**< Copy IPv4 Don't Fragment bit
4830+ /** Copy IPv4 Don't Fragment bit
4831 *
4832 * * 1: Copy the DF bit from the inner IPv4 header to the outer
4833 * IPv4 header.
4834@@ -183,7 +183,7 @@ struct rte_security_ipsec_sa_options {
4835 */
4836 uint32_t copy_df : 1;
4837
4838- /**< Decrement inner packet Time To Live (TTL) field
4839+ /** Decrement inner packet Time To Live (TTL) field
4840 *
4841 * * 1: In tunnel mode, decrement inner packet IPv4 TTL or
4842 * IPv6 Hop Limit after tunnel decapsulation, or before tunnel
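The rte_security.h hunks above are a Doxygen fix: the '<' in /**< binds a comment to the member preceding it, so using that form on comments placed before a member attached each description to the wrong field. The two correct forms, on a hypothetical struct:

    #include <stdint.h>

    struct sa_options_sketch {
        /** Leading comment: documents the member below (esn). */
        uint32_t esn : 1;

        uint32_t udp_encap : 1; /**< Trailing comment: '<' points back. */
    };

    int main(void)
    {
        struct sa_options_sketch o = { .esn = 1, .udp_encap = 0 };
        return o.esn;
    }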
4843diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
4844index 3fc6034..eccaa3e 100644
4845--- a/lib/librte_vhost/rte_vhost.h
4846+++ b/lib/librte_vhost/rte_vhost.h
4847@@ -204,7 +204,7 @@ rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
4848 * @param vid
4849 * vhost device ID
4850 * @param addr
4851- * the starting address for write
4852+ * the starting address for write (in guest physical address space)
4853 * @param len
4854 * the length to write
4855 */
4856diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
4857index 2fa7ea0..1933f47 100644
4858--- a/lib/librte_vhost/socket.c
4859+++ b/lib/librte_vhost/socket.c
4860@@ -150,7 +150,7 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int max_fds,
4861 }
4862
4863 if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
4864- RTE_LOG(ERR, VHOST_CONFIG, "truncted msg\n");
4865+ RTE_LOG(ERR, VHOST_CONFIG, "truncated msg\n");
4866 return -1;
4867 }
4868
4869@@ -675,6 +675,14 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
4870 }
4871 vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
4872
4873+ if (vsocket->dequeue_zero_copy &&
4874+ (flags & RTE_VHOST_USER_IOMMU_SUPPORT)) {
4875+ RTE_LOG(ERR, VHOST_CONFIG,
4876+ "error: enabling dequeue zero copy and IOMMU features "
4877+ "simultaneously is not supported\n");
4878+ goto out_mutex;
4879+ }
4880+
4881 /*
4882 * Set the supported features correctly for the builtin vhost-user
4883 * net driver.
4884diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
4885index a8ed40b..6a8f54f 100644
4886--- a/lib/librte_vhost/vhost.c
4887+++ b/lib/librte_vhost/vhost.c
4888@@ -110,6 +110,180 @@ get_device(int vid)
4889 return dev;
4890 }
4891
4892+#define VHOST_LOG_PAGE 4096
4893+
4894+/*
4895+ * Atomically set a bit in memory.
4896+ */
4897+static __rte_always_inline void
4898+vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
4899+{
4900+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
4901+ /*
4902+ * __sync_ built-ins are deprecated, but __atomic_ ones
4903+ * are sub-optimized in older GCC versions.
4904+ */
4905+ __sync_fetch_and_or_1(addr, (1U << nr));
4906+#else
4907+ __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
4908+#endif
4909+}
4910+
4911+static __rte_always_inline void
4912+vhost_log_page(uint8_t *log_base, uint64_t page)
4913+{
4914+ vhost_set_bit(page % 8, &log_base[page / 8]);
4915+}
4916+
4917+void
4918+__vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
4919+{
4920+ uint64_t page;
4921+
4922+ if (unlikely(!dev->log_base || !len))
4923+ return;
4924+
4925+ if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
4926+ return;
4927+
4928+ /* To make sure guest memory updates are committed before logging */
4929+ rte_smp_wmb();
4930+
4931+ page = addr / VHOST_LOG_PAGE;
4932+ while (page * VHOST_LOG_PAGE < addr + len) {
4933+ vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
4934+ page += 1;
4935+ }
4936+}
4937+
4938+void
4939+__vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
4940+ uint64_t iova, uint64_t len)
4941+{
4942+ uint64_t hva, gpa, map_len;
4943+ map_len = len;
4944+
4945+ hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
4946+ if (map_len != len) {
4947+ RTE_LOG(ERR, VHOST_CONFIG,
4948+ "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
4949+ iova);
4950+ return;
4951+ }
4952+
4953+ gpa = hva_to_gpa(dev, hva, len);
4954+ if (gpa)
4955+ __vhost_log_write(dev, gpa, len);
4956+}
4957+
4958+void
4959+__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
4960+{
4961+ unsigned long *log_base;
4962+ int i;
4963+
4964+ if (unlikely(!dev->log_base))
4965+ return;
4966+
4967+ log_base = (unsigned long *)(uintptr_t)dev->log_base;
4968+
4969+ /*
4970+ * It is expected a write memory barrier has been issued
4971+ * before this function is called.
4972+ */
4973+
4974+ for (i = 0; i < vq->log_cache_nb_elem; i++) {
4975+ struct log_cache_entry *elem = vq->log_cache + i;
4976+
4977+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
4978+ /*
4979+ * '__sync' builtins are deprecated, but '__atomic' ones
4980+ * are sub-optimized in older GCC versions.
4981+ */
4982+ __sync_fetch_and_or(log_base + elem->offset, elem->val);
4983+#else
4984+ __atomic_fetch_or(log_base + elem->offset, elem->val,
4985+ __ATOMIC_RELAXED);
4986+#endif
4987+ }
4988+
4989+ rte_smp_wmb();
4990+
4991+ vq->log_cache_nb_elem = 0;
4992+}
4993+
4994+static __rte_always_inline void
4995+vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
4996+ uint64_t page)
4997+{
4998+ uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
4999+ uint32_t offset = page / (sizeof(unsigned long) << 3);
5000+ int i;
The diff has been truncated for viewing.
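The vhost hunks above (cut off by the truncation) add dirty-page logging for live migration: every VHOST_LOG_PAGE-sized page touched by a write is flagged in a bitmap shared with the front end, with a write barrier ordering the guest-memory update before the log update. A simplified, single-threaded sketch of the page/bit arithmetic; the real code sets bits atomically via vhost_set_bit() because several threads may log at once:

    #include <stdint.h>
    #include <stdio.h>

    #define LOG_PAGE 4096                    /* as VHOST_LOG_PAGE above */

    /* Mark every LOG_PAGE-sized page overlapped by [addr, addr + len). */
    static void log_write(uint8_t *bitmap, uint64_t addr, uint64_t len)
    {
        uint64_t page = addr / LOG_PAGE;

        while (page * LOG_PAGE < addr + len) {
            bitmap[page / 8] |= (uint8_t)(1U << (page % 8));
            page++;
        }
    }

    int main(void)
    {
        uint8_t bitmap[16] = { 0 };

        log_write(bitmap, 4000, 200);        /* straddles pages 0 and 1 */
        printf("bitmap[0] = 0x%02x\n", bitmap[0]);  /* prints 0x03 */
        return 0;
    }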
