Merge ~paelzer/ubuntu/+source/dpdk:bionic-17.11.3 into ubuntu/+source/dpdk:ubuntu/bionic-devel

Proposed by Christian Ehrhardt 
Status: Merged
Approved by: Christian Ehrhardt 
Approved revision: 0c8d5c69539ab6b122ea60efb1042e924a04b57f
Merge reported by: Christian Ehrhardt 
Merged at revision: 0c8d5c69539ab6b122ea60efb1042e924a04b57f
Proposed branch: ~paelzer/ubuntu/+source/dpdk:bionic-17.11.3
Merge into: ubuntu/+source/dpdk:ubuntu/bionic-devel
Diff against target: 18265 lines (+5242/-3976) (has conflicts)
176 files modified
app/proc_info/main.c (+1/-1)
app/test-crypto-perf/cperf_test_common.c (+1/-1)
app/test-crypto-perf/cperf_test_vector_parsing.c (+3/-4)
app/test-crypto-perf/main.c (+10/-4)
app/test-pmd/cmdline.c (+1/-1)
app/test-pmd/cmdline_flow.c (+2/-1)
app/test-pmd/config.c (+15/-21)
app/test-pmd/parameters.c (+2/-10)
app/test-pmd/testpmd.c (+57/-17)
app/test-pmd/testpmd.h (+2/-0)
config/defconfig_i686-native-linuxapp-icc (+0/-5)
debian/changelog (+36/-0)
debian/control (+26/-0)
debian/librte-pmd-bond17.11.symbols (+1/-0)
debian/librte-pmd-mlx4-17.11.symbols (+5/-0)
debian/librte-pmd-mlx5-17.11.symbols (+5/-0)
debian/librte-vhost17.11.symbols (+1/-0)
debian/patches/app-testpmd-add-ethernet-peer-command.patch (+168/-0)
debian/patches/net-mlx5-fix-build-with-rdma-core-v19.patch (+71/-0)
debian/patches/series (+2/-0)
debian/rules (+2/-0)
doc/guides/nics/features/mlx5.ini (+1/-0)
doc/guides/nics/mlx5.rst (+5/-1)
doc/guides/nics/nfp.rst (+22/-21)
doc/guides/rel_notes/release_17_11.rst (+215/-0)
drivers/bus/dpaa/base/fman/fman.c (+2/-0)
drivers/bus/fslmc/fslmc_bus.c (+3/-2)
drivers/bus/pci/pci_common.c (+12/-9)
drivers/bus/pci/rte_bus_pci.h (+3/-0)
drivers/bus/vdev/vdev.c (+1/-1)
drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h (+13/-13)
drivers/crypto/scheduler/rte_cryptodev_scheduler.c (+6/-2)
drivers/crypto/scheduler/rte_cryptodev_scheduler.h (+1/-1)
drivers/crypto/scheduler/scheduler_multicore.c (+39/-14)
drivers/crypto/scheduler/scheduler_pkt_size_distr.c (+3/-1)
drivers/crypto/scheduler/scheduler_pmd.c (+52/-16)
drivers/crypto/scheduler/scheduler_pmd_ops.c (+7/-2)
drivers/crypto/scheduler/scheduler_pmd_private.h (+1/-1)
drivers/crypto/zuc/rte_zuc_pmd.c (+64/-44)
drivers/event/dpaa2/dpaa2_eventdev.c (+0/-5)
drivers/event/dpaa2/dpaa2_eventdev.h (+0/-1)
drivers/net/af_packet/rte_eth_af_packet.c (+1/-1)
drivers/net/bnx2x/bnx2x.c (+14/-14)
drivers/net/bnx2x/elink.c (+173/-171)
drivers/net/bnxt/bnxt_ethdev.c (+6/-8)
drivers/net/bnxt/bnxt_filter.c (+8/-0)
drivers/net/bnxt/bnxt_hwrm.c (+9/-5)
drivers/net/bnxt/bnxt_nvm_defs.h (+30/-6)
drivers/net/bnxt/bnxt_rxq.c (+4/-2)
drivers/net/bnxt/bnxt_rxr.c (+5/-1)
drivers/net/bnxt/bnxt_rxr.h (+18/-4)
drivers/net/bonding/rte_eth_bond_api.c (+5/-2)
drivers/net/bonding/rte_eth_bond_args.c (+1/-1)
drivers/net/bonding/rte_eth_bond_pmd.c (+29/-7)
drivers/net/bonding/rte_eth_bond_private.h (+1/-0)
drivers/net/bonding/rte_pmd_bond_version.map (+1/-0)
drivers/net/dpaa/dpaa_ethdev.c (+5/-2)
drivers/net/dpaa2/dpaa2_ethdev.c (+7/-4)
drivers/net/enic/base/vnic_dev.c (+14/-10)
drivers/net/enic/base/vnic_dev.h (+1/-0)
drivers/net/enic/enic_main.c (+11/-0)
drivers/net/failsafe/failsafe.c (+3/-1)
drivers/net/failsafe/failsafe_ether.c (+30/-0)
drivers/net/failsafe/failsafe_ops.c (+7/-2)
drivers/net/failsafe/failsafe_private.h (+5/-0)
drivers/net/i40e/base/i40e_register.h (+12/-12)
drivers/net/i40e/i40e_ethdev.c (+153/-45)
drivers/net/i40e/i40e_ethdev.h (+3/-1)
drivers/net/i40e/i40e_flow.c (+1/-1)
drivers/net/i40e/rte_pmd_i40e.c (+4/-2)
drivers/net/ixgbe/ixgbe_ethdev.c (+45/-34)
drivers/net/kni/rte_eth_kni.c (+1/-1)
drivers/net/liquidio/lio_ethdev.c (+5/-0)
drivers/net/mlx4/mlx4.c (+13/-5)
drivers/net/mlx4/mlx4.h (+3/-0)
drivers/net/mlx4/mlx4_ethdev.c (+12/-180)
drivers/net/mlx4/mlx4_flow.c (+23/-11)
drivers/net/mlx4/mlx4_flow.h (+1/-0)
drivers/net/mlx4/mlx4_intr.c (+38/-3)
drivers/net/mlx4/mlx4_rxq.c (+7/-0)
drivers/net/mlx4/mlx4_rxtx.c (+2/-1)
drivers/net/mlx4/mlx4_rxtx.h (+1/-1)
drivers/net/mlx5/mlx5.c (+340/-185)
drivers/net/mlx5/mlx5.h (+128/-139)
drivers/net/mlx5/mlx5_defs.h (+12/-2)
drivers/net/mlx5/mlx5_ethdev.c (+304/-714)
drivers/net/mlx5/mlx5_flow.c (+627/-520)
drivers/net/mlx5/mlx5_mac.c (+31/-18)
drivers/net/mlx5/mlx5_mr.c (+101/-96)
drivers/net/mlx5/mlx5_rss.c (+71/-105)
drivers/net/mlx5/mlx5_rxmode.c (+24/-4)
drivers/net/mlx5/mlx5_rxq.c (+395/-317)
drivers/net/mlx5/mlx5_rxtx.c (+71/-45)
drivers/net/mlx5/mlx5_rxtx.h (+99/-80)
drivers/net/mlx5/mlx5_rxtx_vec.c (+15/-11)
drivers/net/mlx5/mlx5_rxtx_vec_neon.h (+36/-29)
drivers/net/mlx5/mlx5_rxtx_vec_sse.h (+1/-1)
drivers/net/mlx5/mlx5_socket.c (+103/-69)
drivers/net/mlx5/mlx5_stats.c (+125/-123)
drivers/net/mlx5/mlx5_trigger.c (+140/-118)
drivers/net/mlx5/mlx5_txq.c (+214/-165)
drivers/net/mlx5/mlx5_utils.h (+15/-14)
drivers/net/mlx5/mlx5_vlan.c (+41/-65)
drivers/net/mrvl/mrvl_ethdev.c (+4/-3)
drivers/net/nfp/nfp_net.c (+10/-8)
drivers/net/nfp/nfp_nfpu.c (+22/-4)
drivers/net/null/rte_eth_null.c (+1/-1)
drivers/net/octeontx/octeontx_ethdev.c (+3/-3)
drivers/net/pcap/rte_eth_pcap.c (+1/-1)
drivers/net/qede/base/bcm_osal.c (+7/-2)
drivers/net/qede/base/ecore.h (+6/-0)
drivers/net/qede/base/ecore_dcbx.c (+5/-0)
drivers/net/qede/base/ecore_dcbx_api.h (+1/-0)
drivers/net/qede/base/ecore_dev.c (+8/-2)
drivers/net/qede/base/ecore_hsi_common.h (+4/-1)
drivers/net/qede/base/ecore_l2.c (+5/-7)
drivers/net/qede/base/ecore_l2_api.h (+1/-1)
drivers/net/qede/base/ecore_sp_commands.c (+20/-9)
drivers/net/qede/base/ecore_sriov.c (+1/-2)
drivers/net/qede/base/ecore_vf.c (+2/-3)
drivers/net/qede/base/ecore_vfpf_if.h (+7/-1)
drivers/net/qede/qede_ethdev.c (+186/-158)
drivers/net/qede/qede_ethdev.h (+2/-1)
drivers/net/qede/qede_fdir.c (+2/-2)
drivers/net/qede/qede_rxtx.c (+18/-11)
drivers/net/ring/rte_eth_ring.c (+1/-1)
drivers/net/sfc/base/efx_port.c (+1/-1)
drivers/net/sfc/efsys.h (+2/-0)
drivers/net/sfc/sfc.c (+2/-0)
drivers/net/sfc/sfc_ev.c (+1/-1)
drivers/net/sfc/sfc_flow.c (+2/-4)
drivers/net/sfc/sfc_rx.c (+43/-1)
drivers/net/softnic/rte_eth_softnic.c (+1/-1)
drivers/net/szedata2/rte_eth_szedata2.c (+24/-17)
drivers/net/tap/rte_eth_tap.c (+4/-2)
drivers/net/vhost/rte_eth_vhost.c (+58/-35)
drivers/net/vmxnet3/vmxnet3_ethdev.c (+4/-0)
drivers/net/vmxnet3/vmxnet3_rxtx.c (+2/-2)
examples/exception_path/main.c (+14/-4)
examples/l2fwd-crypto/main.c (+2/-2)
examples/performance-thread/common/lthread.c (+2/-1)
examples/performance-thread/common/lthread_api.h (+1/-1)
examples/performance-thread/l3fwd-thread/main.c (+13/-7)
examples/performance-thread/pthread_shim/main.c (+2/-4)
examples/performance-thread/pthread_shim/pthread_shim.c (+2/-2)
examples/quota_watermark/qw/main.c (+8/-6)
lib/librte_cryptodev/rte_cryptodev.c (+33/-12)
lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h (+1/-1)
lib/librte_eal/common/include/rte_version.h (+1/-1)
lib/librte_eal/linuxapp/eal/eal_memory.c (+5/-1)
lib/librte_eal/linuxapp/eal/eal_vfio.c (+2/-8)
lib/librte_eal/linuxapp/kni/compat.h (+5/-0)
lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h (+10/-3)
lib/librte_ether/rte_ethdev.c (+12/-0)
lib/librte_ether/rte_ethdev.h (+3/-0)
lib/librte_ether/rte_ethdev_pci.h (+3/-3)
lib/librte_hash/rte_cuckoo_hash.c (+4/-2)
lib/librte_ip_frag/rte_ipv4_reassembly.c (+2/-0)
lib/librte_ip_frag/rte_ipv6_reassembly.c (+2/-0)
lib/librte_mbuf/rte_mbuf.h (+9/-16)
lib/librte_mempool/rte_mempool.c (+9/-6)
lib/librte_net/rte_ip.h (+6/-0)
lib/librte_pci/rte_pci_version.map (+2/-3)
lib/librte_vhost/fd_man.c (+32/-0)
lib/librte_vhost/fd_man.h (+1/-0)
lib/librte_vhost/socket.c (+18/-1)
lib/librte_vhost/vhost_user.c (+32/-10)
lib/librte_vhost/virtio_net.c (+5/-5)
mk/rte.sdkconfig.mk (+14/-5)
pkg/dpdk.spec (+1/-1)
test/test/test_cryptodev.c (+1/-1)
test/test/test_distributor_perf.c (+2/-1)
test/test/test_eal_flags.c (+4/-3)
test/test/test_mempool.c (+9/-6)
test/test/test_reorder.c (+37/-10)
test/test/test_table_pipeline.c (+6/-8)
Conflict in debian/changelog
Reviewer Review Type Date Requested Status
Andreas Hasenack Approve
git-ubuntu developers Pending
Canonical Server Pending
Review via email: mp+352112@code.launchpad.net

This proposal supersedes a proposal from 2018-08-01.

To post a comment you must log in.
Revision history for this message
Christian Ehrhardt  (paelzer) wrote : Posted in a previous version of this proposal

Info:
- Cosmic is a sync from Debian
- Bionic will get a backport of the stable release
Because of that, the MP is against cosmic-devel, as that keeps the delta sane and shows "the new" changes.

If you want to see the changes the LTS introduced itself you can compare current ubuntu/bionic-devel vs ubuntu/cosmic-devel (import/17.11.2-1ubuntu0.1 vs import/17.11.3-3).

Marc did not update the maintainers on the last LTS, but I chose to do so, since the package is no longer identical to Debian's (I'm the Debian co-maintainer anyway).

I'll be doing a set of functional tests to fulfill the SRU exception, see bug 1784816 for more details - the review here is mostly from the packaging POV (also, this content was already tested from the PPA and by Mellanox).
Test PPA can be found at: https://launchpad.net/~ci-train-ppa-service/+archive/ubuntu/3334

Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

Resubmitted against bionic-devel after IRC discussion - not sure if that is better (I preferred cosmic-devel) but let's give it a shot.

Revision history for this message
Andreas Hasenack (ahasenack) wrote :

Ok, so:
- new builddep: libibverbs-dev. It's in main, check.
- new packages: librte-pmd-mlx5-17.11, librte-pmd-mlx4-17.11 -> up to the sru team
- new symbols, none removed. check

Trying the upgrade path now

Revision history for this message
Andreas Hasenack (ahasenack) wrote :

dist-upgrade pulls in new packages as expected:
The following NEW packages will be installed:
  ibverbs-providers libibverbs1 libnl-3-200 libnl-route-3-200 librte-pmd-mlx4-17.11 librte-pmd-mlx5-17.11

Seems fine from a packaging point of view.

review: Approve
Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

Thanks Andreas - also all testing is complete, so I'm uploading ...

Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

upload/17.11.3-3_ubuntu0.18.04 pushed

Revision history for this message
Christian Ehrhardt  (paelzer) wrote :

Is in Bionic-unapproved now

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1diff --git a/app/proc_info/main.c b/app/proc_info/main.c
2index 64fbbd0..875d91e 100644
3--- a/app/proc_info/main.c
4+++ b/app/proc_info/main.c
5@@ -188,7 +188,7 @@ proc_info_preparse_args(int argc, char **argv)
6 proc_info_usage(prgname);
7 return -1;
8 }
9- strncpy(host_id, argv[i+1], sizeof(host_id));
10+ snprintf(host_id, sizeof(host_id), "%s", argv[i+1]);
11 }
12 }
13
14diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c
15index 328744e..9b08b3f 100644
16--- a/app/test-crypto-perf/cperf_test_common.c
17+++ b/app/test-crypto-perf/cperf_test_common.c
18@@ -119,7 +119,7 @@ mempool_obj_init(struct rte_mempool *mp,
19 op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
20 op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
21 op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
22- op->phys_addr = rte_mem_virt2phy(obj);
23+ op->phys_addr = rte_mem_virt2iova(obj);
24 op->mempool = mp;
25
26 /* Set source buffer */
27diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
28index d4736f9..56df4e7 100644
29--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
30+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
31@@ -534,8 +534,7 @@ parse_file(struct cperf_test_vector *vector, struct cperf_options *opts)
32 if (entry == NULL)
33 return -1;
34
35- memset(entry, 0, strlen(line) + 1);
36- strncpy(entry, line, strlen(line));
37+ strcpy(entry, line);
38
39 /* check if entry ends with , or = */
40 if (entry[strlen(entry) - 1] == ','
41@@ -552,8 +551,8 @@ parse_file(struct cperf_test_vector *vector, struct cperf_options *opts)
42 if (entry_extended == NULL)
43 goto err;
44 entry = entry_extended;
45-
46- strncat(entry, line, strlen(line));
47+ /* entry has been allocated accordingly */
48+ strcpy(&entry[strlen(entry)], line);
49
50 if (entry[strlen(entry) - 1] != ',')
51 break;
52diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
53index 29373f5..13e0121 100644
54--- a/app/test-crypto-perf/main.c
55+++ b/app/test-crypto-perf/main.c
56@@ -106,13 +106,19 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
57
58 nb_lcores = rte_lcore_count() - 1;
59
60- if (enabled_cdev_count > nb_lcores) {
61- printf("Number of capable crypto devices (%d) "
62- "has to be less or equal to number of slave "
63- "cores (%d)\n", enabled_cdev_count, nb_lcores);
64+ if (nb_lcores < 1) {
65+ RTE_LOG(ERR, USER1,
66+ "Number of enabled cores need to be higher than 1\n");
67 return -EINVAL;
68 }
69
70+ /*
71+ * Use less number of devices,
72+ * if there are more available than cores.
73+ */
74+ if (enabled_cdev_count > nb_lcores)
75+ enabled_cdev_count = nb_lcores;
76+
77 /* Create a mempool shared by all the devices */
78 uint32_t max_sess_size = 0, sess_size;
79
80diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
81index b3c3f24..77c11b8 100644
82--- a/app/test-pmd/cmdline.c
83+++ b/app/test-pmd/cmdline.c
84@@ -2127,7 +2127,7 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = {
85 .data = NULL,
86 .help_str = "port <port_id> rxq|txq <queue_id> start|stop",
87 .tokens = {
88- (void *)&cmd_config_speed_all_port,
89+ (void *)&cmd_config_rxtx_queue_port,
90 (void *)&cmd_config_rxtx_queue_portid,
91 (void *)&cmd_config_rxtx_queue_rxtxq,
92 (void *)&cmd_config_rxtx_queue_qid,
93diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
94index df16d2a..35440ea 100644
95--- a/app/test-pmd/cmdline_flow.c
96+++ b/app/test-pmd/cmdline_flow.c
97@@ -2028,7 +2028,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
98 i = ctx->objdata >> 16;
99 if (!strcmp_partial("end", str, len)) {
100 ctx->objdata &= 0xffff;
101- return len;
102+ goto end;
103 }
104 if (i >= ACTION_RSS_NUM)
105 return -1;
106@@ -2045,6 +2045,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
107 if (ctx->next_num == RTE_DIM(ctx->next))
108 return -1;
109 ctx->next[ctx->next_num++] = next;
110+end:
111 if (!ctx->object)
112 return len;
113 ((struct rte_flow_action_rss *)ctx->object)->num = i;
114diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
115index a0f3c24..61608d1 100644
116--- a/app/test-pmd/config.c
117+++ b/app/test-pmd/config.c
118@@ -149,15 +149,11 @@ nic_stats_display(portid_t port_id)
119 struct rte_eth_stats stats;
120 struct rte_port *port = &ports[port_id];
121 uint8_t i;
122- portid_t pid;
123
124 static const char *nic_stats_border = "########################";
125
126 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
127- printf("Valid port range is [0");
128- RTE_ETH_FOREACH_DEV(pid)
129- printf(", %d", pid);
130- printf("]\n");
131+ print_valid_ports();
132 return;
133 }
134 rte_eth_stats_get(port_id, &stats);
135@@ -231,13 +227,8 @@ nic_stats_display(portid_t port_id)
136 void
137 nic_stats_clear(portid_t port_id)
138 {
139- portid_t pid;
140-
141 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
142- printf("Valid port range is [0");
143- RTE_ETH_FOREACH_DEV(pid)
144- printf(", %d", pid);
145- printf("]\n");
146+ print_valid_ports();
147 return;
148 }
149 rte_eth_stats_reset(port_id);
150@@ -314,15 +305,11 @@ nic_stats_mapping_display(portid_t port_id)
151 {
152 struct rte_port *port = &ports[port_id];
153 uint16_t i;
154- portid_t pid;
155
156 static const char *nic_stats_mapping_border = "########################";
157
158 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
159- printf("Valid port range is [0");
160- RTE_ETH_FOREACH_DEV(pid)
161- printf(", %d", pid);
162- printf("]\n");
163+ print_valid_ports();
164 return;
165 }
166
167@@ -434,14 +421,10 @@ port_infos_display(portid_t port_id)
168 int vlan_offload;
169 struct rte_mempool * mp;
170 static const char *info_border = "*********************";
171- portid_t pid;
172 uint16_t mtu;
173
174 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
175- printf("Valid port range is [0");
176- RTE_ETH_FOREACH_DEV(pid)
177- printf(", %d", pid);
178- printf("]\n");
179+ print_valid_ports();
180 return;
181 }
182 port = &ports[port_id];
183@@ -739,6 +722,17 @@ port_id_is_invalid(portid_t port_id, enum print_warning warning)
184 return 1;
185 }
186
187+void print_valid_ports(void)
188+{
189+ portid_t pid;
190+
191+ printf("The valid ports array is [");
192+ RTE_ETH_FOREACH_DEV(pid) {
193+ printf(" %d", pid);
194+ }
195+ printf(" ]\n");
196+}
197+
198 static int
199 vlan_id_is_invalid(uint16_t vlan_id)
200 {
201diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
202index 8fbb515..5d51808 100644
203--- a/app/test-pmd/parameters.c
204+++ b/app/test-pmd/parameters.c
205@@ -403,7 +403,6 @@ parse_portnuma_config(const char *q_arg)
206 };
207 unsigned long int_fld[_NUM_FLD];
208 char *str_fld[_NUM_FLD];
209- portid_t pid;
210
211 /* reset from value set at definition */
212 while ((p = strchr(p0,'(')) != NULL) {
213@@ -427,10 +426,7 @@ parse_portnuma_config(const char *q_arg)
214 port_id = (portid_t)int_fld[FLD_PORT];
215 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
216 port_id == (portid_t)RTE_PORT_ALL) {
217- printf("Valid port range is [0");
218- RTE_ETH_FOREACH_DEV(pid)
219- printf(", %d", pid);
220- printf("]\n");
221+ print_valid_ports();
222 return -1;
223 }
224 socket_id = (uint8_t)int_fld[FLD_SOCKET];
225@@ -461,7 +457,6 @@ parse_ringnuma_config(const char *q_arg)
226 };
227 unsigned long int_fld[_NUM_FLD];
228 char *str_fld[_NUM_FLD];
229- portid_t pid;
230 #define RX_RING_ONLY 0x1
231 #define TX_RING_ONLY 0x2
232 #define RXTX_RING 0x3
233@@ -488,10 +483,7 @@ parse_ringnuma_config(const char *q_arg)
234 port_id = (portid_t)int_fld[FLD_PORT];
235 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
236 port_id == (portid_t)RTE_PORT_ALL) {
237- printf("Valid port range is [0");
238- RTE_ETH_FOREACH_DEV(pid)
239- printf(", %d", pid);
240- printf("]\n");
241+ print_valid_ports();
242 return -1;
243 }
244 socket_id = (uint8_t)int_fld[FLD_SOCKET];
245diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
246index f66f4c6..a4b28e9 100644
247--- a/app/test-pmd/testpmd.c
248+++ b/app/test-pmd/testpmd.c
249@@ -880,18 +880,23 @@ init_fwd_streams(void)
250
251 /* init new */
252 nb_fwd_streams = nb_fwd_streams_new;
253- fwd_streams = rte_zmalloc("testpmd: fwd_streams",
254- sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
255- if (fwd_streams == NULL)
256- rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
257- "failed\n", nb_fwd_streams);
258+ if (nb_fwd_streams) {
259+ fwd_streams = rte_zmalloc("testpmd: fwd_streams",
260+ sizeof(struct fwd_stream *) * nb_fwd_streams,
261+ RTE_CACHE_LINE_SIZE);
262+ if (fwd_streams == NULL)
263+ rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
264+ " (struct fwd_stream *)) failed\n",
265+ nb_fwd_streams);
266
267- for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
268- fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
269- sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
270- if (fwd_streams[sm_id] == NULL)
271- rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
272- " failed\n");
273+ for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
274+ fwd_streams[sm_id] = rte_zmalloc("testpmd:"
275+ " struct fwd_stream", sizeof(struct fwd_stream),
276+ RTE_CACHE_LINE_SIZE);
277+ if (fwd_streams[sm_id] == NULL)
278+ rte_exit(EXIT_FAILURE, "rte_zmalloc"
279+ "(struct fwd_stream) failed\n");
280+ }
281 }
282
283 return 0;
284@@ -925,6 +930,9 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
285 pktnb_stats[1] = pktnb_stats[0];
286 burst_stats[0] = nb_burst;
287 pktnb_stats[0] = nb_pkt;
288+ } else if (nb_burst > burst_stats[1]) {
289+ burst_stats[1] = nb_burst;
290+ pktnb_stats[1] = nb_pkt;
291 }
292 }
293 if (total_burst == 0)
294@@ -1211,6 +1219,31 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
295 }
296
297 /*
298+ * Update the forward ports list.
299+ */
300+void
301+update_fwd_ports(portid_t new_pid)
302+{
303+ unsigned int i;
304+ unsigned int new_nb_fwd_ports = 0;
305+ int move = 0;
306+
307+ for (i = 0; i < nb_fwd_ports; ++i) {
308+ if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
309+ move = 1;
310+ else if (move)
311+ fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
312+ else
313+ new_nb_fwd_ports++;
314+ }
315+ if (new_pid < RTE_MAX_ETHPORTS)
316+ fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
317+
318+ nb_fwd_ports = new_nb_fwd_ports;
319+ nb_cfg_ports = new_nb_fwd_ports;
320+}
321+
322+/*
323 * Launch packet forwarding configuration.
324 */
325 void
326@@ -1245,10 +1278,6 @@ start_packet_forwarding(int with_tx_first)
327 return;
328 }
329
330- if (init_fwd_streams() < 0) {
331- printf("Fail from init_fwd_streams()\n");
332- return;
333- }
334
335 if(dcb_test) {
336 for (i = 0; i < nb_fwd_ports; i++) {
337@@ -1268,10 +1297,11 @@ start_packet_forwarding(int with_tx_first)
338 }
339 test_done = 0;
340
341+ fwd_config_setup();
342+
343 if(!no_flush_rx)
344 flush_fwd_rx_queues();
345
346- fwd_config_setup();
347 pkt_fwd_config_display(&cur_fwd_config);
348 rxtx_config_display();
349
350@@ -1876,6 +1906,8 @@ attach_port(char *identifier)
351
352 ports[pi].port_status = RTE_PORT_STOPPED;
353
354+ update_fwd_ports(pi);
355+
356 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
357 printf("Done\n");
358 }
359@@ -1902,6 +1934,8 @@ detach_port(portid_t port_id)
360
361 nb_ports = rte_eth_dev_count();
362
363+ update_fwd_ports(RTE_MAX_ETHPORTS);
364+
365 printf("Port '%s' is detached. Now total ports is %d\n",
366 name, nb_ports);
367 printf("Done\n");
368@@ -1995,13 +2029,16 @@ check_all_ports_link_status(uint32_t port_mask)
369 static void
370 rmv_event_callback(void *arg)
371 {
372+ int org_no_link_check = no_link_check;
373 struct rte_eth_dev *dev;
374 portid_t port_id = (intptr_t)arg;
375
376 RTE_ETH_VALID_PORTID_OR_RET(port_id);
377 dev = &rte_eth_devices[port_id];
378
379+ no_link_check = 1;
380 stop_port(port_id);
381+ no_link_check = org_no_link_check;
382 close_port(port_id);
383 printf("removing device %s\n", dev->device->name);
384 if (rte_eal_dev_detach(dev->device))
385@@ -2246,7 +2283,10 @@ uint8_t port_is_bonding_slave(portid_t slave_pid)
386 struct rte_port *port;
387
388 port = &ports[slave_pid];
389- return port->slave_flag;
390+ if ((rte_eth_devices[slave_pid].data->dev_flags &
391+ RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
392+ return 1;
393+ return 0;
394 }
395
396 const uint16_t vlan_tags[] = {
397diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
398index 92e1607..b3b26d2 100644
399--- a/app/test-pmd/testpmd.h
400+++ b/app/test-pmd/testpmd.h
401@@ -599,6 +599,7 @@ void fwd_config_setup(void);
402 void set_def_fwd_config(void);
403 void reconfig(portid_t new_port_id, unsigned socket_id);
404 int init_fwd_streams(void);
405+void update_fwd_ports(portid_t new_pid);
406
407 void port_mtu_set(portid_t port_id, uint16_t mtu);
408 void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos);
409@@ -726,6 +727,7 @@ enum print_warning {
410 DISABLED_WARN
411 };
412 int port_id_is_invalid(portid_t port_id, enum print_warning warning);
413+void print_valid_ports(void);
414 int new_socket_id(unsigned int socket_id);
415
416 queueid_t get_allowed_max_nb_rxq(portid_t *pid);
417diff --git a/config/defconfig_i686-native-linuxapp-icc b/config/defconfig_i686-native-linuxapp-icc
418index 269e88e..46ffb11 100644
419--- a/config/defconfig_i686-native-linuxapp-icc
420+++ b/config/defconfig_i686-native-linuxapp-icc
421@@ -47,11 +47,6 @@ CONFIG_RTE_TOOLCHAIN_ICC=y
422 CONFIG_RTE_LIBRTE_KNI=n
423
424 #
425-# Vectorized PMD is not supported on 32-bit
426-#
427-CONFIG_RTE_IXGBE_INC_VECTOR=n
428-
429-#
430 # Solarflare PMD is not supported on 32-bit
431 #
432 CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
433diff --git a/debian/changelog b/debian/changelog
434index 7575ece..c250026 100644
435--- a/debian/changelog
436+++ b/debian/changelog
437@@ -1,3 +1,39 @@
438+<<<<<<< debian/changelog
439+=======
440+dpdk (17.11.3-3~ubuntu0.18.04) bionic; urgency=medium
441+
442+ * Make DPDK LTS release available in Bionic (LP: #1784816)
443+
444+ -- Christian Ehrhardt <christian.ehrhardt@canonical.com> Wed, 01 Aug 2018 11:07:07 +0200
445+
446+dpdk (17.11.3-3) unstable; urgency=medium
447+
448+ * d/p/net-mlx5-fix-build-with-rdma-core-v19.patch: fix Build against
449+ rdma-core v19 which is in Debian unstable now. (This will most likely be
450+ in 17.11.4 and can then be dropped)
451+
452+ -- Luca Boccassi <bluca@debian.org> Mon, 30 Jul 2018 10:34:20 +0100
453+
454+dpdk (17.11.3-2) unstable; urgency=medium
455+
456+ * Temporarily disable ML4 and MLX5 again to fix FTBS due to
457+ incompatibility with rdma-core 19.0.
458+
459+ -- Luca Boccassi <bluca@debian.org> Tue, 03 Jul 2018 11:14:22 +0100
460+
461+dpdk (17.11.3-1) unstable; urgency=medium
462+
463+ [ Christian Ehrhardt ]
464+ * New upstream release 17.11.3; for a full list of changes see:
465+ https://dpdk.org/doc/guides-17.11/rel_notes/release_17_11.html
466+ * d/control: binary package and build-dependency for MLX4 and MLX5 PMDs.
467+ - d/librte-pmd-mlx*.symbols: symbol files for mlx PMDs
468+ - d/p/app-testpmd-add-ethernet-peer-command.patch: ensure MLX PMDs are
469+ well testable
470+
471+ -- Luca Boccassi <bluca@debian.org> Wed, 20 Jun 2018 13:42:27 +0100
472+
473+>>>>>>> debian/changelog
474 dpdk (17.11.2-1ubuntu0.1) bionic-security; urgency=medium
475
476 * Release as bionic security update.
477diff --git a/debian/control b/debian/control
478index f7e1da8..bf4ca6c 100644
479--- a/debian/control
480+++ b/debian/control
481@@ -13,6 +13,7 @@ Build-Depends: debhelper (>= 9),
482 graphviz <!nodoc>,
483 inkscape <!nodoc>,
484 libcap-dev,
485+ libibverbs-dev,
486 libpcap-dev,
487 libnuma-dev,
488 python3,
489@@ -1081,3 +1082,28 @@ Description: Data Plane Development Kit (librte_pmd_thunderx_nicvf runtime libra
490 .
491 This package contains the runtime libraries for librte_pmd_thunderx_nicvf.
492
493+Package: librte-pmd-mlx4-17.11
494+Architecture: amd64 arm64 i386 ppc64el
495+Multi-Arch: same
496+Homepage: https://dpdk.org/doc/guides/nics/mlx4.html
497+Pre-Depends: ${misc:Pre-Depends}
498+Depends: ${misc:Depends}, ${shlibs:Depends}
499+Conflicts: libdpdk0
500+Description: Data Plane Development Kit (librte-pmd-mlx4 runtime library)
501+ DPDK is a set of libraries for fast packet processing. Applications run
502+ in user-space and communicate directly with dedicated network interfaces.
503+ .
504+ This package contains the runtime libraries for librte-pmd-mlx4.
505+
506+Package: librte-pmd-mlx5-17.11
507+Architecture: amd64 arm64 i386 ppc64el
508+Multi-Arch: same
509+Homepage: https://dpdk.org/doc/guides/nics/mlx5.html
510+Pre-Depends: ${misc:Pre-Depends}
511+Depends: ${misc:Depends}, ${shlibs:Depends}
512+Conflicts: libdpdk0
513+Description: Data Plane Development Kit (librte-pmd-mlx5 runtime library)
514+ DPDK is a set of libraries for fast packet processing. Applications run
515+ in user-space and communicate directly with dedicated network interfaces.
516+ .
517+ This package contains the runtime libraries for librte-pmd-mlx5.
518diff --git a/debian/librte-pmd-bond17.11.symbols b/debian/librte-pmd-bond17.11.symbols
519index 9ab11c0..7ed0b24 100644
520--- a/debian/librte-pmd-bond17.11.symbols
521+++ b/debian/librte-pmd-bond17.11.symbols
522@@ -15,6 +15,7 @@ librte_pmd_bond.so.17.11 librte-pmd-bond17.11 #MINVER#
523 rte_eth_bond_8023ad_ext_distrib_get@DPDK_16.07 16.07~rc1
524 rte_eth_bond_8023ad_ext_slowtx@DPDK_16.07 16.07~rc1
525 rte_eth_bond_8023ad_setup@DPDK_17.08 17.08
526+ rte_eth_bond_8023ad_slave_info@DPDK_2.0 17.11.3
527 rte_eth_bond_active_slaves_get@DPDK_2.0 16.04
528 rte_eth_bond_create@DPDK_2.0 16.04
529 rte_eth_bond_free@DPDK_2.1 16.04
530diff --git a/debian/librte-pmd-mlx4-17.11.symbols b/debian/librte-pmd-mlx4-17.11.symbols
531new file mode 100644
532index 0000000..d33b21b
533--- /dev/null
534+++ b/debian/librte-pmd-mlx4-17.11.symbols
535@@ -0,0 +1,5 @@
536+librte_pmd_mlx4.so.17.11 librte-pmd-mlx4-17.11 #MINVER#
537+# Built since 17.11 due to rdma-core being available
538+ DPDK_2.0@DPDK_2.0 17.11
539+# INFO: this library exports no symbols, essentially it is a driver that
540+# registers itself on load and is then only driven by callbacks.
541diff --git a/debian/librte-pmd-mlx5-17.11.symbols b/debian/librte-pmd-mlx5-17.11.symbols
542new file mode 100644
543index 0000000..b7f244b
544--- /dev/null
545+++ b/debian/librte-pmd-mlx5-17.11.symbols
546@@ -0,0 +1,5 @@
547+librte_pmd_mlx5.so.17.11 librte-pmd-mlx5-17.11 #MINVER#
548+# Built since 17.11 due to rdma-core being available
549+ DPDK_2.2@DPDK_2.2 17.11.3
550+# INFO: this library exports no symbols, essentially it is a driver that
551+# registers itself on load and is then only driven by callbacks.
552diff --git a/debian/librte-vhost17.11.symbols b/debian/librte-vhost17.11.symbols
553index 2b38437..a05dc21 100644
554--- a/debian/librte-vhost17.11.symbols
555+++ b/debian/librte-vhost17.11.symbols
556@@ -2,6 +2,7 @@ librte_vhost.so.17.11 librte-vhost17.11 #MINVER#
557 DPDK_16.07@DPDK_16.07 16.07~rc1
558 DPDK_17.05@DPDK_17.05 17.05
559 DPDK_17.08@DPDK_17.08 17.08
560+ DPDK_17.11.2@DPDK_17.11.2 17.11.2
561 DPDK_2.0@DPDK_2.0 16.07~rc1
562 DPDK_2.1@DPDK_2.1 16.07~rc1
563 rte_vhost_avail_entries@DPDK_16.07 16.07~rc1
564diff --git a/debian/patches/app-testpmd-add-ethernet-peer-command.patch b/debian/patches/app-testpmd-add-ethernet-peer-command.patch
565new file mode 100644
566index 0000000..af9204f
567--- /dev/null
568+++ b/debian/patches/app-testpmd-add-ethernet-peer-command.patch
569@@ -0,0 +1,168 @@
570+From aac6f11f586480f9222dba99910654eda989c649 Mon Sep 17 00:00:00 2001
571+From: Wisam Jaddo <wisamm@mellanox.com>
572+Date: Sun, 14 Jan 2018 10:27:10 +0200
573+Subject: [PATCH] app/testpmd: add ethernet peer command
574+
575+This command will simulate the process of setting the
576+eth-peer from command line.
577+
578+It will be useful to perform extra testing.
579+
580+usage:
581+ testpmd> set eth-peer <port_id> <peer_addr>.
582+
583+Signed-off-by: Wisam Jaddo <wisamm@mellanox.com>
584+Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
585+
586+This will help Debian/Ubuntu to have the MLX PMDs much more testable
587+while at the same time not affecting
588+ a) any other formerly existing PMDs
589+ b) any other parts than the testpmd helper
590+ c) not changing things if not specified
591+
592+Author: Christian Ehrhardt <christian.ehrhardt@canonical.com>
593+Original-Author: Wisam Jaddo <wisamm@mellanox.com>
594+Origin: backport, http://git.dpdk.org/dpdk/commit/?id=aac6f11f586480f9222dba99910654eda989c649
595+Last-Update: 2018-06-18
596+---
597+ app/test-pmd/cmdline.c | 48 +++++++++++++++++++++
598+ app/test-pmd/config.c | 19 ++++++++
599+ app/test-pmd/testpmd.h | 2 +
600+ doc/guides/testpmd_app_ug/testpmd_funcs.rst | 9 ++++
601+ 4 files changed, 78 insertions(+)
602+
603+--- a/app/test-pmd/cmdline.c
604++++ b/app/test-pmd/cmdline.c
605+@@ -486,6 +486,9 @@ static void cmd_help_long_parsed(void *p
606+ "set vf mac addr (port_id) (vf_id) (XX:XX:XX:XX:XX:XX)\n"
607+ " Set the MAC address for a VF from the PF.\n\n"
608+
609++ "set eth-peer (port_id) (peer_addr)\n"
610++ " set the peer address for certain port.\n\n"
611++
612+ "set port (port_id) uta (mac_address|all) (on|off)\n"
613+ " Add/Remove a or all unicast hash filter(s)"
614+ "from port X.\n\n"
615+@@ -7124,6 +7127,50 @@ cmdline_parse_inst_t cmd_mac_addr = {
616+ },
617+ };
618+
619++/* *** SET THE PEER ADDRESS FOR CERTAIN PORT *** */
620++struct cmd_eth_peer_result {
621++ cmdline_fixed_string_t set;
622++ cmdline_fixed_string_t eth_peer;
623++ portid_t port_id;
624++ cmdline_fixed_string_t peer_addr;
625++};
626++
627++static void cmd_set_eth_peer_parsed(void *parsed_result,
628++ __attribute__((unused)) struct cmdline *cl,
629++ __attribute__((unused)) void *data)
630++{
631++ struct cmd_eth_peer_result *res = parsed_result;
632++
633++ if (test_done == 0) {
634++ printf("Please stop forwarding first\n");
635++ return;
636++ }
637++ if (!strcmp(res->eth_peer, "eth-peer")) {
638++ set_fwd_eth_peer(res->port_id, res->peer_addr);
639++ fwd_config_setup();
640++ }
641++}
642++cmdline_parse_token_string_t cmd_eth_peer_set =
643++ TOKEN_STRING_INITIALIZER(struct cmd_eth_peer_result, set, "set");
644++cmdline_parse_token_string_t cmd_eth_peer =
645++ TOKEN_STRING_INITIALIZER(struct cmd_eth_peer_result, eth_peer, "eth-peer");
646++cmdline_parse_token_num_t cmd_eth_peer_port_id =
647++ TOKEN_NUM_INITIALIZER(struct cmd_eth_peer_result, port_id, UINT16);
648++cmdline_parse_token_string_t cmd_eth_peer_addr =
649++ TOKEN_STRING_INITIALIZER(struct cmd_eth_peer_result, peer_addr, NULL);
650++
651++cmdline_parse_inst_t cmd_set_fwd_eth_peer = {
652++ .f = cmd_set_eth_peer_parsed,
653++ .data = NULL,
654++ .help_str = "set eth-peer <port_id> <peer_mac>",
655++ .tokens = {
656++ (void *)&cmd_eth_peer_set,
657++ (void *)&cmd_eth_peer,
658++ (void *)&cmd_eth_peer_port_id,
659++ (void *)&cmd_eth_peer_addr,
660++ NULL,
661++ },
662++};
663+
664+ /* *** CONFIGURE QUEUE STATS COUNTER MAPPINGS *** */
665+ struct cmd_set_qmap_result {
666+@@ -15629,6 +15676,7 @@ cmdline_parse_ctx_t main_ctx[] = {
667+ (cmdline_parse_inst_t *)&cmd_read_rxd_txd,
668+ (cmdline_parse_inst_t *)&cmd_stop,
669+ (cmdline_parse_inst_t *)&cmd_mac_addr,
670++ (cmdline_parse_inst_t *)&cmd_set_fwd_eth_peer,
671+ (cmdline_parse_inst_t *)&cmd_set_qmap,
672+ (cmdline_parse_inst_t *)&cmd_set_xstats_hide_zero,
673+ (cmdline_parse_inst_t *)&cmd_operate_port,
674+--- a/app/test-pmd/config.c
675++++ b/app/test-pmd/config.c
676+@@ -78,6 +78,7 @@
677+ #include <rte_pmd_bnxt.h>
678+ #endif
679+ #include <rte_gro.h>
680++#include <cmdline_parse_etheraddr.h>
681+
682+ #include "testpmd.h"
683+
684+@@ -2213,6 +2214,24 @@ pkt_fwd_config_display(struct fwd_config
685+ printf("\n");
686+ }
687+
688++void
689++set_fwd_eth_peer(portid_t port_id, char *peer_addr)
690++{
691++ uint8_t c, new_peer_addr[6];
692++ if (!rte_eth_dev_is_valid_port(port_id)) {
693++ printf("Error: Invalid port number %i\n", port_id);
694++ return;
695++ }
696++ if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr,
697++ sizeof(new_peer_addr)) < 0) {
698++ printf("Error: Invalid ethernet address: %s\n", peer_addr);
699++ return;
700++ }
701++ for (c = 0; c < 6; c++)
702++ peer_eth_addrs[port_id].addr_bytes[c] =
703++ new_peer_addr[c];
704++}
705++
706+ int
707+ set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
708+ {
709+--- a/app/test-pmd/testpmd.h
710++++ b/app/test-pmd/testpmd.h
711+@@ -601,6 +601,8 @@ void reconfig(portid_t new_port_id, unsi
712+ int init_fwd_streams(void);
713+ void update_fwd_ports(portid_t new_pid);
714+
715++void set_fwd_eth_peer(portid_t port_id, char *peer_addr);
716++
717+ void port_mtu_set(portid_t port_id, uint16_t mtu);
718+ void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos);
719+ void port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
720+--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
721++++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
722+@@ -1091,6 +1091,15 @@ Set the MAC address for a VF from the PF
723+
724+ testpmd> set vf mac addr (port_id) (vf_id) (XX:XX:XX:XX:XX:XX)
725+
726++set eth-peer
727++~~~~~~~~~~~~
728++
729++Set the forwarding peer address for certain port::
730++
731++ testpmd> set eth-peer (port_id) (peer_addr)
732++
733++This is equivalent to the ``--eth-peer`` command-line option.
734++
735+ set port-uta
736+ ~~~~~~~~~~~~
737+
738diff --git a/debian/patches/net-mlx5-fix-build-with-rdma-core-v19.patch b/debian/patches/net-mlx5-fix-build-with-rdma-core-v19.patch
739new file mode 100644
740index 0000000..90ac3c9
741--- /dev/null
742+++ b/debian/patches/net-mlx5-fix-build-with-rdma-core-v19.patch
743@@ -0,0 +1,71 @@
744+From 06b1fe3f6d2121009b3b879e92b8cca25d4c0c42 Mon Sep 17 00:00:00 2001
745+From: Shahaf Shuler <shahafs@mellanox.com>
746+Date: Thu, 12 Jul 2018 09:40:32 +0300
747+Subject: [PATCH] net/mlx5: fix build with rdma-core v19
748+
749+The flow counter support introduced by
750+commit 9a761de8ea14 ("net/mlx5: flow counter support") was intended to
751+work only with MLNX_OFED_4.3 as the upstream rdma-core
752+libraries were lacking such support.
753+
754+On rdma-core v19 the support for the flow counters was added but with
755+different user APIs, hence causing compilation issues on the PMD.
756+
757+This patch fixes the compilation errors by forcing the flow counters
758+to be enabled only with MLNX_OFED APIs.
759+Once MLNX_OFED and rdma-core APIs will be aligned, a proper patch to
760+support the new API will be submitted.
761+
762+Fixes: 9a761de8ea14 ("net/mlx5: flow counter support")
763+Cc: stable@dpdk.org
764+
765+Note: lacking b42c000 "net/mlx5: remove flow support" this was not
766+enough and needed special handling for ibv_flow_spec_counter_action.
767+
768+Reported-by: Stephen Hemminger <stephen@networkplumber.org>
769+Reported-by: Ferruh Yigit <ferruh.yigit@intel.com>
770+Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
771+Acked-by: Ori Kam <orika@mellanox.com>
772+
773+Author: Christian Ehrhardt <christian.ehrhardt@canonical.com>
774+Original-Author: Shahaf Shuler <shahafs@mellanox.com>
775+Origin: backport, http://git.dpdk.org/dpdk/commit/?id=06b1fe3f6d2121009b3b879e92b8cca25d4c0c42
776+Last-Update: 2018-06-17
777+---
778+ drivers/net/mlx5/Makefile | 2 +-
779+ 1 file changed, 1 insertion(+), 1 deletion(-)
780+
781+--- a/drivers/net/mlx5/Makefile
782++++ b/drivers/net/mlx5/Makefile
783+@@ -145,7 +145,12 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtoo
784+ $Q sh -- '$<' '$@' \
785+ HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \
786+ infiniband/verbs.h \
787+- enum IBV_FLOW_SPEC_ACTION_COUNT \
788++ type 'struct ibv_counter_set_init_attr' \
789++ $(AUTOCONF_OUTPUT)
790++ $Q sh -- '$<' '$@' \
791++ HAVE_IBV_FLOW_SPEC_COUNTER_ACTION \
792++ infiniband/verbs.h \
793++ type 'struct ibv_flow_spec_counter_action' \
794+ $(AUTOCONF_OUTPUT)
795+
796+ # Create mlx5_autoconf.h or update it in case it differs from the new one.
797+--- a/drivers/net/mlx5/mlx5_flow.c
798++++ b/drivers/net/mlx5/mlx5_flow.c
799+@@ -66,9 +66,15 @@
800+ struct ibv_counter_set_init_attr {
801+ int dummy;
802+ };
803++/* rdma-core v19 has no ibv_counter_set_init_attr, but it has
804++ * ibv_flow_spec_counter_action which would conflict.
805+ * Newer DPDK doesn't have the issue due to the series starting with
806++ * "net/mlx5: remove flow support" */
807++#ifndef HAVE_IBV_FLOW_SPEC_COUNTER_ACTION
808+ struct ibv_flow_spec_counter_action {
809+ int dummy;
810+ };
811++#endif
812+ struct ibv_counter_set {
813+ int dummy;
814+ };
815diff --git a/debian/patches/series b/debian/patches/series
816index 5034d58..4969a43 100644
817--- a/debian/patches/series
818+++ b/debian/patches/series
819@@ -1,2 +1,4 @@
820 fix-vhost-user-socket-permission.patch
821 testpmd-link-virtio.patch
822+app-testpmd-add-ethernet-peer-command.patch
823+net-mlx5-fix-build-with-rdma-core-v19.patch
824diff --git a/debian/rules b/debian/rules
825index 20780c9..0a40833 100755
826--- a/debian/rules
827+++ b/debian/rules
828@@ -182,6 +182,8 @@ ifeq (,$(filter dpdk_config=%,$(DEB_BUILD_OPTIONS)))
829 -e 's,(CONFIG_RTE_EAL_IGB_UIO=).*,\1$(DPDK_CONFIG_BUILD_KMOD),' \
830 -e 's,(CONFIG_RTE_KNI_KMOD=).*,\1$(DPDK_CONFIG_BUILD_KMOD),' \
831 -e 's,(LIBRTE_PMD_PCAP=).*,\1y,' \
832+ -e 's,(CONFIG_RTE_LIBRTE_MLX4_PMD=).*,\1y,' \
833+ -e 's,(CONFIG_RTE_LIBRTE_MLX5_PMD=).*,\1y,' \
834 -e 's,(CONFIG_RTE_EAL_PMD_PATH=).*,\1"/usr/lib/$(DEB_HOST_MULTIARCH)/$(DPDK_DRIVER_DIR)/",' \
835 $(DPDK_STATIC_DIR)/.config
836 endif
837diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
838index c363639..f8ce087 100644
839--- a/doc/guides/nics/features/mlx5.ini
840+++ b/doc/guides/nics/features/mlx5.ini
841@@ -29,6 +29,7 @@ CRC offload = Y
842 VLAN offload = Y
843 L3 checksum offload = Y
844 L4 checksum offload = Y
845+Timestamp offload = Y
846 Packet type parsing = Y
847 Rx descriptor status = Y
848 Tx descriptor status = Y
849diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
850index f9558da..50fced3 100644
851--- a/doc/guides/nics/mlx5.rst
852+++ b/doc/guides/nics/mlx5.rst
853@@ -108,7 +108,11 @@ Limitations
854 - Port statistics through software counters only. Flow statistics are
855 supported by hardware counters.
856 - Hardware checksum RX offloads for VXLAN inner header are not supported yet.
857-- Forked secondary process not supported.
858+- For secondary process:
859+
860+ - Forked secondary process not supported.
861+ - All mempools must be initialized before rte_eth_dev_start().
862+
863 - Flow pattern without any specific vlan will match for vlan packets as well:
864
865 When VLAN spec is not specified in the pattern, the matching rule will be created with VLAN as a wild card.
866diff --git a/doc/guides/nics/nfp.rst b/doc/guides/nics/nfp.rst
867index 99a3b76..67e574e 100644
868--- a/doc/guides/nics/nfp.rst
869+++ b/doc/guides/nics/nfp.rst
870@@ -34,14 +34,14 @@ NFP poll mode driver library
871 Netronome's sixth generation of flow processors pack 216 programmable
872 cores and over 100 hardware accelerators that uniquely combine packet,
873 flow, security and content processing in a single device that scales
874-up to 400 Gbps.
875+up to 400-Gb/s.
876
877 This document explains how to use DPDK with the Netronome Poll Mode
878 Driver (PMD) supporting Netronome's Network Flow Processor 6xxx
879 (NFP-6xxx) and Netronome's Flow Processor 4xxx (NFP-4xxx).
880
881 NFP is a SRIOV capable device and the PMD driver supports the physical
882-function (PF) and virtual functions (VFs).
883+function (PF) and the virtual functions (VFs).
884
885 Dependencies
886 ------------
887@@ -49,17 +49,18 @@ Dependencies
888 Before using the Netronome's DPDK PMD some NFP configuration,
889 which is not related to DPDK, is required. The system requires
890 installation of **Netronome's BSP (Board Support Package)** along
891-with some specific NFP firmware application. Netronome's NSP ABI
892+with a specific NFP firmware application. Netronome's NSP ABI
893 version should be 0.20 or higher.
894
895 If you have a NFP device you should already have the code and
896-documentation for doing all this configuration. Contact
897+documentation for this configuration. Contact
898 **support@netronome.com** to obtain the latest available firmware.
899
900-The NFP Linux netdev kernel driver for VFs is part of vanilla kernel
901-since kernel version 4.5, and support for the PF since kernel version
902-4.11. Support for older kernels can be obtained on Github at
903-**https://github.com/Netronome/nfp-drv-kmods** along with build
904+The NFP Linux netdev kernel driver for VFs has been a part of the
905+vanilla kernel since kernel version 4.5, and support for the PF
906+since kernel version 4.11. Support for older kernels can be obtained
907+on Github at
908+**https://github.com/Netronome/nfp-drv-kmods** along with the build
909 instructions.
910
911 NFP PMD needs to be used along with UIO ``igb_uio`` or VFIO (``vfio-pci``)
912@@ -70,15 +71,15 @@ Building the software
913
914 Netronome's PMD code is provided in the **drivers/net/nfp** directory.
915 Although NFP PMD has Netronome´s BSP dependencies, it is possible to
916-compile it along with other DPDK PMDs even if no BSP was installed before.
917+compile it along with other DPDK PMDs even if no BSP was installed previously.
918 Of course, a DPDK app will require such a BSP installed for using the
919 NFP PMD, along with a specific NFP firmware application.
920
921-Default PMD configuration is at **common_linuxapp configuration** file:
922+Default PMD configuration is at the **common_linuxapp configuration** file:
923
924 - **CONFIG_RTE_LIBRTE_NFP_PMD=y**
925
926-Once DPDK is built all the DPDK apps and examples include support for
927+Once the DPDK is built all the DPDK apps and examples include support for
928 the NFP PMD.
929
930
931@@ -91,18 +92,18 @@ for details.
932 Using the PF
933 ------------
934
935-NFP PMD has support for using the NFP PF as another DPDK port, but it does not
936+NFP PMD supports using the NFP PF as another DPDK port, but it does not
937 have any functionality for controlling VFs. In fact, it is not possible to use
938 the PMD with the VFs if the PF is being used by DPDK, that is, with the NFP PF
939-bound to ``igb_uio`` or ``vfio-pci`` kernel drivers. Future DPDK version will
940+bound to ``igb_uio`` or ``vfio-pci`` kernel drivers. Future DPDK versions will
941 have a PMD able to work with the PF and VFs at the same time and with the PF
942 implementing VF management along with other PF-only functionalities/offloads.
943
944-The PMD PF has extra work to do which will delay the DPDK app initialization
945-like checking if a firmware is already available in the device, uploading the
946-firmware if necessary, and configure the Link state properly when starting or
947-stopping a PF port. Note that firmware upload is not always necessary which is
948-the main delay for NFP PF PMD initialization.
949+The PMD PF has extra work to do which will delay the DPDK app initialization.
950+This additional effort could be checking if a firmware is already available in
951+the device, uploading the firmware if necessary or configuring the Link state
952+properly when starting or stopping a PF port. Note that firmware upload is not
953+always necessary which is the main delay for NFP PF PMD initialization.
954
955 Depending on the Netronome product installed in the system, firmware files
956 should be available under ``/lib/firmware/netronome``. DPDK PMD supporting the
957@@ -114,14 +115,14 @@ PF multiport support
958 --------------------
959
960 Some NFP cards support several physical ports with just one single PCI device.
961-DPDK core is designed with the 1:1 relationship between PCI devices and DPDK
962+The DPDK core is designed with a 1:1 relationship between PCI devices and DPDK
963 ports, so NFP PMD PF support requires handling the multiport case specifically.
964 During NFP PF initialization, the PMD will extract the information about the
965 number of PF ports from the firmware and will create as many DPDK ports as
966 needed.
967
968 Because the unusual relationship between a single PCI device and several DPDK
969-ports, there are some limitations when using more than one PF DPDK ports: there
970+ports, there are some limitations when using more than one PF DPDK port: there
971 is no support for RX interrupts and it is not possible either to use those PF
972 ports with the device hotplug functionality.
973
974@@ -136,7 +137,7 @@ System configuration
975 get the drivers from the above Github repository and follow the instructions
976 for building and installing it.
977
978- Virtual Functions need to be enabled before they can be used with the PMD.
979+ VFs need to be enabled before they can be used with the PMD.
980 Before enabling the VFs it is useful to obtain information about the
981 current NFP PCI device detected by the system:
982
983diff --git a/doc/guides/rel_notes/release_17_11.rst b/doc/guides/rel_notes/release_17_11.rst
984index fad7a7e..39a14ff 100644
985--- a/doc/guides/rel_notes/release_17_11.rst
986+++ b/doc/guides/rel_notes/release_17_11.rst
987@@ -1090,3 +1090,218 @@ Fixes in 17.11 LTS Release
988 * vhost: handle virtually non-contiguous buffers in Rx-mrg (fixes CVE-2018-1059)
989 * vhost: handle virtually non-contiguous buffers in Tx (fixes CVE-2018-1059)
990 * vhost: introduce safe API for GPA translation (fixes CVE-2018-1059)
991+
992+17.11.3
993+~~~~~~~
994+
995+* app/crypto-perf: check minimum lcore number
996+* app/crypto-perf: fix excess crypto device error
997+* app/crypto-perf: fix IOVA translation
998+* app/crypto-perf: fix parameters copy
999+* app/crypto-perf: use strcpy for allocated string
1000+* app/procinfo: fix strncpy usage in args parsing
1001+* app/testpmd: fix burst stats reporting
1002+* app/testpmd: fix command token
1003+* app/testpmd: fix empty list of RSS queues for flow
1004+* app/testpmd: fix forward ports Rx flush
1005+* app/testpmd: fix forward ports update
1006+* app/testpmd: fix removed device link status asking
1007+* app/testpmd: fix slave port detection
1008+* app/testpmd: fix synchronic port hotplug
1009+* app/testpmd: fix valid ports prints
1010+* bus/dpaa: fix resource leak
1011+* bus/fslmc: fix find device start condition
1012+* bus/pci: fix find device implementation
1013+* bus/vdev: fix finding device by name
1014+* cryptodev: fix supported size check
1015+* crypto/dpaa2_sec: fix HMAC supported digest sizes
1016+* crypto/scheduler: fix 64-bit mask of workers cores
1017+* crypto/scheduler: fix memory leak
1018+* crypto/scheduler: fix multicore rings re-use
1019+* crypto/scheduler: fix possible duplicated ring names
1020+* crypto/scheduler: set null pointer after freeing
1021+* crypto/zuc: batch ops with same transform
1022+* crypto/zuc: do not set default op status
1023+* doc: add timestamp offload to mlx5 features
1024+* doc: fix NFP NIC guide grammar
1025+* drivers/net: fix link autoneg value for virtual PMDs
1026+* eal/ppc: remove braces in SMP memory barrier macro
1027+* ethdev: fix port accessing after release
1028+* ethdev: fix queue start
1029+* event/dpaa2: remove link from info structure
1030+* examples/exception_path: limit core count to 64
1031+* examples/l2fwd-crypto: fix the default aead assignments
1032+* examples/performance-thread: fix return type of threads
1033+* examples/quota_watermark: fix return type of threads
1034+* hash: fix missing spinlock unlock in add key
1035+* ip_frag: fix double free of chained mbufs
1036+* kni: fix build on CentOS 7.4
1037+* kni: fix build on RHEL 7.5
1038+* mbuf: fix Tx checksum offload API doc
1039+* mbuf: improve tunnel Tx offloads API doc
1040+* mem: do not use physical addresses in IOVA as VA mode
1041+* mempool: fix leak when no objects are populated
1042+* mempool: fix virtual address population
1043+* mk: fix make defconfig on FreeBSD
1044+* net: add IPv6 header fields macros
1045+* net/bnx2x: do not cast function pointers as a policy
1046+* net/bnx2x: fix for PCI FLR after ungraceful exit
1047+* net/bnx2x: fix KR2 device check
1048+* net/bnx2x: fix memzone name overrun
1049+* net/bnxt: avoid invalid vnic id in set L2 Rx mask
1050+* net/bnxt: fix endianness of flag
1051+* net/bnxt: fix license header
1052+* net/bnxt: fix LRO disable
1053+* net/bnxt: fix Rx checksum flags
1054+* net/bnxt: fix Rx checksum flags for tunnel frames
1055+* net/bnxt: fix Rx drop setting
1056+* net/bnxt: fix Rx mbuf and agg ring leak in dev stop
1057+* net/bnxt: fix usage of vnic id
1058+* net/bnxt: free memory allocated for VF filters
1059+* net/bnxt: set padding flags in Rx descriptor
1060+* net/bonding: clear started state if start fails
1061+* net/bonding: export mode 4 slave info routine
1062+* net/bonding: fix primary slave port id storage type
1063+* net/bonding: fix setting VLAN ID on slave ports
1064+* net/bonding: fix slave activation simultaneously
1065+* net/bonding: free mempool used in mode 6
1066+* net/dpaa2: fix xstats
1067+* net/dpaa: fix oob access
1068+* net/enic: allocate stats DMA buffer upfront during probe
1069+* net/enic: fix crash on MTU update with non-setup queues
1070+* net/failsafe: fix duplicate event registration
1071+* net/failsafe: fix probe cleanup
1072+* net/failsafe: fix removed sub-device cleanup
1073+* net/i40e: fix DDP profile DEL operation
1074+* net/i40e: fix failing to disable FDIR Tx queue
1075+* net/i40e: fix intr callback unregister by adding retry
1076+* net/i40e: fix link status update
1077+* net/i40e: fix link update no wait
1078+* net/i40e: fix shifts of signed values
1079+* net/ixgbe: enable vector PMD for icc 32 bits
1080+* net/ixgbe: fix busy wait during checking link status
1081+* net/ixgbe: fix DCB configuration
1082+* net/ixgbe: fix intr callback unregister by adding retry
1083+* net/ixgbe: fix too many interrupts
1084+* net/liquidio: fix link state fetching during start
1085+* net/mlx4: avoid constant recreations in function
1086+* net/mlx4: fix a typo in header file
1087+* net/mlx4: fix broadcast Rx
1088+* net/mlx4: fix removal detection of stopped port
1089+* net/mlx4: fix RSS resource leak in case of error
1090+* net/mlx4: fix Rx resource leak in case of error
1091+* net/mlx4: fix single port configuration
1092+* net/mlx4: fix UDP flow rule limitation enforcement
1093+* net/mlx4: store RSS hash result in mbufs
1094+* net/mlx5: add data-plane debug message macro
1095+* net/mlx5: add missing function documentation
1096+* net/mlx5: add packet type index for TCP ack
1097+* net/mlx5: change device reference for secondary process
1098+* net/mlx5: change non failing function return values
1099+* net/mlx5: change pkt burst select function prototype
1100+* net/mlx5: change tunnel flow priority
1101+* net/mlx5: enforce RSS key length limitation
1102+* net/mlx5: fix allocation when no memory on device NUMA node
1103+* net/mlx5: fix build with clang on ARM
1104+* net/mlx5: fix calculation of Tx TSO inline room size
1105+* net/mlx5: fix close after start failure
1106+* net/mlx5: fix count in xstats
1107+* net/mlx5: fix CRC strip capability query
1108+* net/mlx5: fix disabling Tx packet inlining
1109+* net/mlx5: fix double free on error handling
1110+* net/mlx5: fix ethtool link setting call order
1111+* net/mlx5: fix existing file removal
1112+* net/mlx5: fix flow creation with a single target queue
1113+* net/mlx5: fix flow director conversion
1114+* net/mlx5: fix flow director drop rule deletion crash
1115+* net/mlx5: fix flow director mask
1116+* net/mlx5: fix flow director rule deletion crash
1117+* net/mlx5: fix flow validation
1118+* net/mlx5: fix icc build
1119+* net/mlx5: fix invalid flow item check
1120+* net/mlx5: fix IPv6 header fields
1121+* net/mlx5: fix link status behavior
1122+* net/mlx5: fix link status initialization
1123+* net/mlx5: fix link status to use wait to complete
1124+* net/mlx5: fix probe return value polarity
1125+* net/mlx5: fix reception of multiple MAC addresses
1126+* net/mlx5: fix resource leak in case of error
1127+* net/mlx5: fix RSS flow action bounds check
1128+* net/mlx5: fix RSS key length query
1129+* net/mlx5: fix secondary process mempool registration
1130+* net/mlx5: fix socket connection return value
1131+* net/mlx5: fix sriov flag
1132+* net/mlx5: fix synchronization on polling Rx completions
1133+* net/mlx5: improve flow error explanation
1134+* net/mlx5: map UAR address around huge pages
1135+* net/mlx5: mark parameters with unused attribute
1136+* net/mlx5: name parameters in function prototypes
1137+* net/mlx5: normalize function prototypes
1138+* net/mlx5: prefix all functions with mlx5
1139+* net/mlx5: refuse empty VLAN flow specification
1140+* net/mlx5: remove 32-bit support
1141+* net/mlx5: remove assert un-accessible from secondary process
1142+* net/mlx5: remove control path locks
1143+* net/mlx5: remove excessive data prefetch
1144+* net/mlx5: remove get priv internal function
1145+* net/mlx5: remove kernel version check
1146+* net/mlx5: remove useless empty lines
1147+* net/mlx5: setup RSS regardless of queue count
1148+* net/mlx5: split L3/L4 in flow director
1149+* net/mlx5: standardize on negative errno values
1150+* net/mlx5: use dynamic logging
1151+* net/mlx5: use port id in PMD log
1152+* net/mlx5: warn for unsuccessful memory registration
1153+* net/mlx: control netdevices through ioctl only
1154+* net/mrvl: fix crash when port is closed without starting
1155+* net/mrvl: fix Rx descriptors number
1156+* net/nfp: fix assigning port id in mbuf
1157+* net/nfp: fix barrier location
1158+* net/nfp: fix link speed capabilities
1159+* net/nfp: fix mbufs releasing when stop or close
1160+* net/octeontx: fix null pointer dereference
1161+* net/octeontx: fix uninitialized speed variable
1162+* net/octeontx: fix uninitialized variable in port open
1163+* net/qede/base: fix to support OVLAN mode
1164+* net/qede: fix alloc from socket 0
1165+* net/qede: fix device stop to remove primary MAC
1166+* net/qede: fix L2-handles used for RSS hash update
1167+* net/qede: fix memory alloc for multiple port reconfig
1168+* net/qede: fix missing loop index in Tx SG mode
1169+* net/qede: fix multicast filtering
1170+* net/qede: fix to prevent overwriting packet type
1171+* net/qede: fix unicast filter routine return code
1172+* net/qede: fix VF port creation sequence
1173+* net/sfc: add missing defines for SAL annotation
1174+* net/sfc: add missing Rx fini on RSS setup fail path
1175+* net/sfc/base: fix comparison always true warning
1176+* net/sfc: fix mbuf data alignment calculation
1177+* net/sfc: fix type of opaque pointer in perf profile handler
1178+* net/sfc: ignore spec bits not covered by mask
1179+* net/sfc: process RSS settings on Rx configure step
1180+* net/szedata2: fix format string for PCI address
1181+* net/szedata2: fix total stats
1182+* net/tap: fix icc build
1183+* net/vhost: fix crash when creating vdev dynamically
1184+* net/vhost: fix invalid state
1185+* net/vhost: initialise device as inactive
1186+* net/vmxnet3: set the queue shared buffer at start
1187+* nfp: allow for non-root user
1188+* nfp: restore the unlink operation
1189+* nfp: unlink the appropriate lock file
1190+* pci: remove duplicated symbol from map file
1191+* test/distributor: fix return type of thread function
1192+* test: fix memory flags test for low NUMA nodes number
1193+* test/mempool: fix autotest retry
1194+* test/pipeline: fix return type of stub miss
1195+* test/pipeline: fix type of table entry parameter
1196+* test/reorder: fix freeing mbuf twice
1197+* vfio: do not needlessly check for IOVA mode
1198+* vhost: check cmsg not null
1199+* vhost: fix compilation issue when vhost debug enabled
1200+* vhost: fix dead lock on closing in server mode
1201+* vhost: fix device cleanup at stop
1202+* vhost: fix message payload union in setting ring address
1203+* vhost: fix offset while mmaping log base address
1204+* vhost: fix realloc failure
1205+* vhost: fix ring index returned to master on stop
1206diff --git a/drivers/bus/dpaa/base/fman/fman.c b/drivers/bus/dpaa/base/fman/fman.c
1207index 3816dba..a9c88dd 100644
1208--- a/drivers/bus/dpaa/base/fman/fman.c
1209+++ b/drivers/bus/dpaa/base/fman/fman.c
1210@@ -475,6 +475,7 @@ fman_if_init(const struct device_node *dpa_node)
1211 if (!pool_node) {
1212 FMAN_ERR(-ENXIO, "%s: bad fsl,bman-buffer-pools\n",
1213 dname);
1214+ free(bpool);
1215 goto err;
1216 }
1217 pname = pool_node->full_name;
1218@@ -482,6 +483,7 @@ fman_if_init(const struct device_node *dpa_node)
1219 prop = of_get_property(pool_node, "fsl,bpid", &proplen);
1220 if (!prop) {
1221 FMAN_ERR(-EINVAL, "%s: no fsl,bpid\n", pname);
1222+ free(bpool);
1223 goto err;
1224 }
1225 assert(proplen == sizeof(*prop));
1226diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
1227index 480857e..001e56c 100644
1228--- a/drivers/bus/fslmc/fslmc_bus.c
1229+++ b/drivers/bus/fslmc/fslmc_bus.c
1230@@ -310,8 +310,9 @@ rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
1231 struct rte_dpaa2_device *dev;
1232
1233 TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
1234- if (start && &dev->device == start) {
1235- start = NULL; /* starting point found */
1236+ if (start != NULL) {
1237+ if (&dev->device == start)
1238+ start = NULL; /* starting point found */
1239 continue;
1240 }
1241
1242diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
1243index 104fdf9..6789748 100644
1244--- a/drivers/bus/pci/pci_common.c
1245+++ b/drivers/bus/pci/pci_common.c
1246@@ -488,17 +488,20 @@ static struct rte_device *
1247 pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
1248 const void *data)
1249 {
1250- struct rte_pci_device *dev;
1251+ const struct rte_pci_device *pstart;
1252+ struct rte_pci_device *pdev;
1253
1254- FOREACH_DEVICE_ON_PCIBUS(dev) {
1255- if (start && &dev->device == start) {
1256- start = NULL; /* starting point found */
1257- continue;
1258- }
1259- if (cmp(&dev->device, data) == 0)
1260- return &dev->device;
1261+ if (start != NULL) {
1262+ pstart = RTE_DEV_TO_PCI_CONST(start);
1263+ pdev = TAILQ_NEXT(pstart, next);
1264+ } else {
1265+ pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
1266+ }
1267+ while (pdev != NULL) {
1268+ if (cmp(&pdev->device, data) == 0)
1269+ return &pdev->device;
1270+ pdev = TAILQ_NEXT(pdev, next);
1271 }
1272-
1273 return NULL;
1274 }
1275
1276diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h
1277index d4a2996..c6af61d 100644
1278--- a/drivers/bus/pci/rte_bus_pci.h
1279+++ b/drivers/bus/pci/rte_bus_pci.h
1280@@ -103,6 +103,9 @@ struct rte_pci_device {
1281 */
1282 #define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)
1283
1284+#define RTE_DEV_TO_PCI_CONST(ptr) \
1285+ container_of(ptr, const struct rte_pci_device, device)
1286+
1287 #define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
1288
1289 /** Any PCI device identifier (vendor, device, ...) */
1290diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c
1291index ba0ed7a..a0ffb53 100644
1292--- a/drivers/bus/vdev/vdev.c
1293+++ b/drivers/bus/vdev/vdev.c
1294@@ -129,7 +129,7 @@ find_vdev(const char *name)
1295 TAILQ_FOREACH(dev, &vdev_device_list, next) {
1296 const char *devname = rte_vdev_device_name(dev);
1297
1298- if (!strncmp(devname, name, strlen(name)))
1299+ if (!strcmp(devname, name))
1300 return dev;
1301 }
1302
1303diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
1304index 8e58380..ae8c0c3 100644
1305--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
1306+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
1307@@ -211,9 +211,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
1308 .increment = 1
1309 },
1310 .digest_size = {
1311- .min = 16,
1312+ .min = 1,
1313 .max = 16,
1314- .increment = 0
1315+ .increment = 1
1316 },
1317 .iv_size = { 0 }
1318 }, }
1319@@ -232,9 +232,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
1320 .increment = 1
1321 },
1322 .digest_size = {
1323- .min = 20,
1324+ .min = 1,
1325 .max = 20,
1326- .increment = 0
1327+ .increment = 1
1328 },
1329 .iv_size = { 0 }
1330 }, }
1331@@ -253,9 +253,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
1332 .increment = 1
1333 },
1334 .digest_size = {
1335- .min = 28,
1336+ .min = 1,
1337 .max = 28,
1338- .increment = 0
1339+ .increment = 1
1340 },
1341 .iv_size = { 0 }
1342 }, }
1343@@ -274,9 +274,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
1344 .increment = 1
1345 },
1346 .digest_size = {
1347- .min = 32,
1348- .max = 32,
1349- .increment = 0
1350+ .min = 1,
1351+ .max = 32,
1352+ .increment = 1
1353 },
1354 .iv_size = { 0 }
1355 }, }
1356@@ -295,9 +295,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
1357 .increment = 1
1358 },
1359 .digest_size = {
1360- .min = 48,
1361+ .min = 1,
1362 .max = 48,
1363- .increment = 0
1364+ .increment = 1
1365 },
1366 .iv_size = { 0 }
1367 }, }
1368@@ -316,9 +316,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
1369 .increment = 1
1370 },
1371 .digest_size = {
1372- .min = 64,
1373+ .min = 1,
1374 .max = 64,
1375- .increment = 0
1376+ .increment = 1
1377 },
1378 .iv_size = { 0 }
1379 }, }
1380diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
1381index 822ce27..59ece95 100644
1382--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
1383+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
1384@@ -119,8 +119,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
1385 struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
1386 uint32_t nb_caps = 0, i;
1387
1388- if (sched_ctx->capabilities)
1389+ if (sched_ctx->capabilities) {
1390 rte_free(sched_ctx->capabilities);
1391+ sched_ctx->capabilities = NULL;
1392+ }
1393
1394 for (i = 0; i < sched_ctx->nb_slaves; i++) {
1395 struct rte_cryptodev_info dev_info;
1396@@ -490,8 +492,10 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
1397 sched_ctx->ops.option_set = scheduler->ops->option_set;
1398 sched_ctx->ops.option_get = scheduler->ops->option_get;
1399
1400- if (sched_ctx->private_ctx)
1401+ if (sched_ctx->private_ctx) {
1402 rte_free(sched_ctx->private_ctx);
1403+ sched_ctx->private_ctx = NULL;
1404+ }
1405
1406 if (sched_ctx->ops.create_private_ctx) {
1407 int ret = (*sched_ctx->ops.create_private_ctx)(dev);
1408diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
1409index df22f2a..84917d1 100644
1410--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
1411+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
1412@@ -59,7 +59,7 @@ extern "C" {
1413 #endif
1414
1415 /** Maximum number of multi-core worker cores */
1416-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64)
1417+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (RTE_MAX_LCORE - 1)
1418
1419 /** Round-robin scheduling mode string */
1420 #define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
1421diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
1422index 0cd5bce..14c33b9 100644
1423--- a/drivers/crypto/scheduler/scheduler_multicore.c
1424+++ b/drivers/crypto/scheduler/scheduler_multicore.c
1425@@ -49,8 +49,8 @@ struct mc_scheduler_ctx {
1426 uint32_t num_workers; /**< Number of workers polling */
1427 uint32_t stop_signal;
1428
1429- struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
1430- struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
1431+ struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
1432+ struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
1433 };
1434
1435 struct mc_scheduler_qp_ctx {
1436@@ -356,11 +356,13 @@ static int
1437 scheduler_create_private_ctx(struct rte_cryptodev *dev)
1438 {
1439 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
1440- struct mc_scheduler_ctx *mc_ctx;
1441+ struct mc_scheduler_ctx *mc_ctx = NULL;
1442 uint16_t i;
1443
1444- if (sched_ctx->private_ctx)
1445+ if (sched_ctx->private_ctx) {
1446 rte_free(sched_ctx->private_ctx);
1447+ sched_ctx->private_ctx = NULL;
1448+ }
1449
1450 mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
1451 rte_socket_id());
1452@@ -373,25 +375,48 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
1453 for (i = 0; i < sched_ctx->nb_wc; i++) {
1454 char r_name[16];
1455
1456- snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
1457- mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
1458- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
1459+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
1460+ "%u_%u", dev->data->dev_id, i);
1461+ mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
1462 if (!mc_ctx->sched_enq_ring[i]) {
1463- CS_LOG_ERR("Cannot create ring for worker %u", i);
1464- return -1;
1465+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
1466+ PER_SLAVE_BUFF_SIZE,
1467+ rte_socket_id(),
1468+ RING_F_SC_DEQ | RING_F_SP_ENQ);
1469+ if (!mc_ctx->sched_enq_ring[i]) {
1470+ CS_LOG_ERR("Cannot create ring for worker %u",
1471+ i);
1472+ goto exit;
1473+ }
1474 }
1475- snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
1476- mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
1477- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
1478+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
1479+ "%u_%u", dev->data->dev_id, i);
1480+ mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
1481 if (!mc_ctx->sched_deq_ring[i]) {
1482- CS_LOG_ERR("Cannot create ring for worker %u", i);
1483- return -1;
1484+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
1485+ PER_SLAVE_BUFF_SIZE,
1486+ rte_socket_id(),
1487+ RING_F_SC_DEQ | RING_F_SP_ENQ);
1488+ if (!mc_ctx->sched_deq_ring[i]) {
1489+ CS_LOG_ERR("Cannot create ring for worker %u",
1490+ i);
1491+ goto exit;
1492+ }
1493 }
1494 }
1495
1496 sched_ctx->private_ctx = (void *)mc_ctx;
1497
1498 return 0;
1499+
1500+exit:
1501+ for (i = 0; i < sched_ctx->nb_wc; i++) {
1502+ rte_ring_free(mc_ctx->sched_enq_ring[i]);
1503+ rte_ring_free(mc_ctx->sched_deq_ring[i]);
1504+ }
1505+ rte_free(mc_ctx);
1506+
1507+ return -1;
1508 }
1509
1510 struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
1511diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
1512index 1dd1bc3..4874191 100644
1513--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
1514+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
1515@@ -362,8 +362,10 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
1516 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
1517 struct psd_scheduler_ctx *psd_ctx;
1518
1519- if (sched_ctx->private_ctx)
1520+ if (sched_ctx->private_ctx) {
1521 rte_free(sched_ctx->private_ctx);
1522+ sched_ctx->private_ctx = NULL;
1523+ }
1524
1525 psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
1526 rte_socket_id());
1527diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
1528index acdf636..fcba119 100644
1529--- a/drivers/crypto/scheduler/scheduler_pmd.c
1530+++ b/drivers/crypto/scheduler/scheduler_pmd.c
1531@@ -48,7 +48,8 @@ struct scheduler_init_params {
1532 uint32_t nb_slaves;
1533 enum rte_cryptodev_scheduler_mode mode;
1534 uint32_t enable_ordering;
1535- uint64_t wcmask;
1536+ uint16_t wc_pool[RTE_MAX_LCORE];
1537+ uint16_t nb_wc;
1538 char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
1539 [RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
1540 };
1541@@ -114,10 +115,6 @@ cryptodev_scheduler_create(const char *name,
1542 return -EFAULT;
1543 }
1544
1545- if (init_params->wcmask != 0)
1546- RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
1547- init_params->wcmask);
1548-
1549 dev->driver_id = cryptodev_driver_id;
1550 dev->dev_ops = rte_crypto_scheduler_pmd_ops;
1551
1552@@ -128,15 +125,12 @@ cryptodev_scheduler_create(const char *name,
1553 if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
1554 uint16_t i;
1555
1556- sched_ctx->nb_wc = 0;
1557+ sched_ctx->nb_wc = init_params->nb_wc;
1558
1559- for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
1560- if (init_params->wcmask & (1ULL << i)) {
1561- sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
1562- RTE_LOG(INFO, PMD,
1563- " Worker core[%u]=%u added\n",
1564- sched_ctx->nb_wc-1, i);
1565- }
1566+ for (i = 0; i < sched_ctx->nb_wc; i++) {
1567+ sched_ctx->wc_pool[i] = init_params->wc_pool[i];
1568+ RTE_LOG(INFO, PMD, " Worker core[%u]=%u added\n",
1569+ i, sched_ctx->wc_pool[i]);
1570 }
1571 }
1572
1573@@ -260,9 +254,47 @@ static int
1574 parse_coremask_arg(const char *key __rte_unused,
1575 const char *value, void *extra_args)
1576 {
1577+ int i, j, val;
1578+ uint16_t idx = 0;
1579+ char c;
1580 struct scheduler_init_params *params = extra_args;
1581
1582- params->wcmask = strtoull(value, NULL, 16);
1583+ params->nb_wc = 0;
1584+
1585+ if (value == NULL)
1586+ return -1;
1587+ /* Remove all blank characters ahead and after .
1588+ * Remove 0x/0X if exists.
1589+ */
1590+ while (isblank(*value))
1591+ value++;
1592+ if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
1593+ value += 2;
1594+ i = strlen(value);
1595+ while ((i > 0) && isblank(value[i - 1]))
1596+ i--;
1597+
1598+ if (i == 0)
1599+ return -1;
1600+
1601+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
1602+ c = value[i];
1603+ if (isxdigit(c) == 0) {
1604+ /* invalid characters */
1605+ return -1;
1606+ }
1607+ if (isdigit(c))
1608+ val = c - '0';
1609+ else if (isupper(c))
1610+ val = c - 'A' + 10;
1611+ else
1612+ val = c - 'a' + 10;
1613+
1614+ for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
1615+ if ((1 << j) & val)
1616+ params->wc_pool[params->nb_wc++] = idx;
1617+ }
1618+ }
1619
1620 return 0;
1621 }
1622@@ -274,7 +306,7 @@ parse_corelist_arg(const char *key __rte_unused,
1623 {
1624 struct scheduler_init_params *params = extra_args;
1625
1626- params->wcmask = 0ULL;
1627+ params->nb_wc = 0;
1628
1629 const char *token = value;
1630
1631@@ -282,7 +314,11 @@ parse_corelist_arg(const char *key __rte_unused,
1632 char *rval;
1633 unsigned int core = strtoul(token, &rval, 10);
1634
1635- params->wcmask |= 1ULL << core;
1636+ if (core >= RTE_MAX_LCORE) {
1637+ CS_LOG_ERR("Invalid worker core %u, should be smaller "
1638+ "than %u.\n", core, RTE_MAX_LCORE);
1639+ }
1640+ params->wc_pool[params->nb_wc++] = (uint16_t)core;
1641 token = (const char *)rval;
1642 if (token[0] == '\0')
1643 break;
1644diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
1645index d9b5235..75433db 100644
1646--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
1647+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
1648@@ -74,6 +74,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
1649 sched_ctx->init_slave_names[i]);
1650
1651 rte_free(sched_ctx->init_slave_names[i]);
1652+ sched_ctx->init_slave_names[i] = NULL;
1653
1654 sched_ctx->nb_init_slaves -= 1;
1655 }
1656@@ -289,11 +290,15 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
1657 }
1658 }
1659
1660- if (sched_ctx->private_ctx)
1661+ if (sched_ctx->private_ctx) {
1662 rte_free(sched_ctx->private_ctx);
1663+ sched_ctx->private_ctx = NULL;
1664+ }
1665
1666- if (sched_ctx->capabilities)
1667+ if (sched_ctx->capabilities) {
1668 rte_free(sched_ctx->capabilities);
1669+ sched_ctx->capabilities = NULL;
1670+ }
1671
1672 return 0;
1673 }
1674diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
1675index e606716..bab4334 100644
1676--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
1677+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
1678@@ -89,7 +89,7 @@ struct scheduler_ctx {
1679
1680 char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
1681 char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
1682- uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
1683+ uint16_t wc_pool[RTE_MAX_LCORE];
1684 uint16_t nb_wc;
1685
1686 char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
1687diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
1688index 590224b..8b13be9 100644
1689--- a/drivers/crypto/zuc/rte_zuc_pmd.c
1690+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
1691@@ -40,7 +40,7 @@
1692
1693 #include "rte_zuc_pmd_private.h"
1694
1695-#define ZUC_MAX_BURST 8
1696+#define ZUC_MAX_BURST 4
1697 #define BYTE_LEN 8
1698
1699 static uint8_t cryptodev_driver_id;
1700@@ -196,10 +196,10 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
1701 return sess;
1702 }
1703
1704-/** Encrypt/decrypt mbufs with same cipher key. */
1705+/** Encrypt/decrypt mbufs. */
1706 static uint8_t
1707 process_zuc_cipher_op(struct rte_crypto_op **ops,
1708- struct zuc_session *session,
1709+ struct zuc_session **sessions,
1710 uint8_t num_ops)
1711 {
1712 unsigned i;
1713@@ -208,6 +208,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
1714 uint8_t *iv[ZUC_MAX_BURST];
1715 uint32_t num_bytes[ZUC_MAX_BURST];
1716 uint8_t *cipher_keys[ZUC_MAX_BURST];
1717+ struct zuc_session *sess;
1718
1719 for (i = 0; i < num_ops; i++) {
1720 if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
1721@@ -218,6 +219,8 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
1722 break;
1723 }
1724
1725+ sess = sessions[i];
1726+
1727 #ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
1728 if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
1729 (ops[i]->sym->m_dst != NULL &&
1730@@ -239,10 +242,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
1731 rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
1732 (ops[i]->sym->cipher.data.offset >> 3);
1733 iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
1734- session->cipher_iv_offset);
1735+ sess->cipher_iv_offset);
1736 num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
1737
1738- cipher_keys[i] = session->pKey_cipher;
1739+ cipher_keys[i] = sess->pKey_cipher;
1740
1741 processed_ops++;
1742 }
1743@@ -253,10 +256,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
1744 return processed_ops;
1745 }
1746
1747-/** Generate/verify hash from mbufs with same hash key. */
1748+/** Generate/verify hash from mbufs. */
1749 static int
1750 process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
1751- struct zuc_session *session,
1752+ struct zuc_session **sessions,
1753 uint8_t num_ops)
1754 {
1755 unsigned i;
1756@@ -265,6 +268,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
1757 uint32_t *dst;
1758 uint32_t length_in_bits;
1759 uint8_t *iv;
1760+ struct zuc_session *sess;
1761
1762 for (i = 0; i < num_ops; i++) {
1763 /* Data must be byte aligned */
1764@@ -274,17 +278,19 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
1765 break;
1766 }
1767
1768+ sess = sessions[i];
1769+
1770 length_in_bits = ops[i]->sym->auth.data.length;
1771
1772 src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
1773 (ops[i]->sym->auth.data.offset >> 3);
1774 iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
1775- session->auth_iv_offset);
1776+ sess->auth_iv_offset);
1777
1778- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
1779+ if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
1780 dst = (uint32_t *)qp->temp_digest;
1781
1782- sso_zuc_eia3_1_buffer(session->pKey_hash,
1783+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
1784 iv, src,
1785 length_in_bits, dst);
1786 /* Verify digest. */
1787@@ -294,7 +300,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
1788 } else {
1789 dst = (uint32_t *)ops[i]->sym->auth.digest.data;
1790
1791- sso_zuc_eia3_1_buffer(session->pKey_hash,
1792+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
1793 iv, src,
1794 length_in_bits, dst);
1795 }
1796@@ -304,33 +310,34 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
1797 return processed_ops;
1798 }
1799
1800-/** Process a batch of crypto ops which shares the same session. */
1801+/** Process a batch of crypto ops which shares the same operation type. */
1802 static int
1803-process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
1804+process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
1805+ struct zuc_session **sessions,
1806 struct zuc_qp *qp, uint8_t num_ops,
1807 uint16_t *accumulated_enqueued_ops)
1808 {
1809 unsigned i;
1810 unsigned enqueued_ops, processed_ops;
1811
1812- switch (session->op) {
1813+ switch (op_type) {
1814 case ZUC_OP_ONLY_CIPHER:
1815 processed_ops = process_zuc_cipher_op(ops,
1816- session, num_ops);
1817+ sessions, num_ops);
1818 break;
1819 case ZUC_OP_ONLY_AUTH:
1820- processed_ops = process_zuc_hash_op(qp, ops, session,
1821+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
1822 num_ops);
1823 break;
1824 case ZUC_OP_CIPHER_AUTH:
1825- processed_ops = process_zuc_cipher_op(ops, session,
1826+ processed_ops = process_zuc_cipher_op(ops, sessions,
1827 num_ops);
1828- process_zuc_hash_op(qp, ops, session, processed_ops);
1829+ process_zuc_hash_op(qp, ops, sessions, processed_ops);
1830 break;
1831 case ZUC_OP_AUTH_CIPHER:
1832- processed_ops = process_zuc_hash_op(qp, ops, session,
1833+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
1834 num_ops);
1835- process_zuc_cipher_op(ops, session, processed_ops);
1836+ process_zuc_cipher_op(ops, sessions, processed_ops);
1837 break;
1838 default:
1839 /* Operation not supported. */
1840@@ -346,10 +353,10 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
1841 ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1842 /* Free session if a session-less crypto op. */
1843 if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1844- memset(session, 0, sizeof(struct zuc_session));
1845+ memset(sessions[i], 0, sizeof(struct zuc_session));
1846 memset(ops[i]->sym->session, 0,
1847 rte_cryptodev_get_header_session_size());
1848- rte_mempool_put(qp->sess_mp, session);
1849+ rte_mempool_put(qp->sess_mp, sessions[i]);
1850 rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
1851 ops[i]->sym->session = NULL;
1852 }
1853@@ -370,7 +377,10 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
1854 struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
1855 struct rte_crypto_op *curr_c_op;
1856
1857- struct zuc_session *prev_sess = NULL, *curr_sess = NULL;
1858+ struct zuc_session *curr_sess;
1859+ struct zuc_session *sessions[ZUC_MAX_BURST];
1860+ enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
1861+ enum zuc_operation curr_zuc_op;
1862 struct zuc_qp *qp = queue_pair;
1863 unsigned i;
1864 uint8_t burst_size = 0;
1865@@ -380,9 +390,6 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
1866 for (i = 0; i < nb_ops; i++) {
1867 curr_c_op = ops[i];
1868
1869- /* Set status as enqueued (not processed yet) by default. */
1870- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1871-
1872 curr_sess = zuc_get_session(qp, curr_c_op);
1873 if (unlikely(curr_sess == NULL ||
1874 curr_sess->op == ZUC_OP_NOT_SUPPORTED)) {
1875@@ -391,50 +398,63 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
1876 break;
1877 }
1878
1879- /* Batch ops that share the same session. */
1880- if (prev_sess == NULL) {
1881- prev_sess = curr_sess;
1882- c_ops[burst_size++] = curr_c_op;
1883- } else if (curr_sess == prev_sess) {
1884- c_ops[burst_size++] = curr_c_op;
1885+ curr_zuc_op = curr_sess->op;
1886+
1887+ /*
1888+ * Batch ops that share the same operation type
1889+ * (cipher only, auth only...).
1890+ */
1891+ if (burst_size == 0) {
1892+ prev_zuc_op = curr_zuc_op;
1893+ c_ops[0] = curr_c_op;
1894+ sessions[0] = curr_sess;
1895+ burst_size++;
1896+ } else if (curr_zuc_op == prev_zuc_op) {
1897+ c_ops[burst_size] = curr_c_op;
1898+ sessions[burst_size] = curr_sess;
1899+ burst_size++;
1900 /*
1901 * When there are enough ops to process in a batch,
1902 * process them, and start a new batch.
1903 */
1904 if (burst_size == ZUC_MAX_BURST) {
1905- processed_ops = process_ops(c_ops, prev_sess,
1906- qp, burst_size, &enqueued_ops);
1907+ processed_ops = process_ops(c_ops, curr_zuc_op,
1908+ sessions, qp, burst_size,
1909+ &enqueued_ops);
1910 if (processed_ops < burst_size) {
1911 burst_size = 0;
1912 break;
1913 }
1914
1915 burst_size = 0;
1916- prev_sess = NULL;
1917 }
1918 } else {
1919 /*
1920- * Different session, process the ops
1921- * of the previous session.
1922+ * Different operation type, process the ops
1923+ * of the previous type.
1924 */
1925- processed_ops = process_ops(c_ops, prev_sess,
1926- qp, burst_size, &enqueued_ops);
1927+ processed_ops = process_ops(c_ops, prev_zuc_op,
1928+ sessions, qp, burst_size,
1929+ &enqueued_ops);
1930 if (processed_ops < burst_size) {
1931 burst_size = 0;
1932 break;
1933 }
1934
1935 burst_size = 0;
1936- prev_sess = curr_sess;
1937+ prev_zuc_op = curr_zuc_op;
1938
1939- c_ops[burst_size++] = curr_c_op;
1940+ c_ops[0] = curr_c_op;
1941+ sessions[0] = curr_sess;
1942+ burst_size++;
1943 }
1944 }
1945
1946 if (burst_size != 0) {
1947- /* Process the crypto ops of the last session. */
1948- processed_ops = process_ops(c_ops, prev_sess,
1949- qp, burst_size, &enqueued_ops);
1950+ /* Process the crypto ops of the last operation type. */
1951+ processed_ops = process_ops(c_ops, prev_zuc_op,
1952+ sessions, qp, burst_size,
1953+ &enqueued_ops);
1954 }
1955
1956 qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
1957diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
1958index eeeb231..56ea124 100644
1959--- a/drivers/event/dpaa2/dpaa2_eventdev.c
1960+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
1961@@ -489,7 +489,6 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
1962 dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
1963 0, dpaa2_portal->dpio_dev->token,
1964 evq_info->dpcon->dpcon_id);
1965- evq_info->link = 0;
1966 }
1967
1968 return (int)nb_unlinks;
1969@@ -510,8 +509,6 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
1970
1971 for (i = 0; i < nb_links; i++) {
1972 evq_info = &priv->evq_info[queues[i]];
1973- if (evq_info->link)
1974- continue;
1975
1976 ret = dpio_add_static_dequeue_channel(
1977 dpaa2_portal->dpio_dev->dpio,
1978@@ -526,7 +523,6 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
1979 qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
1980 channel_index, 1);
1981 evq_info->dpcon->channel_index = channel_index;
1982- evq_info->link = 1;
1983 }
1984
1985 RTE_SET_USED(priorities);
1986@@ -540,7 +536,6 @@ err:
1987 dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
1988 0, dpaa2_portal->dpio_dev->token,
1989 evq_info->dpcon->dpcon_id);
1990- evq_info->link = 0;
1991 }
1992 return ret;
1993 }
1994diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
1995index ae8e07e..5b9c80e 100644
1996--- a/drivers/event/dpaa2/dpaa2_eventdev.h
1997+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
1998@@ -100,7 +100,6 @@ struct evq_info_t {
1999 struct dpaa2_dpci_dev *dpci;
2000 /* Configuration provided by the user */
2001 uint32_t event_queue_cfg;
2002- uint8_t link;
2003 };
2004
2005 struct dpaa2_eventdev {
2006diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
2007index d515408..5a101ce 100644
2008--- a/drivers/net/af_packet/rte_eth_af_packet.c
2009+++ b/drivers/net/af_packet/rte_eth_af_packet.c
2010@@ -124,7 +124,7 @@ static struct rte_eth_link pmd_link = {
2011 .link_speed = ETH_SPEED_NUM_10G,
2012 .link_duplex = ETH_LINK_FULL_DUPLEX,
2013 .link_status = ETH_LINK_DOWN,
2014- .link_autoneg = ETH_LINK_AUTONEG
2015+ .link_autoneg = ETH_LINK_FIXED,
2016 };
2017
2018 static uint16_t
2019diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
2020index 9394f6c..98b08d1 100644
2021--- a/drivers/net/bnx2x/bnx2x.c
2022+++ b/drivers/net/bnx2x/bnx2x.c
2023@@ -170,10 +170,10 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
2024
2025 dma->sc = sc;
2026 if (IS_PF(sc))
2027- sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
2028+ snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
2029 rte_get_timer_cycles());
2030 else
2031- sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
2032+ snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
2033 rte_get_timer_cycles());
2034
2035 /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */
2036@@ -8289,16 +8289,6 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
2037 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
2038 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
2039 }
2040-
2041-/*
2042- * Enable internal target-read (in case we are probed after PF
2043- * FLR). Must be done prior to any BAR read access. Only for
2044- * 57712 and up
2045- */
2046- if (!CHIP_IS_E1x(sc)) {
2047- REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
2048- 1);
2049- }
2050 }
2051
2052 /* get the nvram size */
2053@@ -9675,7 +9665,17 @@ int bnx2x_attach(struct bnx2x_softc *sc)
2054 bnx2x_init_rte(sc);
2055
2056 if (IS_PF(sc)) {
2057-/* get device info and set params */
2058+ /* Enable internal target-read (in case we are probed after PF
2059+ * FLR). Must be done prior to any BAR read access. Only for
2060+ * 57712 and up
2061+ */
2062+ if (!CHIP_IS_E1x(sc)) {
2063+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
2064+ 1);
2065+ DELAY(200000);
2066+ }
2067+
2068+ /* get device info and set params */
2069 if (bnx2x_get_device_info(sc) != 0) {
2070 PMD_DRV_LOG(NOTICE, "getting device info");
2071 return -ENXIO;
2072@@ -9684,7 +9684,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
2073 /* get phy settings from shmem and 'and' against admin settings */
2074 bnx2x_get_phy_info(sc);
2075 } else {
2076-/* Left mac of VF unfilled, PF should set it for VF */
2077+ /* Left mac of VF unfilled, PF should set it for VF */
2078 memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
2079 }
2080
2081diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
2082index 9d0f313..74e1bea 100644
2083--- a/drivers/net/bnx2x/elink.c
2084+++ b/drivers/net/bnx2x/elink.c
2085@@ -4143,9 +4143,9 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params,
2086 elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1);
2087 }
2088
2089-static void elink_warpcore_config_init(struct elink_phy *phy,
2090- struct elink_params *params,
2091- struct elink_vars *vars)
2092+static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
2093+ struct elink_params *params,
2094+ struct elink_vars *vars)
2095 {
2096 struct bnx2x_softc *sc = params->sc;
2097 uint32_t serdes_net_if;
2098@@ -4222,7 +4222,7 @@ static void elink_warpcore_config_init(struct elink_phy *phy,
2099 case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
2100 if (vars->line_speed != ELINK_SPEED_20000) {
2101 PMD_DRV_LOG(DEBUG, "Speed not supported yet");
2102- return;
2103+ return 0;
2104 }
2105 PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS");
2106 elink_warpcore_set_20G_DXGXS(sc, phy, lane);
2107@@ -4242,13 +4242,15 @@ static void elink_warpcore_config_init(struct elink_phy *phy,
2108 PMD_DRV_LOG(DEBUG,
2109 "Unsupported Serdes Net Interface 0x%x",
2110 serdes_net_if);
2111- return;
2112+ return 0;
2113 }
2114 }
2115
2116 /* Take lane out of reset after configuration is finished */
2117 elink_warpcore_reset_lane(sc, phy, 0);
2118 PMD_DRV_LOG(DEBUG, "Exit config init");
2119+
2120+ return 0;
2121 }
2122
2123 static void elink_warpcore_link_reset(struct elink_phy *phy,
2124@@ -5226,9 +5228,9 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
2125 return ELINK_STATUS_OK;
2126 }
2127
2128-static elink_status_t elink_link_settings_status(struct elink_phy *phy,
2129- struct elink_params *params,
2130- struct elink_vars *vars)
2131+static uint8_t elink_link_settings_status(struct elink_phy *phy,
2132+ struct elink_params *params,
2133+ struct elink_vars *vars)
2134 {
2135 struct bnx2x_softc *sc = params->sc;
2136
2137@@ -5299,9 +5301,9 @@ static elink_status_t elink_link_settings_status(struct elink_phy *phy,
2138 return rc;
2139 }
2140
2141-static elink_status_t elink_warpcore_read_status(struct elink_phy *phy,
2142- struct elink_params *params,
2143- struct elink_vars *vars)
2144+static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
2145+ struct elink_params *params,
2146+ struct elink_vars *vars)
2147 {
2148 struct bnx2x_softc *sc = params->sc;
2149 uint8_t lane;
2150@@ -5520,9 +5522,9 @@ static void elink_set_preemphasis(struct elink_phy *phy,
2151 }
2152 }
2153
2154-static void elink_xgxs_config_init(struct elink_phy *phy,
2155- struct elink_params *params,
2156- struct elink_vars *vars)
2157+static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
2158+ struct elink_params *params,
2159+ struct elink_vars *vars)
2160 {
2161 uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) ||
2162 (params->loopback_mode == ELINK_LOOPBACK_XGXS));
2163@@ -5567,6 +5569,8 @@ static void elink_xgxs_config_init(struct elink_phy *phy,
2164
2165 elink_initialize_sgmii_process(phy, params, vars);
2166 }
2167+
2168+ return 0;
2169 }
2170
2171 static elink_status_t elink_prepare_xgxs(struct elink_phy *phy,
2172@@ -5751,8 +5755,8 @@ static void elink_link_int_ack(struct elink_params *params,
2173 }
2174 }
2175
2176-static elink_status_t elink_format_ver(uint32_t num, uint8_t * str,
2177- uint16_t * len)
2178+static uint8_t elink_format_ver(uint32_t num, uint8_t * str,
2179+ uint16_t * len)
2180 {
2181 uint8_t *str_ptr = str;
2182 uint32_t mask = 0xf0000000;
2183@@ -5790,8 +5794,8 @@ static elink_status_t elink_format_ver(uint32_t num, uint8_t * str,
2184 return ELINK_STATUS_OK;
2185 }
2186
2187-static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
2188- uint8_t * str, uint16_t * len)
2189+static uint8_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
2190+ uint8_t * str, uint16_t * len)
2191 {
2192 str[0] = '\0';
2193 (*len)--;
2194@@ -6802,9 +6806,9 @@ static void elink_8073_specific_func(struct elink_phy *phy,
2195 }
2196 }
2197
2198-static elink_status_t elink_8073_config_init(struct elink_phy *phy,
2199- struct elink_params *params,
2200- struct elink_vars *vars)
2201+static uint8_t elink_8073_config_init(struct elink_phy *phy,
2202+ struct elink_params *params,
2203+ struct elink_vars *vars)
2204 {
2205 struct bnx2x_softc *sc = params->sc;
2206 uint16_t val = 0, tmp1;
2207@@ -7097,9 +7101,9 @@ static void elink_8073_link_reset(__rte_unused struct elink_phy *phy,
2208 /******************************************************************/
2209 /* BNX2X8705 PHY SECTION */
2210 /******************************************************************/
2211-static elink_status_t elink_8705_config_init(struct elink_phy *phy,
2212- struct elink_params *params,
2213- __rte_unused struct elink_vars
2214+static uint8_t elink_8705_config_init(struct elink_phy *phy,
2215+ struct elink_params *params,
2216+ __rte_unused struct elink_vars
2217 *vars)
2218 {
2219 struct bnx2x_softc *sc = params->sc;
2220@@ -8403,9 +8407,9 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
2221 return ELINK_STATUS_OK;
2222 }
2223
2224-static elink_status_t elink_8706_read_status(struct elink_phy *phy,
2225- struct elink_params *params,
2226- struct elink_vars *vars)
2227+static uint8_t elink_8706_read_status(struct elink_phy *phy,
2228+ struct elink_params *params,
2229+ struct elink_vars *vars)
2230 {
2231 return elink_8706_8726_read_status(phy, params, vars);
2232 }
2233@@ -8477,9 +8481,9 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy,
2234 return link_up;
2235 }
2236
2237-static elink_status_t elink_8726_config_init(struct elink_phy *phy,
2238- struct elink_params *params,
2239- struct elink_vars *vars)
2240+static uint8_t elink_8726_config_init(struct elink_phy *phy,
2241+ struct elink_params *params,
2242+ struct elink_vars *vars)
2243 {
2244 struct bnx2x_softc *sc = params->sc;
2245 PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726");
2246@@ -8684,9 +8688,9 @@ static void elink_8727_config_speed(struct elink_phy *phy,
2247 }
2248 }
2249
2250-static elink_status_t elink_8727_config_init(struct elink_phy *phy,
2251- struct elink_params *params,
2252- __rte_unused struct elink_vars
2253+static uint8_t elink_8727_config_init(struct elink_phy *phy,
2254+ struct elink_params *params,
2255+ __rte_unused struct elink_vars
2256 *vars)
2257 {
2258 uint32_t tx_en_mode;
2259@@ -9291,7 +9295,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
2260 return ELINK_STATUS_OK;
2261 }
2262
2263-static elink_status_t elink_8481_config_init(struct elink_phy *phy,
2264+static uint8_t elink_8481_config_init(struct elink_phy *phy,
2265 struct elink_params *params,
2266 struct elink_vars *vars)
2267 {
2268@@ -9442,8 +9446,8 @@ static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc,
2269 return reset_gpios;
2270 }
2271
2272-static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy,
2273- struct elink_params *params)
2274+static void elink_84833_hw_reset_phy(struct elink_phy *phy,
2275+ struct elink_params *params)
2276 {
2277 struct bnx2x_softc *sc = params->sc;
2278 uint8_t reset_gpios;
2279@@ -9471,8 +9475,6 @@ static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy,
2280 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2281 DELAY(10);
2282 PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios);
2283-
2284- return ELINK_STATUS_OK;
2285 }
2286
2287 static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,
2288@@ -9513,9 +9515,9 @@ static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy,
2289 }
2290
2291 #define PHY84833_CONSTANT_LATENCY 1193
2292-static elink_status_t elink_848x3_config_init(struct elink_phy *phy,
2293- struct elink_params *params,
2294- struct elink_vars *vars)
2295+static uint8_t elink_848x3_config_init(struct elink_phy *phy,
2296+ struct elink_params *params,
2297+ struct elink_vars *vars)
2298 {
2299 struct bnx2x_softc *sc = params->sc;
2300 uint8_t port, initialize = 1;
2301@@ -9819,7 +9821,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
2302 return link_up;
2303 }
2304
2305-static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
2306+static uint8_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
2307 uint16_t * len)
2308 {
2309 elink_status_t status = ELINK_STATUS_OK;
2310@@ -10146,9 +10148,9 @@ static void elink_54618se_specific_func(struct elink_phy *phy,
2311 }
2312 }
2313
2314-static elink_status_t elink_54618se_config_init(struct elink_phy *phy,
2315- struct elink_params *params,
2316- struct elink_vars *vars)
2317+static uint8_t elink_54618se_config_init(struct elink_phy *phy,
2318+ struct elink_params *params,
2319+ struct elink_vars *vars)
2320 {
2321 struct bnx2x_softc *sc = params->sc;
2322 uint8_t port;
2323@@ -10542,9 +10544,9 @@ static void elink_7101_config_loopback(struct elink_phy *phy,
2324 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
2325 }
2326
2327-static elink_status_t elink_7101_config_init(struct elink_phy *phy,
2328- struct elink_params *params,
2329- struct elink_vars *vars)
2330+static uint8_t elink_7101_config_init(struct elink_phy *phy,
2331+ struct elink_params *params,
2332+ struct elink_vars *vars)
2333 {
2334 uint16_t fw_ver1, fw_ver2, val;
2335 struct bnx2x_softc *sc = params->sc;
2336@@ -10614,8 +10616,8 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy,
2337 return link_up;
2338 }
2339
2340-static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
2341- uint16_t * len)
2342+static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
2343+ uint16_t * len)
2344 {
2345 if (*len < 5)
2346 return ELINK_STATUS_ERROR;
2347@@ -10680,14 +10682,14 @@ static const struct elink_phy phy_null = {
2348 .speed_cap_mask = 0,
2349 .req_duplex = 0,
2350 .rsrv = 0,
2351- .config_init = (config_init_t) NULL,
2352- .read_status = (read_status_t) NULL,
2353- .link_reset = (link_reset_t) NULL,
2354- .config_loopback = (config_loopback_t) NULL,
2355- .format_fw_ver = (format_fw_ver_t) NULL,
2356- .hw_reset = (hw_reset_t) NULL,
2357- .set_link_led = (set_link_led_t) NULL,
2358- .phy_specific_func = (phy_specific_func_t) NULL
2359+ .config_init = NULL,
2360+ .read_status = NULL,
2361+ .link_reset = NULL,
2362+ .config_loopback = NULL,
2363+ .format_fw_ver = NULL,
2364+ .hw_reset = NULL,
2365+ .set_link_led = NULL,
2366+ .phy_specific_func = NULL
2367 };
2368
2369 static const struct elink_phy phy_serdes = {
2370@@ -10714,14 +10716,14 @@ static const struct elink_phy phy_serdes = {
2371 .speed_cap_mask = 0,
2372 .req_duplex = 0,
2373 .rsrv = 0,
2374- .config_init = (config_init_t) elink_xgxs_config_init,
2375- .read_status = (read_status_t) elink_link_settings_status,
2376- .link_reset = (link_reset_t) elink_int_link_reset,
2377- .config_loopback = (config_loopback_t) NULL,
2378- .format_fw_ver = (format_fw_ver_t) NULL,
2379- .hw_reset = (hw_reset_t) NULL,
2380- .set_link_led = (set_link_led_t) NULL,
2381- .phy_specific_func = (phy_specific_func_t) NULL
2382+ .config_init = elink_xgxs_config_init,
2383+ .read_status = elink_link_settings_status,
2384+ .link_reset = elink_int_link_reset,
2385+ .config_loopback = NULL,
2386+ .format_fw_ver = NULL,
2387+ .hw_reset = NULL,
2388+ .set_link_led = NULL,
2389+ .phy_specific_func = NULL
2390 };
2391
2392 static const struct elink_phy phy_xgxs = {
2393@@ -10749,14 +10751,14 @@ static const struct elink_phy phy_xgxs = {
2394 .speed_cap_mask = 0,
2395 .req_duplex = 0,
2396 .rsrv = 0,
2397- .config_init = (config_init_t) elink_xgxs_config_init,
2398- .read_status = (read_status_t) elink_link_settings_status,
2399- .link_reset = (link_reset_t) elink_int_link_reset,
2400- .config_loopback = (config_loopback_t) elink_set_xgxs_loopback,
2401- .format_fw_ver = (format_fw_ver_t) NULL,
2402- .hw_reset = (hw_reset_t) NULL,
2403- .set_link_led = (set_link_led_t) NULL,
2404- .phy_specific_func = (phy_specific_func_t) elink_xgxs_specific_func
2405+ .config_init = elink_xgxs_config_init,
2406+ .read_status = elink_link_settings_status,
2407+ .link_reset = elink_int_link_reset,
2408+ .config_loopback = elink_set_xgxs_loopback,
2409+ .format_fw_ver = NULL,
2410+ .hw_reset = NULL,
2411+ .set_link_led = NULL,
2412+ .phy_specific_func = elink_xgxs_specific_func
2413 };
2414
2415 static const struct elink_phy phy_warpcore = {
2416@@ -10785,14 +10787,14 @@ static const struct elink_phy phy_warpcore = {
2417 .speed_cap_mask = 0,
2418 /* req_duplex = */ 0,
2419 /* rsrv = */ 0,
2420- .config_init = (config_init_t) elink_warpcore_config_init,
2421- .read_status = (read_status_t) elink_warpcore_read_status,
2422- .link_reset = (link_reset_t) elink_warpcore_link_reset,
2423- .config_loopback = (config_loopback_t) elink_set_warpcore_loopback,
2424- .format_fw_ver = (format_fw_ver_t) NULL,
2425- .hw_reset = (hw_reset_t) elink_warpcore_hw_reset,
2426- .set_link_led = (set_link_led_t) NULL,
2427- .phy_specific_func = (phy_specific_func_t) NULL
2428+ .config_init = elink_warpcore_config_init,
2429+ .read_status = elink_warpcore_read_status,
2430+ .link_reset = elink_warpcore_link_reset,
2431+ .config_loopback = elink_set_warpcore_loopback,
2432+ .format_fw_ver = NULL,
2433+ .hw_reset = elink_warpcore_hw_reset,
2434+ .set_link_led = NULL,
2435+ .phy_specific_func = NULL
2436 };
2437
2438 static const struct elink_phy phy_7101 = {
2439@@ -10814,14 +10816,14 @@ static const struct elink_phy phy_7101 = {
2440 .speed_cap_mask = 0,
2441 .req_duplex = 0,
2442 .rsrv = 0,
2443- .config_init = (config_init_t) elink_7101_config_init,
2444- .read_status = (read_status_t) elink_7101_read_status,
2445- .link_reset = (link_reset_t) elink_common_ext_link_reset,
2446- .config_loopback = (config_loopback_t) elink_7101_config_loopback,
2447- .format_fw_ver = (format_fw_ver_t) elink_7101_format_ver,
2448- .hw_reset = (hw_reset_t) elink_7101_hw_reset,
2449- .set_link_led = (set_link_led_t) elink_7101_set_link_led,
2450- .phy_specific_func = (phy_specific_func_t) NULL
2451+ .config_init = elink_7101_config_init,
2452+ .read_status = elink_7101_read_status,
2453+ .link_reset = elink_common_ext_link_reset,
2454+ .config_loopback = elink_7101_config_loopback,
2455+ .format_fw_ver = elink_7101_format_ver,
2456+ .hw_reset = elink_7101_hw_reset,
2457+ .set_link_led = elink_7101_set_link_led,
2458+ .phy_specific_func = NULL
2459 };
2460
2461 static const struct elink_phy phy_8073 = {
2462@@ -10845,14 +10847,14 @@ static const struct elink_phy phy_8073 = {
2463 .speed_cap_mask = 0,
2464 .req_duplex = 0,
2465 .rsrv = 0,
2466- .config_init = (config_init_t) elink_8073_config_init,
2467- .read_status = (read_status_t) elink_8073_read_status,
2468- .link_reset = (link_reset_t) elink_8073_link_reset,
2469- .config_loopback = (config_loopback_t) NULL,
2470- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
2471- .hw_reset = (hw_reset_t) NULL,
2472- .set_link_led = (set_link_led_t) NULL,
2473- .phy_specific_func = (phy_specific_func_t) elink_8073_specific_func
2474+ .config_init = elink_8073_config_init,
2475+ .read_status = elink_8073_read_status,
2476+ .link_reset = elink_8073_link_reset,
2477+ .config_loopback = NULL,
2478+ .format_fw_ver = elink_format_ver,
2479+ .hw_reset = NULL,
2480+ .set_link_led = NULL,
2481+ .phy_specific_func = elink_8073_specific_func
2482 };
2483
2484 static const struct elink_phy phy_8705 = {
2485@@ -10873,14 +10875,14 @@ static const struct elink_phy phy_8705 = {
2486 .speed_cap_mask = 0,
2487 .req_duplex = 0,
2488 .rsrv = 0,
2489- .config_init = (config_init_t) elink_8705_config_init,
2490- .read_status = (read_status_t) elink_8705_read_status,
2491- .link_reset = (link_reset_t) elink_common_ext_link_reset,
2492- .config_loopback = (config_loopback_t) NULL,
2493- .format_fw_ver = (format_fw_ver_t) elink_null_format_ver,
2494- .hw_reset = (hw_reset_t) NULL,
2495- .set_link_led = (set_link_led_t) NULL,
2496- .phy_specific_func = (phy_specific_func_t) NULL
2497+ .config_init = elink_8705_config_init,
2498+ .read_status = elink_8705_read_status,
2499+ .link_reset = elink_common_ext_link_reset,
2500+ .config_loopback = NULL,
2501+ .format_fw_ver = elink_null_format_ver,
2502+ .hw_reset = NULL,
2503+ .set_link_led = NULL,
2504+ .phy_specific_func = NULL
2505 };
2506
2507 static const struct elink_phy phy_8706 = {
2508@@ -10902,14 +10904,14 @@ static const struct elink_phy phy_8706 = {
2509 .speed_cap_mask = 0,
2510 .req_duplex = 0,
2511 .rsrv = 0,
2512- .config_init = (config_init_t) elink_8706_config_init,
2513- .read_status = (read_status_t) elink_8706_read_status,
2514- .link_reset = (link_reset_t) elink_common_ext_link_reset,
2515- .config_loopback = (config_loopback_t) NULL,
2516- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
2517- .hw_reset = (hw_reset_t) NULL,
2518- .set_link_led = (set_link_led_t) NULL,
2519- .phy_specific_func = (phy_specific_func_t) NULL
2520+ .config_init = elink_8706_config_init,
2521+ .read_status = elink_8706_read_status,
2522+ .link_reset = elink_common_ext_link_reset,
2523+ .config_loopback = NULL,
2524+ .format_fw_ver = elink_format_ver,
2525+ .hw_reset = NULL,
2526+ .set_link_led = NULL,
2527+ .phy_specific_func = NULL
2528 };
2529
2530 static const struct elink_phy phy_8726 = {
2531@@ -10932,14 +10934,14 @@ static const struct elink_phy phy_8726 = {
2532 .speed_cap_mask = 0,
2533 .req_duplex = 0,
2534 .rsrv = 0,
2535- .config_init = (config_init_t) elink_8726_config_init,
2536- .read_status = (read_status_t) elink_8726_read_status,
2537- .link_reset = (link_reset_t) elink_8726_link_reset,
2538- .config_loopback = (config_loopback_t) elink_8726_config_loopback,
2539- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
2540- .hw_reset = (hw_reset_t) NULL,
2541- .set_link_led = (set_link_led_t) NULL,
2542- .phy_specific_func = (phy_specific_func_t) NULL
2543+ .config_init = elink_8726_config_init,
2544+ .read_status = elink_8726_read_status,
2545+ .link_reset = elink_8726_link_reset,
2546+ .config_loopback = elink_8726_config_loopback,
2547+ .format_fw_ver = elink_format_ver,
2548+ .hw_reset = NULL,
2549+ .set_link_led = NULL,
2550+ .phy_specific_func = NULL
2551 };
2552
2553 static const struct elink_phy phy_8727 = {
2554@@ -10961,14 +10963,14 @@ static const struct elink_phy phy_8727 = {
2555 .speed_cap_mask = 0,
2556 .req_duplex = 0,
2557 .rsrv = 0,
2558- .config_init = (config_init_t) elink_8727_config_init,
2559- .read_status = (read_status_t) elink_8727_read_status,
2560- .link_reset = (link_reset_t) elink_8727_link_reset,
2561- .config_loopback = (config_loopback_t) NULL,
2562- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
2563- .hw_reset = (hw_reset_t) elink_8727_hw_reset,
2564- .set_link_led = (set_link_led_t) elink_8727_set_link_led,
2565- .phy_specific_func = (phy_specific_func_t) elink_8727_specific_func
2566+ .config_init = elink_8727_config_init,
2567+ .read_status = elink_8727_read_status,
2568+ .link_reset = elink_8727_link_reset,
2569+ .config_loopback = NULL,
2570+ .format_fw_ver = elink_format_ver,
2571+ .hw_reset = elink_8727_hw_reset,
2572+ .set_link_led = elink_8727_set_link_led,
2573+ .phy_specific_func = elink_8727_specific_func
2574 };
2575
2576 static const struct elink_phy phy_8481 = {
2577@@ -10996,14 +10998,14 @@ static const struct elink_phy phy_8481 = {
2578 .speed_cap_mask = 0,
2579 .req_duplex = 0,
2580 .rsrv = 0,
2581- .config_init = (config_init_t) elink_8481_config_init,
2582- .read_status = (read_status_t) elink_848xx_read_status,
2583- .link_reset = (link_reset_t) elink_8481_link_reset,
2584- .config_loopback = (config_loopback_t) NULL,
2585- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
2586- .hw_reset = (hw_reset_t) elink_8481_hw_reset,
2587- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
2588- .phy_specific_func = (phy_specific_func_t) NULL
2589+ .config_init = elink_8481_config_init,
2590+ .read_status = elink_848xx_read_status,
2591+ .link_reset = elink_8481_link_reset,
2592+ .config_loopback = NULL,
2593+ .format_fw_ver = elink_848xx_format_ver,
2594+ .hw_reset = elink_8481_hw_reset,
2595+ .set_link_led = elink_848xx_set_link_led,
2596+ .phy_specific_func = NULL
2597 };
2598
2599 static const struct elink_phy phy_84823 = {
2600@@ -11031,14 +11033,14 @@ static const struct elink_phy phy_84823 = {
2601 .speed_cap_mask = 0,
2602 .req_duplex = 0,
2603 .rsrv = 0,
2604- .config_init = (config_init_t) elink_848x3_config_init,
2605- .read_status = (read_status_t) elink_848xx_read_status,
2606- .link_reset = (link_reset_t) elink_848x3_link_reset,
2607- .config_loopback = (config_loopback_t) NULL,
2608- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
2609- .hw_reset = (hw_reset_t) NULL,
2610- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
2611- .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
2612+ .config_init = elink_848x3_config_init,
2613+ .read_status = elink_848xx_read_status,
2614+ .link_reset = elink_848x3_link_reset,
2615+ .config_loopback = NULL,
2616+ .format_fw_ver = elink_848xx_format_ver,
2617+ .hw_reset = NULL,
2618+ .set_link_led = elink_848xx_set_link_led,
2619+ .phy_specific_func = elink_848xx_specific_func
2620 };
2621
2622 static const struct elink_phy phy_84833 = {
2623@@ -11065,14 +11067,14 @@ static const struct elink_phy phy_84833 = {
2624 .speed_cap_mask = 0,
2625 .req_duplex = 0,
2626 .rsrv = 0,
2627- .config_init = (config_init_t) elink_848x3_config_init,
2628- .read_status = (read_status_t) elink_848xx_read_status,
2629- .link_reset = (link_reset_t) elink_848x3_link_reset,
2630- .config_loopback = (config_loopback_t) NULL,
2631- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
2632- .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
2633- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
2634- .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
2635+ .config_init = elink_848x3_config_init,
2636+ .read_status = elink_848xx_read_status,
2637+ .link_reset = elink_848x3_link_reset,
2638+ .config_loopback = NULL,
2639+ .format_fw_ver = elink_848xx_format_ver,
2640+ .hw_reset = elink_84833_hw_reset_phy,
2641+ .set_link_led = elink_848xx_set_link_led,
2642+ .phy_specific_func = elink_848xx_specific_func
2643 };
2644
2645 static const struct elink_phy phy_84834 = {
2646@@ -11098,14 +11100,14 @@ static const struct elink_phy phy_84834 = {
2647 .speed_cap_mask = 0,
2648 .req_duplex = 0,
2649 .rsrv = 0,
2650- .config_init = (config_init_t) elink_848x3_config_init,
2651- .read_status = (read_status_t) elink_848xx_read_status,
2652- .link_reset = (link_reset_t) elink_848x3_link_reset,
2653- .config_loopback = (config_loopback_t) NULL,
2654- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
2655- .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
2656- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
2657- .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
2658+ .config_init = elink_848x3_config_init,
2659+ .read_status = elink_848xx_read_status,
2660+ .link_reset = elink_848x3_link_reset,
2661+ .config_loopback = NULL,
2662+ .format_fw_ver = elink_848xx_format_ver,
2663+ .hw_reset = elink_84833_hw_reset_phy,
2664+ .set_link_led = elink_848xx_set_link_led,
2665+ .phy_specific_func = elink_848xx_specific_func
2666 };
2667
2668 static const struct elink_phy phy_54618se = {
2669@@ -11131,14 +11133,14 @@ static const struct elink_phy phy_54618se = {
2670 .speed_cap_mask = 0,
2671 /* req_duplex = */ 0,
2672 /* rsrv = */ 0,
2673- .config_init = (config_init_t) elink_54618se_config_init,
2674- .read_status = (read_status_t) elink_54618se_read_status,
2675- .link_reset = (link_reset_t) elink_54618se_link_reset,
2676- .config_loopback = (config_loopback_t) elink_54618se_config_loopback,
2677- .format_fw_ver = (format_fw_ver_t) NULL,
2678- .hw_reset = (hw_reset_t) NULL,
2679- .set_link_led = (set_link_led_t) elink_5461x_set_link_led,
2680- .phy_specific_func = (phy_specific_func_t) elink_54618se_specific_func
2681+ .config_init = elink_54618se_config_init,
2682+ .read_status = elink_54618se_read_status,
2683+ .link_reset = elink_54618se_link_reset,
2684+ .config_loopback = elink_54618se_config_loopback,
2685+ .format_fw_ver = NULL,
2686+ .hw_reset = NULL,
2687+ .set_link_led = elink_5461x_set_link_led,
2688+ .phy_specific_func = elink_54618se_specific_func
2689 };
2690
2691 /*****************************************************************/
2692@@ -12919,7 +12921,7 @@ static void elink_check_kr2_wa(struct elink_params *params,
2693 */
2694 not_kr2_device = (((base_page & 0x8000) == 0) ||
2695 (((base_page & 0x8000) &&
2696- ((next_page & 0xe0) == 0x2))));
2697+ ((next_page & 0xe0) == 0x20))));
2698
2699 /* In case KR2 is already disabled, check if we need to re-enable it */
2700 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
2701diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
2702index 3eeca6f..52c511e 100644
2703--- a/drivers/net/bnxt/bnxt_ethdev.c
2704+++ b/drivers/net/bnxt/bnxt_ethdev.c
2705@@ -400,10 +400,6 @@ static int bnxt_init_nic(struct bnxt *bp)
2706 bnxt_init_vnics(bp);
2707 bnxt_init_filters(bp);
2708
2709- rc = bnxt_init_chip(bp);
2710- if (rc)
2711- return rc;
2712-
2713 return 0;
2714 }
2715
2716@@ -465,7 +461,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
2717 .wthresh = 0,
2718 },
2719 .rx_free_thresh = 32,
2720- .rx_drop_en = 0,
2721+ /* If no descriptors available, pkts are dropped by default */
2722+ .rx_drop_en = 1,
2723 };
2724
2725 dev_info->default_txconf = (struct rte_eth_txconf) {
2726@@ -572,7 +569,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
2727 }
2728 bp->dev_stopped = 0;
2729
2730- rc = bnxt_init_nic(bp);
2731+ rc = bnxt_init_chip(bp);
2732 if (rc)
2733 goto error;
2734
2735@@ -631,6 +628,8 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
2736 }
2737 bnxt_set_hwrm_link_config(bp, false);
2738 bnxt_hwrm_port_clr_stats(bp);
2739+ bnxt_free_tx_mbufs(bp);
2740+ bnxt_free_rx_mbufs(bp);
2741 bnxt_shutdown_nic(bp);
2742 bp->dev_stopped = 1;
2743 }
2744@@ -642,8 +641,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
2745 if (bp->dev_stopped == 0)
2746 bnxt_dev_stop_op(eth_dev);
2747
2748- bnxt_free_tx_mbufs(bp);
2749- bnxt_free_rx_mbufs(bp);
2750 bnxt_free_mem(bp);
2751 if (eth_dev->data->mac_addrs != NULL) {
2752 rte_free(eth_dev->data->mac_addrs);
2753@@ -3057,6 +3054,7 @@ skip_init:
2754 goto error_free_int;
2755
2756 bnxt_enable_int(bp);
2757+ bnxt_init_nic(bp);
2758
2759 return 0;
2760
2761diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
2762index 32af606..8d3ddf1 100644
2763--- a/drivers/net/bnxt/bnxt_filter.c
2764+++ b/drivers/net/bnxt/bnxt_filter.c
2765@@ -159,6 +159,14 @@ void bnxt_free_filter_mem(struct bnxt *bp)
2766
2767 rte_free(bp->filter_info);
2768 bp->filter_info = NULL;
2769+
2770+ for (i = 0; i < bp->pf.max_vfs; i++) {
2771+ STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
2772+ rte_free(filter);
2773+ STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
2774+ bnxt_filter_info, next);
2775+ }
2776+ }
2777 }
2778
2779 int bnxt_alloc_filter_mem(struct bnxt *bp)
2780diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
2781index ce214d7..22f092f 100644
2782--- a/drivers/net/bnxt/bnxt_hwrm.c
2783+++ b/drivers/net/bnxt/bnxt_hwrm.c
2784@@ -252,6 +252,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
2785 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
2786 uint32_t mask = 0;
2787
2788+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2789+ return rc;
2790+
2791 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
2792 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2793
2794@@ -1100,7 +1103,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2795 HWRM_PREP(req, VNIC_ALLOC);
2796
2797 if (vnic->func_default)
2798- req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
2799+ req.flags =
2800+ rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
2801 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2802
2803 HWRM_CHECK_RESULT();
2804@@ -1121,7 +1125,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
2805
2806 HWRM_PREP(req, VNIC_PLCMODES_QCFG);
2807
2808- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
2809+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2810
2811 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2812
2813@@ -1149,7 +1153,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
2814
2815 HWRM_PREP(req, VNIC_PLCMODES_CFG);
2816
2817- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
2818+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2819 req.flags = rte_cpu_to_le_32(pmode->flags);
2820 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
2821 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
2822@@ -1393,7 +1397,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2823 size -= RTE_PKTMBUF_HEADROOM;
2824
2825 req.jumbo_thresh = rte_cpu_to_le_16(size);
2826- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
2827+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2828
2829 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2830
2831@@ -1424,12 +1428,12 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2832 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2833 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2834 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2835- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
2836 req.max_agg_segs = rte_cpu_to_le_16(5);
2837 req.max_aggs =
2838 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
2839 req.min_agg_len = rte_cpu_to_le_32(512);
2840 }
2841+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2842
2843 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2844
2845diff --git a/drivers/net/bnxt/bnxt_nvm_defs.h b/drivers/net/bnxt/bnxt_nvm_defs.h
2846index c5ccc9b..6ce94bf 100644
2847--- a/drivers/net/bnxt/bnxt_nvm_defs.h
2848+++ b/drivers/net/bnxt/bnxt_nvm_defs.h
2849@@ -1,13 +1,37 @@
2850-/* Broadcom NetXtreme-C/E network driver.
2851+/*-
2852+ * BSD LICENSE
2853 *
2854- * Copyright (c) 2014-2016 Broadcom Corporation
2855- * Copyright (c) 2016-2017 Broadcom Limited
2856+ * Copyright(c) Broadcom Limited.
2857+ * All rights reserved.
2858 *
2859- * This program is free software; you can redistribute it and/or modify
2860- * it under the terms of the GNU General Public License as published by
2861- * the Free Software Foundation.
2862+ * Redistribution and use in source and binary forms, with or without
2863+ * modification, are permitted provided that the following conditions
2864+ * are met:
2865+ *
2866+ * * Redistributions of source code must retain the above copyright
2867+ * notice, this list of conditions and the following disclaimer.
2868+ * * Redistributions in binary form must reproduce the above copyright
2869+ * notice, this list of conditions and the following disclaimer in
2870+ * the documentation and/or other materials provided with the
2871+ * distribution.
2872+ * * Neither the name of Broadcom Corporation nor the names of its
2873+ * contributors may be used to endorse or promote products derived
2874+ * from this software without specific prior written permission.
2875+ *
2876+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2877+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2878+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2879+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2880+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2881+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2882+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2883+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2884+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2885+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
2886+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2887 */
2888
2889+
2890 #ifndef _BNXT_NVM_DEFS_H_
2891 #define _BNXT_NVM_DEFS_H_
2892
2893diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
2894index b4e9f38..5088e9d 100644
2895--- a/drivers/net/bnxt/bnxt_rxq.c
2896+++ b/drivers/net/bnxt/bnxt_rxq.c
2897@@ -237,7 +237,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
2898 if (rxq) {
2899 sw_ring = rxq->rx_ring->rx_buf_ring;
2900 if (sw_ring) {
2901- for (i = 0; i < rxq->nb_rx_desc; i++) {
2902+ for (i = 0;
2903+ i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
2904 if (sw_ring[i].mbuf) {
2905 rte_pktmbuf_free_seg(sw_ring[i].mbuf);
2906 sw_ring[i].mbuf = NULL;
2907@@ -247,7 +248,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
2908 /* Free up mbufs in Agg ring */
2909 sw_ring = rxq->rx_ring->ag_buf_ring;
2910 if (sw_ring) {
2911- for (i = 0; i < rxq->nb_rx_desc; i++) {
2912+ for (i = 0;
2913+ i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
2914 if (sw_ring[i].mbuf) {
2915 rte_pktmbuf_free_seg(sw_ring[i].mbuf);
2916 sw_ring[i].mbuf = NULL;
2917diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
2918index 5128335..c5c5484 100644
2919--- a/drivers/net/bnxt/bnxt_rxr.c
2920+++ b/drivers/net/bnxt/bnxt_rxr.c
2921@@ -469,11 +469,15 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
2922
2923 if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
2924 mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
2925+ else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
2926+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
2927 else
2928 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
2929
2930 if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
2931 mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
2932+ else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1)))
2933+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
2934 else
2935 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
2936
2937@@ -730,7 +734,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
2938 if (rxq->rx_buf_use_size <= size)
2939 size = rxq->rx_buf_use_size;
2940
2941- type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
2942+ type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
2943
2944 rxr = rxq->rx_ring;
2945 ring = rxr->rx_ring_struct;
2946diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
2947index a94373d..4daa7e2 100644
2948--- a/drivers/net/bnxt/bnxt_rxr.h
2949+++ b/drivers/net/bnxt/bnxt_rxr.h
2950@@ -52,22 +52,36 @@
2951 #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
2952 ((hdr_info) & 0x1ff)
2953
2954-#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
2955+#define RX_CMP_L4_CS_BITS \
2956+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
2957+ RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
2958
2959-#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR)
2960+#define RX_CMP_L4_CS_ERR_BITS \
2961+ rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \
2962+ RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR)
2963
2964 #define RX_CMP_L4_CS_OK(rxcmp1) \
2965 (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \
2966 !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS))
2967
2968-#define RX_CMP_IP_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR)
2969+#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
2970+ !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)
2971
2972-#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
2973+#define RX_CMP_IP_CS_ERR_BITS \
2974+ rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \
2975+ RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR)
2976+
2977+#define RX_CMP_IP_CS_BITS \
2978+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
2979+ RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
2980
2981 #define RX_CMP_IP_CS_OK(rxcmp1) \
2982 (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \
2983 !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS))
2984
2985+#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
2986+ !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)
2987+
2988 enum pkt_hash_types {
2989 PKT_HASH_TYPE_NONE, /* Undefined type */
2990 PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
2991diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
2992index b834035..8fd90ae 100644
2993--- a/drivers/net/bonding/rte_eth_bond_api.c
2994+++ b/drivers/net/bonding/rte_eth_bond_api.c
2995@@ -240,9 +240,12 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
2996 for (i = 0, mask = 1;
2997 i < RTE_BITMAP_SLAB_BIT_SIZE;
2998 i ++, mask <<= 1) {
2999- if (unlikely(slab & mask))
3000+ if (unlikely(slab & mask)) {
3001+ uint16_t vlan_id = pos + i;
3002+
3003 res = rte_eth_dev_vlan_filter(slave_port_id,
3004- (uint16_t)pos, 1);
3005+ vlan_id, 1);
3006+ }
3007 }
3008 found = rte_bitmap_scan(internals->vlan_filter_bmp,
3009 &pos, &slab);
3010diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
3011index e816da3..8c94cc6 100644
3012--- a/drivers/net/bonding/rte_eth_bond_args.c
3013+++ b/drivers/net/bonding/rte_eth_bond_args.c
3014@@ -273,7 +273,7 @@ bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
3015 if (primary_slave_port_id < 0)
3016 return -1;
3017
3018- *(uint8_t *)extra_args = (uint8_t)primary_slave_port_id;
3019+ *(uint16_t *)extra_args = (uint16_t)primary_slave_port_id;
3020
3021 return 0;
3022 }
3023diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
3024index 1d3fbeb..e19a4a3 100644
3025--- a/drivers/net/bonding/rte_eth_bond_pmd.c
3026+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
3027@@ -1912,7 +1912,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
3028
3029 if (internals->slave_count == 0) {
3030 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
3031- return -1;
3032+ goto out_err;
3033 }
3034
3035 if (internals->user_defined_mac == 0) {
3036@@ -1923,18 +1923,18 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
3037 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
3038
3039 if (new_mac_addr == NULL)
3040- return -1;
3041+ goto out_err;
3042
3043 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
3044 RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
3045 eth_dev->data->port_id);
3046- return -1;
3047+ goto out_err;
3048 }
3049 }
3050
3051 /* Update all slave devices MACs*/
3052 if (mac_address_slaves_update(eth_dev) != 0)
3053- return -1;
3054+ goto out_err;
3055
3056 /* If bonded device is configure in promiscuous mode then re-apply config */
3057 if (internals->promiscuous_en)
3058@@ -1959,7 +1959,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
3059 "bonded port (%d) failed to reconfigure slave device (%d)",
3060 eth_dev->data->port_id,
3061 internals->slaves[i].port_id);
3062- return -1;
3063+ goto out_err;
3064 }
3065 /* We will need to poll for link status if any slave doesn't
3066 * support interrupts
3067@@ -1967,6 +1967,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
3068 if (internals->slaves[i].link_status_poll_enabled)
3069 internals->link_status_polling_enabled = 1;
3070 }
3071+
3072 /* start polling if needed */
3073 if (internals->link_status_polling_enabled) {
3074 rte_eal_alarm_set(
3075@@ -1986,6 +1987,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
3076 bond_tlb_enable(internals);
3077
3078 return 0;
3079+
3080+out_err:
3081+ eth_dev->data->dev_started = 0;
3082+ return -1;
3083 }
3084
3085 static void
3086@@ -2519,14 +2524,21 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
3087 if (!valid_slave)
3088 return rc;
3089
3090+ /* Synchronize lsc callback parallel calls either by real link event
3091+ * from the slaves PMDs or by the bonding PMD itself.
3092+ */
3093+ rte_spinlock_lock(&internals->lsc_lock);
3094+
3095 /* Search for port in active port list */
3096 active_pos = find_slave_by_id(internals->active_slaves,
3097 internals->active_slave_count, port_id);
3098
3099 rte_eth_link_get_nowait(port_id, &link);
3100 if (link.link_status) {
3101- if (active_pos < internals->active_slave_count)
3102+ if (active_pos < internals->active_slave_count) {
3103+ rte_spinlock_unlock(&internals->lsc_lock);
3104 return rc;
3105+ }
3106
3107 /* if no active slave ports then set this port to be primary port */
3108 if (internals->active_slave_count < 1) {
3109@@ -2545,8 +2557,10 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
3110 internals->primary_port == port_id)
3111 bond_ethdev_primary_set(internals, port_id);
3112 } else {
3113- if (active_pos == internals->active_slave_count)
3114+ if (active_pos == internals->active_slave_count) {
3115+ rte_spinlock_unlock(&internals->lsc_lock);
3116 return rc;
3117+ }
3118
3119 /* Remove from active slave list */
3120 deactivate_slave(bonded_eth_dev, port_id);
3121@@ -2599,6 +2613,9 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
3122 NULL, NULL);
3123 }
3124 }
3125+
3126+ rte_spinlock_unlock(&internals->lsc_lock);
3127+
3128 return 0;
3129 }
3130
3131@@ -2766,6 +2783,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
3132 eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
3133
3134 rte_spinlock_init(&internals->lock);
3135+ rte_spinlock_init(&internals->lsc_lock);
3136
3137 internals->port_id = eth_dev->data->port_id;
3138 internals->mode = BONDING_MODE_INVALID;
3139@@ -2967,6 +2985,10 @@ bond_remove(struct rte_vdev_device *dev)
3140 eth_dev->tx_pkt_burst = NULL;
3141
3142 internals = eth_dev->data->dev_private;
3143+	/* Try to release the mempool used in mode 6. If the bond
3144+	 * device is not in mode 6, freeing NULL is harmless.
3145+	 */
3146+ rte_mempool_free(internals->mode6.mempool);
3147 rte_bitmap_free(internals->vlan_filter_bmp);
3148 rte_free(internals->vlan_filter_bmpmem);
3149 rte_free(eth_dev->data->dev_private);
3150diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
3151index a5cfa6a..dae8aab 100644
3152--- a/drivers/net/bonding/rte_eth_bond_private.h
3153+++ b/drivers/net/bonding/rte_eth_bond_private.h
3154@@ -118,6 +118,7 @@ struct bond_dev_private {
3155 uint8_t mode; /**< Link Bonding Mode */
3156
3157 rte_spinlock_t lock;
3158+ rte_spinlock_t lsc_lock;
3159
3160 uint16_t primary_port; /**< Primary Slave Port */
3161 uint16_t current_primary_port; /**< Primary Slave Port */
3162diff --git a/drivers/net/bonding/rte_pmd_bond_version.map b/drivers/net/bonding/rte_pmd_bond_version.map
3163index ec3374b..03ddb44 100644
3164--- a/drivers/net/bonding/rte_pmd_bond_version.map
3165+++ b/drivers/net/bonding/rte_pmd_bond_version.map
3166@@ -1,6 +1,7 @@
3167 DPDK_2.0 {
3168 global:
3169
3170+ rte_eth_bond_8023ad_slave_info;
3171 rte_eth_bond_active_slaves_get;
3172 rte_eth_bond_create;
3173 rte_eth_bond_link_monitoring_set;
3174diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
3175index e4375c3..02056bc 100644
3176--- a/drivers/net/dpaa/dpaa_ethdev.c
3177+++ b/drivers/net/dpaa/dpaa_ethdev.c
3178@@ -324,10 +324,13 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3179 static int
3180 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3181 struct rte_eth_xstat_name *xstats_names,
3182- __rte_unused unsigned int limit)
3183+ unsigned int limit)
3184 {
3185 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
3186
3187+ if (limit < stat_cnt)
3188+ return stat_cnt;
3189+
3190 if (xstats_names != NULL)
3191 for (i = 0; i < stat_cnt; i++)
3192 snprintf(xstats_names[i].name,
3193@@ -355,7 +358,7 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3194 return 0;
3195
3196 fman_if_stats_get_all(dpaa_intf->fif, values_copy,
3197- sizeof(struct dpaa_if_stats));
3198+ sizeof(struct dpaa_if_stats) / 8);
3199
3200 for (i = 0; i < stat_cnt; i++)
3201 values[i] =
3202diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
3203index 202f84f..0711baf 100644
3204--- a/drivers/net/dpaa2/dpaa2_ethdev.c
3205+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
3206@@ -1144,12 +1144,12 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3207 union dpni_statistics value[3] = {};
3208 unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
3209
3210- if (xstats == NULL)
3211- return 0;
3212-
3213 if (n < num)
3214 return num;
3215
3216+ if (xstats == NULL)
3217+ return 0;
3218+
3219 /* Get Counters from page_0*/
3220 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
3221 0, 0, &value[0]);
3222@@ -1182,10 +1182,13 @@ err:
3223 static int
3224 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3225 struct rte_eth_xstat_name *xstats_names,
3226- __rte_unused unsigned int limit)
3227+ unsigned int limit)
3228 {
3229 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
3230
3231+ if (limit < stat_cnt)
3232+ return stat_cnt;
3233+
3234 if (xstats_names != NULL)
3235 for (i = 0; i < stat_cnt; i++)
3236 snprintf(xstats_names[i].name,
3237diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
3238index 9b25d21..9e54ace 100644
3239--- a/drivers/net/enic/base/vnic_dev.c
3240+++ b/drivers/net/enic/base/vnic_dev.c
3241@@ -627,17 +627,9 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
3242 {
3243 u64 a0, a1;
3244 int wait = 1000;
3245- static u32 instance;
3246- char name[NAME_MAX];
3247
3248- if (!vdev->stats) {
3249- snprintf((char *)name, sizeof(name),
3250- "vnic_stats-%u", instance++);
3251- vdev->stats = vdev->alloc_consistent(vdev->priv,
3252- sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name);
3253- if (!vdev->stats)
3254- return -ENOMEM;
3255- }
3256+ if (!vdev->stats)
3257+ return -ENOMEM;
3258
3259 *stats = vdev->stats;
3260 a0 = vdev->stats_pa;
3261@@ -962,6 +954,18 @@ u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
3262 return vdev->intr_coal_timer_info.max_usec;
3263 }
3264
3265+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
3266+{
3267+ char name[NAME_MAX];
3268+ static u32 instance;
3269+
3270+ snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
3271+ vdev->stats = vdev->alloc_consistent(vdev->priv,
3272+ sizeof(struct vnic_stats),
3273+ &vdev->stats_pa, (u8 *)name);
3274+ return vdev->stats == NULL ? -ENOMEM : 0;
3275+}
3276+
3277 void vnic_dev_unregister(struct vnic_dev *vdev)
3278 {
3279 if (vdev) {
3280diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
3281index c9ca25b..94964e4 100644
3282--- a/drivers/net/enic/base/vnic_dev.h
3283+++ b/drivers/net/enic/base/vnic_dev.h
3284@@ -196,6 +196,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
3285 void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
3286 unsigned int num_bars);
3287 struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
3288+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
3289 int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
3290 int vnic_dev_get_size(void);
3291 int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
3292diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
3293index 1694aed..6356c10 100644
3294--- a/drivers/net/enic/enic_main.c
3295+++ b/drivers/net/enic/enic_main.c
3296@@ -1252,6 +1252,8 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
3297 /* free and reallocate RQs with the new MTU */
3298 for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
3299 rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
3300+ if (!rq->in_use)
3301+ continue;
3302
3303 enic_free_rq(rq);
3304 rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
3305@@ -1383,6 +1385,15 @@ int enic_probe(struct enic *enic)
3306 enic_alloc_consistent,
3307 enic_free_consistent);
3308
3309+ /*
3310+ * Allocate the consistent memory for stats upfront so both primary and
3311+ * secondary processes can dump stats.
3312+ */
3313+ err = vnic_dev_alloc_stats_mem(enic->vdev);
3314+ if (err) {
3315+ dev_err(enic, "Failed to allocate cmd memory, aborting\n");
3316+ goto err_out_unregister;
3317+ }
3318 /* Issue device open to get device in known state */
3319 err = enic_dev_open(enic);
3320 if (err) {
3321diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
3322index 8336510..41b4cb0 100644
3323--- a/drivers/net/failsafe/failsafe.c
3324+++ b/drivers/net/failsafe/failsafe.c
3325@@ -210,7 +210,7 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
3326 mac);
3327 if (ret) {
3328 ERROR("Failed to set default MAC address");
3329- goto free_args;
3330+ goto cancel_alarm;
3331 }
3332 }
3333 } else {
3334@@ -240,6 +240,8 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
3335 mac->addr_bytes[4], mac->addr_bytes[5]);
3336 dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3337 return 0;
3338+cancel_alarm:
3339+ failsafe_hotplug_alarm_cancel(dev);
3340 free_args:
3341 failsafe_args_free(dev);
3342 free_subs:
3343diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
3344index 21392e5..5b5ac42 100644
3345--- a/drivers/net/failsafe/failsafe_ether.c
3346+++ b/drivers/net/failsafe/failsafe_ether.c
3347@@ -287,6 +287,7 @@ fs_dev_remove(struct sub_device *sdev)
3348 sdev->state = DEV_ACTIVE;
3349 /* fallthrough */
3350 case DEV_ACTIVE:
3351+ failsafe_eth_dev_unregister_callbacks(sdev);
3352 rte_eth_dev_close(PORT_ID(sdev));
3353 sdev->state = DEV_PROBED;
3354 /* fallthrough */
3355@@ -347,6 +348,35 @@ fs_rxtx_clean(struct sub_device *sdev)
3356 }
3357
3358 void
3359+failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
3360+{
3361+ int ret;
3362+
3363+ if (sdev == NULL)
3364+ return;
3365+ if (sdev->rmv_callback) {
3366+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
3367+ RTE_ETH_EVENT_INTR_RMV,
3368+ failsafe_eth_rmv_event_callback,
3369+ sdev);
3370+ if (ret)
3371+ WARN("Failed to unregister RMV callback for sub_device"
3372+ " %d", SUB_ID(sdev));
3373+ sdev->rmv_callback = 0;
3374+ }
3375+ if (sdev->lsc_callback) {
3376+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
3377+ RTE_ETH_EVENT_INTR_LSC,
3378+ failsafe_eth_lsc_event_callback,
3379+ sdev);
3380+ if (ret)
3381+ WARN("Failed to unregister LSC callback for sub_device"
3382+ " %d", SUB_ID(sdev));
3383+ sdev->lsc_callback = 0;
3384+ }
3385+}
3386+
3387+void
3388 failsafe_dev_remove(struct rte_eth_dev *dev)
3389 {
3390 struct sub_device *sdev;
3391diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
3392index e16a590..9a5d873 100644
3393--- a/drivers/net/failsafe/failsafe_ops.c
3394+++ b/drivers/net/failsafe/failsafe_ops.c
3395@@ -124,7 +124,7 @@ fs_dev_configure(struct rte_eth_dev *dev)
3396 ERROR("Could not configure sub_device %d", i);
3397 return ret;
3398 }
3399- if (rmv_interrupt) {
3400+ if (rmv_interrupt && sdev->rmv_callback == 0) {
3401 ret = rte_eth_dev_callback_register(PORT_ID(sdev),
3402 RTE_ETH_EVENT_INTR_RMV,
3403 failsafe_eth_rmv_event_callback,
3404@@ -132,9 +132,11 @@ fs_dev_configure(struct rte_eth_dev *dev)
3405 if (ret)
3406 WARN("Failed to register RMV callback for sub_device %d",
3407 SUB_ID(sdev));
3408+ else
3409+ sdev->rmv_callback = 1;
3410 }
3411 dev->data->dev_conf.intr_conf.rmv = 0;
3412- if (lsc_interrupt) {
3413+ if (lsc_interrupt && sdev->lsc_callback == 0) {
3414 ret = rte_eth_dev_callback_register(PORT_ID(sdev),
3415 RTE_ETH_EVENT_INTR_LSC,
3416 failsafe_eth_lsc_event_callback,
3417@@ -142,6 +144,8 @@ fs_dev_configure(struct rte_eth_dev *dev)
3418 if (ret)
3419 WARN("Failed to register LSC callback for sub_device %d",
3420 SUB_ID(sdev));
3421+ else
3422+ sdev->lsc_callback = 1;
3423 }
3424 dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
3425 sdev->state = DEV_ACTIVE;
3426@@ -237,6 +241,7 @@ fs_dev_close(struct rte_eth_dev *dev)
3427 PRIV(dev)->state = DEV_ACTIVE - 1;
3428 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
3429 DEBUG("Closing sub_device %d", i);
3430+ failsafe_eth_dev_unregister_callbacks(sdev);
3431 rte_eth_dev_close(PORT_ID(sdev));
3432 sdev->state = DEV_ACTIVE - 1;
3433 }
3434diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
3435index d81cc3c..40eabb7 100644
3436--- a/drivers/net/failsafe/failsafe_private.h
3437+++ b/drivers/net/failsafe/failsafe_private.h
3438@@ -117,6 +117,10 @@ struct sub_device {
3439 volatile unsigned int remove:1;
3440 /* flow isolation state */
3441 int flow_isolated:1;
3442+ /* RMV callback registration state */
3443+ unsigned int rmv_callback:1;
3444+ /* LSC callback registration state */
3445+ unsigned int lsc_callback:1;
3446 };
3447
3448 struct fs_priv {
3449@@ -187,6 +191,7 @@ int failsafe_eal_uninit(struct rte_eth_dev *dev);
3450 /* ETH_DEV */
3451
3452 int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
3453+void failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev);
3454 void failsafe_dev_remove(struct rte_eth_dev *dev);
3455 void failsafe_stats_increment(struct rte_eth_stats *to,
3456 struct rte_eth_stats *from);
3457diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
3458index a482ab9..df66e76 100644
3459--- a/drivers/net/i40e/base/i40e_register.h
3460+++ b/drivers/net/i40e/base/i40e_register.h
3461@@ -90,7 +90,7 @@ POSSIBILITY OF SUCH DAMAGE.
3462 #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
3463 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
3464 #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
3465-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
3466+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
3467 #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
3468 #define I40E_PF_ARQT_ARQT_SHIFT 0
3469 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
3470@@ -113,7 +113,7 @@ POSSIBILITY OF SUCH DAMAGE.
3471 #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
3472 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
3473 #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
3474-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
3475+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
3476 #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
3477 #define I40E_PF_ATQT_ATQT_SHIFT 0
3478 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
3479@@ -140,7 +140,7 @@ POSSIBILITY OF SUCH DAMAGE.
3480 #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
3481 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
3482 #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
3483-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
3484+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
3485 #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
3486 #define I40E_VF_ARQT_MAX_INDEX 127
3487 #define I40E_VF_ARQT_ARQT_SHIFT 0
3488@@ -168,7 +168,7 @@ POSSIBILITY OF SUCH DAMAGE.
3489 #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
3490 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
3491 #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
3492-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
3493+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
3494 #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
3495 #define I40E_VF_ATQT_MAX_INDEX 127
3496 #define I40E_VF_ATQT_ATQT_SHIFT 0
3497@@ -291,7 +291,7 @@ POSSIBILITY OF SUCH DAMAGE.
3498 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
3499 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
3500 #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
3501-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
3502+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
3503 #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
3504 #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
3505 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
3506@@ -535,7 +535,7 @@ POSSIBILITY OF SUCH DAMAGE.
3507 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
3508 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
3509 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
3510-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
3511+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
3512 #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
3513 #define I40E_GLGEN_MSRWD_MAX_INDEX 3
3514 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
3515@@ -1274,14 +1274,14 @@ POSSIBILITY OF SUCH DAMAGE.
3516 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
3517 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
3518 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
3519-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
3520+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
3521 #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
3522 #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
3523 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
3524 #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
3525 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
3526 #define I40E_PFLAN_QALLOC_VALID_SHIFT 31
3527-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
3528+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
3529 #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
3530 #define I40E_QRX_ENA_MAX_INDEX 1535
3531 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0
3532@@ -1692,7 +1692,7 @@ POSSIBILITY OF SUCH DAMAGE.
3533 #define I40E_GLNVM_SRCTL_START_SHIFT 30
3534 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
3535 #define I40E_GLNVM_SRCTL_DONE_SHIFT 31
3536-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
3537+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
3538 #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
3539 #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
3540 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
3541@@ -3059,7 +3059,7 @@ POSSIBILITY OF SUCH DAMAGE.
3542 #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
3543 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
3544 #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
3545-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
3546+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
3547 #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
3548 #define I40E_VP_MDET_RX_MAX_INDEX 127
3549 #define I40E_VP_MDET_RX_VALID_SHIFT 0
3550@@ -3196,7 +3196,7 @@ POSSIBILITY OF SUCH DAMAGE.
3551 #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
3552 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
3553 #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
3554-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
3555+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
3556 #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
3557 #define I40E_VF_ARQT1_ARQT_SHIFT 0
3558 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
3559@@ -3219,7 +3219,7 @@ POSSIBILITY OF SUCH DAMAGE.
3560 #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
3561 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
3562 #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
3563-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
3564+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
3565 #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
3566 #define I40E_VF_ATQT1_ATQT_SHIFT 0
3567 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
3568diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
3569index 290ef24..85baff9 100644
3570--- a/drivers/net/i40e/i40e_ethdev.c
3571+++ b/drivers/net/i40e/i40e_ethdev.c
3572@@ -1554,6 +1554,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
3573 struct rte_flow *p_flow;
3574 int ret;
3575 uint8_t aq_fail = 0;
3576+ int retries = 0;
3577
3578 PMD_INIT_FUNC_TRACE();
3579
3580@@ -1595,9 +1596,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
3581 /* disable uio intr before callback unregister */
3582 rte_intr_disable(intr_handle);
3583
3584- /* register callback func to eal lib */
3585- rte_intr_callback_unregister(intr_handle,
3586- i40e_dev_interrupt_handler, dev);
3587+ /* unregister callback func to eal lib */
3588+ do {
3589+ ret = rte_intr_callback_unregister(intr_handle,
3590+ i40e_dev_interrupt_handler, dev);
3591+ if (ret >= 0) {
3592+ break;
3593+ } else if (ret != -EAGAIN) {
3594+ PMD_INIT_LOG(ERR,
3595+ "intr callback unregister failed: %d",
3596+ ret);
3597+ return ret;
3598+ }
3599+ i40e_msec_delay(500);
3600+ } while (retries++ < 5);
3601
3602 i40e_rm_ethtype_filter_list(pf);
3603 i40e_rm_tunnel_filter_list(pf);
3604@@ -2297,6 +2309,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
3605 i40e_pf_disable_irq0(hw);
3606 rte_intr_disable(intr_handle);
3607
3608+ i40e_fdir_teardown(pf);
3609+
3610 /* shutdown and destroy the HMC */
3611 i40e_shutdown_lan_hmc(hw);
3612
3613@@ -2308,7 +2322,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
3614 pf->vmdq = NULL;
3615
3616 /* release all the existing VSIs and VEBs */
3617- i40e_fdir_teardown(pf);
3618 i40e_vsi_release(pf->main_vsi);
3619
3620 /* shutdown the adminq */
3621@@ -2444,77 +2457,139 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
3622 return i40e_phy_conf_link(hw, abilities, speed, false);
3623 }
3624
3625-int
3626-i40e_dev_link_update(struct rte_eth_dev *dev,
3627- int wait_to_complete)
3628+static __rte_always_inline void
3629+update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link)
3630+{
3631+/* Link status registers and values*/
3632+#define I40E_PRTMAC_LINKSTA 0x001E2420
3633+#define I40E_REG_LINK_UP 0x40000080
3634+#define I40E_PRTMAC_MACC 0x001E24E0
3635+#define I40E_REG_MACC_25GB 0x00020000
3636+#define I40E_REG_SPEED_MASK 0x38000000
3637+#define I40E_REG_SPEED_100MB 0x00000000
3638+#define I40E_REG_SPEED_1GB 0x08000000
3639+#define I40E_REG_SPEED_10GB 0x10000000
3640+#define I40E_REG_SPEED_20GB 0x20000000
3641+#define I40E_REG_SPEED_25_40GB 0x18000000
3642+ uint32_t link_speed;
3643+ uint32_t reg_val;
3644+
3645+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
3646+ link_speed = reg_val & I40E_REG_SPEED_MASK;
3647+ reg_val &= I40E_REG_LINK_UP;
3648+ link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
3649+
3650+ if (unlikely(link->link_status == 0))
3651+ return;
3652+
3653+ /* Parse the link status */
3654+ switch (link_speed) {
3655+ case I40E_REG_SPEED_100MB:
3656+ link->link_speed = ETH_SPEED_NUM_100M;
3657+ break;
3658+ case I40E_REG_SPEED_1GB:
3659+ link->link_speed = ETH_SPEED_NUM_1G;
3660+ break;
3661+ case I40E_REG_SPEED_10GB:
3662+ link->link_speed = ETH_SPEED_NUM_10G;
3663+ break;
3664+ case I40E_REG_SPEED_20GB:
3665+ link->link_speed = ETH_SPEED_NUM_20G;
3666+ break;
3667+ case I40E_REG_SPEED_25_40GB:
3668+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
3669+
3670+ if (reg_val & I40E_REG_MACC_25GB)
3671+ link->link_speed = ETH_SPEED_NUM_25G;
3672+ else
3673+ link->link_speed = ETH_SPEED_NUM_40G;
3674+
3675+ break;
3676+ default:
3677+ PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
3678+ break;
3679+ }
3680+}
3681+
3682+static __rte_always_inline void
3683+update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link,
3684+ bool enable_lse)
3685 {
3686-#define CHECK_INTERVAL 100 /* 100ms */
3687-#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
3688- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3689+#define CHECK_INTERVAL 100 /* 100ms */
3690+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
3691+ uint32_t rep_cnt = MAX_REPEAT_TIME;
3692 struct i40e_link_status link_status;
3693- struct rte_eth_link link, old;
3694 int status;
3695- unsigned rep_cnt = MAX_REPEAT_TIME;
3696- bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3697
3698- memset(&link, 0, sizeof(link));
3699- memset(&old, 0, sizeof(old));
3700 memset(&link_status, 0, sizeof(link_status));
3701- rte_i40e_dev_atomic_read_link_status(dev, &old);
3702
3703 do {
3704 /* Get link status information from hardware */
3705 status = i40e_aq_get_link_info(hw, enable_lse,
3706 &link_status, NULL);
3707- if (status != I40E_SUCCESS) {
3708- link.link_speed = ETH_SPEED_NUM_100M;
3709- link.link_duplex = ETH_LINK_FULL_DUPLEX;
3710+ if (unlikely(status != I40E_SUCCESS)) {
3711+ link->link_speed = ETH_SPEED_NUM_100M;
3712+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
3713 PMD_DRV_LOG(ERR, "Failed to get link info");
3714- goto out;
3715+ return;
3716 }
3717
3718- link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
3719- if (!wait_to_complete || link.link_status)
3720+ link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
3721+ if (unlikely(link->link_status != 0))
3722 break;
3723
3724 rte_delay_ms(CHECK_INTERVAL);
3725 } while (--rep_cnt);
3726
3727- if (!link.link_status)
3728- goto out;
3729-
3730- /* i40e uses full duplex only */
3731- link.link_duplex = ETH_LINK_FULL_DUPLEX;
3732-
3733 /* Parse the link status */
3734 switch (link_status.link_speed) {
3735 case I40E_LINK_SPEED_100MB:
3736- link.link_speed = ETH_SPEED_NUM_100M;
3737+ link->link_speed = ETH_SPEED_NUM_100M;
3738 break;
3739 case I40E_LINK_SPEED_1GB:
3740- link.link_speed = ETH_SPEED_NUM_1G;
3741+ link->link_speed = ETH_SPEED_NUM_1G;
3742 break;
3743 case I40E_LINK_SPEED_10GB:
3744- link.link_speed = ETH_SPEED_NUM_10G;
3745+ link->link_speed = ETH_SPEED_NUM_10G;
3746 break;
3747 case I40E_LINK_SPEED_20GB:
3748- link.link_speed = ETH_SPEED_NUM_20G;
3749+ link->link_speed = ETH_SPEED_NUM_20G;
3750 break;
3751 case I40E_LINK_SPEED_25GB:
3752- link.link_speed = ETH_SPEED_NUM_25G;
3753+ link->link_speed = ETH_SPEED_NUM_25G;
3754 break;
3755 case I40E_LINK_SPEED_40GB:
3756- link.link_speed = ETH_SPEED_NUM_40G;
3757+ link->link_speed = ETH_SPEED_NUM_40G;
3758 break;
3759 default:
3760- link.link_speed = ETH_SPEED_NUM_100M;
3761+ link->link_speed = ETH_SPEED_NUM_100M;
3762 break;
3763 }
3764+}
3765
3766+int
3767+i40e_dev_link_update(struct rte_eth_dev *dev,
3768+ int wait_to_complete)
3769+{
3770+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3771+ struct rte_eth_link link, old;
3772+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3773+
3774+ memset(&link, 0, sizeof(link));
3775+ memset(&old, 0, sizeof(old));
3776+
3777+ rte_i40e_dev_atomic_read_link_status(dev, &old);
3778+
3779+ /* i40e uses full duplex only */
3780+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
3781 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3782 ETH_LINK_SPEED_FIXED);
3783
3784-out:
3785+ if (!wait_to_complete)
3786+ update_link_no_wait(hw, &link);
3787+ else
3788+ update_link_wait(hw, &link, enable_lse);
3789+
3790 rte_i40e_dev_atomic_write_link_status(dev, &link);
3791 if (link.link_status == old.link_status)
3792 return -1;
3793@@ -11329,7 +11404,8 @@ i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
3794 static int
3795 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
3796 uint32_t pkg_size, uint32_t proto_num,
3797- struct rte_pmd_i40e_proto_info *proto)
3798+ struct rte_pmd_i40e_proto_info *proto,
3799+ enum rte_pmd_i40e_package_op op)
3800 {
3801 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3802 uint32_t pctype_num;
3803@@ -11342,6 +11418,12 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
3804 uint32_t i, j, n;
3805 int ret;
3806
3807+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
3808+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
3809+ PMD_DRV_LOG(ERR, "Unsupported operation.");
3810+ return -1;
3811+ }
3812+
3813 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
3814 (uint8_t *)&pctype_num, sizeof(pctype_num),
3815 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
3816@@ -11404,8 +11486,13 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
3817 i40e_find_customized_pctype(pf,
3818 I40E_CUSTOMIZED_GTPU);
3819 if (new_pctype) {
3820- new_pctype->pctype = pctype_value;
3821- new_pctype->valid = true;
3822+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
3823+ new_pctype->pctype = pctype_value;
3824+ new_pctype->valid = true;
3825+ } else {
3826+ new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
3827+ new_pctype->valid = false;
3828+ }
3829 }
3830 }
3831
3832@@ -11415,8 +11502,9 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
3833
3834 static int
3835 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
3836- uint32_t pkg_size, uint32_t proto_num,
3837- struct rte_pmd_i40e_proto_info *proto)
3838+ uint32_t pkg_size, uint32_t proto_num,
3839+ struct rte_pmd_i40e_proto_info *proto,
3840+ enum rte_pmd_i40e_package_op op)
3841 {
3842 struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
3843 uint16_t port_id = dev->data->port_id;
3844@@ -11429,6 +11517,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
3845 bool inner_ip;
3846 int ret;
3847
3848+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
3849+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
3850+ PMD_DRV_LOG(ERR, "Unsupported operation.");
3851+ return -1;
3852+ }
3853+
3854+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
3855+ rte_pmd_i40e_ptype_mapping_reset(port_id);
3856+ return 0;
3857+ }
3858+
3859 /* get information about new ptype num */
3860 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
3861 (uint8_t *)&ptype_num, sizeof(ptype_num),
3862@@ -11547,7 +11646,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
3863
3864 void
3865 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
3866- uint32_t pkg_size)
3867+ uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
3868 {
3869 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3870 uint32_t proto_num;
3871@@ -11556,6 +11655,12 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
3872 uint32_t i;
3873 int ret;
3874
3875+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
3876+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
3877+ PMD_DRV_LOG(ERR, "Unsupported operation.");
3878+ return;
3879+ }
3880+
3881 /* get information about protocol number */
3882 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
3883 (uint8_t *)&proto_num, sizeof(proto_num),
3884@@ -11589,20 +11694,23 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
3885 /* Check if GTP is supported. */
3886 for (i = 0; i < proto_num; i++) {
3887 if (!strncmp(proto[i].name, "GTP", 3)) {
3888- pf->gtp_support = true;
3889+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
3890+ pf->gtp_support = true;
3891+ else
3892+ pf->gtp_support = false;
3893 break;
3894 }
3895 }
3896
3897 /* Update customized pctype info */
3898 ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
3899- proto_num, proto);
3900+ proto_num, proto, op);
3901 if (ret)
3902 PMD_DRV_LOG(INFO, "No pctype is updated.");
3903
3904 /* Update customized ptype info */
3905 ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
3906- proto_num, proto);
3907+ proto_num, proto, op);
3908 if (ret)
3909 PMD_DRV_LOG(INFO, "No ptype is updated.");
3910
3911diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
3912index 229c974..2c107e2 100644
3913--- a/drivers/net/i40e/i40e_ethdev.h
3914+++ b/drivers/net/i40e/i40e_ethdev.h
3915@@ -40,6 +40,7 @@
3916 #include <rte_hash.h>
3917 #include <rte_flow_driver.h>
3918 #include <rte_tm_driver.h>
3919+#include "rte_pmd_i40e.h"
3920
3921 #define I40E_VLAN_TAG_SIZE 4
3922
3923@@ -1221,7 +1222,8 @@ void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
3924 struct i40e_customized_pctype*
3925 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
3926 void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
3927- uint32_t pkg_size);
3928+ uint32_t pkg_size,
3929+ enum rte_pmd_i40e_package_op op);
3930 int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
3931 int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
3932 struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
3933diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
3934index 37380e6..4ebf925 100644
3935--- a/drivers/net/i40e/i40e_flow.c
3936+++ b/drivers/net/i40e/i40e_flow.c
3937@@ -2418,7 +2418,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
3938 break;
3939 }
3940
3941- if (cus_pctype)
3942+ if (cus_pctype && cus_pctype->valid)
3943 return cus_pctype->pctype;
3944
3945 return I40E_FILTER_PCTYPE_INVALID;
3946diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
3947index f726a9c..ab1163c 100644
3948--- a/drivers/net/i40e/rte_pmd_i40e.c
3949+++ b/drivers/net/i40e/rte_pmd_i40e.c
3950@@ -1632,8 +1632,6 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
3951 return -EINVAL;
3952 }
3953
3954- i40e_update_customized_info(dev, buff, size);
3955-
3956 /* Find metadata segment */
3957 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
3958 pkg_hdr);
3959@@ -1737,6 +1735,10 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
3960 }
3961 }
3962
3963+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
3964+ op == RTE_PMD_I40E_PKG_OP_WR_DEL)
3965+ i40e_update_customized_info(dev, buff, size, op);
3966+
3967 rte_free(profile_info_sec);
3968 return status;
3969 }
3970diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
3971index f219866..d7eb458 100644
3972--- a/drivers/net/ixgbe/ixgbe_ethdev.c
3973+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
3974@@ -1366,6 +1366,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
3975 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3976 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3977 struct ixgbe_hw *hw;
3978+ int retries = 0;
3979+ int ret;
3980
3981 PMD_INIT_FUNC_TRACE();
3982
3983@@ -1386,8 +1388,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
3984
3985 /* disable uio intr before callback unregister */
3986 rte_intr_disable(intr_handle);
3987- rte_intr_callback_unregister(intr_handle,
3988- ixgbe_dev_interrupt_handler, eth_dev);
3989+
3990+ do {
3991+ ret = rte_intr_callback_unregister(intr_handle,
3992+ ixgbe_dev_interrupt_handler, eth_dev);
3993+ if (ret >= 0) {
3994+ break;
3995+ } else if (ret != -EAGAIN) {
3996+ PMD_INIT_LOG(ERR,
3997+ "intr callback unregister failed: %d",
3998+ ret);
3999+ return ret;
4000+ }
4001+ rte_delay_ms(100);
4002+ } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
4003
4004 /* uninitialize PF if max_vfs not zero */
4005 ixgbe_pf_host_uninit(eth_dev);
4006@@ -2316,11 +2330,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
4007 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
4008 const struct rte_eth_dcb_rx_conf *conf;
4009
4010- if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
4011- PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
4012- IXGBE_DCB_NB_QUEUES);
4013- return -EINVAL;
4014- }
4015 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
4016 if (!(conf->nb_tcs == ETH_4_TCS ||
4017 conf->nb_tcs == ETH_8_TCS)) {
4018@@ -2334,11 +2343,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
4019 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
4020 const struct rte_eth_dcb_tx_conf *conf;
4021
4022- if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
4023- PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
4024- IXGBE_DCB_NB_QUEUES);
4025- return -EINVAL;
4026- }
4027 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
4028 if (!(conf->nb_tcs == ETH_4_TCS ||
4029 conf->nb_tcs == ETH_8_TCS)) {
4030@@ -3886,7 +3890,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4031 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
4032 * before the link status is correct
4033 */
4034- if (mac->type == ixgbe_mac_82599_vf) {
4035+ if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
4036 int i;
4037
4038 for (i = 0; i < 5; i++) {
4039@@ -5822,8 +5826,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
4040
4041 /* won't configure msix register if no mapping is done
4042 * between intr vector and event fd
4043+ * but if misx has been enabled already, need to configure
4044+ * auto clean, auto mask and throttling.
4045 */
4046- if (!rte_intr_dp_is_en(intr_handle))
4047+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4048+ if (!rte_intr_dp_is_en(intr_handle) &&
4049+ !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
4050 return;
4051
4052 if (rte_intr_allow_others(intr_handle))
4053@@ -5847,27 +5855,30 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
4054 /* Populate the IVAR table and set the ITR values to the
4055 * corresponding register.
4056 */
4057- for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
4058- queue_id++) {
4059- /* by default, 1:1 mapping */
4060- ixgbe_set_ivar_map(hw, 0, queue_id, vec);
4061- intr_handle->intr_vec[queue_id] = vec;
4062- if (vec < base + intr_handle->nb_efd - 1)
4063- vec++;
4064- }
4065+ if (rte_intr_dp_is_en(intr_handle)) {
4066+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
4067+ queue_id++) {
4068+ /* by default, 1:1 mapping */
4069+ ixgbe_set_ivar_map(hw, 0, queue_id, vec);
4070+ intr_handle->intr_vec[queue_id] = vec;
4071+ if (vec < base + intr_handle->nb_efd - 1)
4072+ vec++;
4073+ }
4074
4075- switch (hw->mac.type) {
4076- case ixgbe_mac_82598EB:
4077- ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
4078- IXGBE_MISC_VEC_ID);
4079- break;
4080- case ixgbe_mac_82599EB:
4081- case ixgbe_mac_X540:
4082- case ixgbe_mac_X550:
4083- ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
4084- break;
4085- default:
4086- break;
4087+ switch (hw->mac.type) {
4088+ case ixgbe_mac_82598EB:
4089+ ixgbe_set_ivar_map(hw, -1,
4090+ IXGBE_IVAR_OTHER_CAUSES_INDEX,
4091+ IXGBE_MISC_VEC_ID);
4092+ break;
4093+ case ixgbe_mac_82599EB:
4094+ case ixgbe_mac_X540:
4095+ case ixgbe_mac_X550:
4096+ ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
4097+ break;
4098+ default:
4099+ break;
4100+ }
4101 }
4102 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
4103 IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
4104diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
4105index c1a2ea5..5c79501 100644
4106--- a/drivers/net/kni/rte_eth_kni.c
4107+++ b/drivers/net/kni/rte_eth_kni.c
4108@@ -90,7 +90,7 @@ static const struct rte_eth_link pmd_link = {
4109 .link_speed = ETH_SPEED_NUM_10G,
4110 .link_duplex = ETH_LINK_FULL_DUPLEX,
4111 .link_status = ETH_LINK_DOWN,
4112- .link_autoneg = ETH_LINK_AUTONEG,
4113+ .link_autoneg = ETH_LINK_FIXED,
4114 };
4115 static int is_kni_initialized;
4116
4117diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
4118index 84b8a32..0ca491b 100644
4119--- a/drivers/net/liquidio/lio_ethdev.c
4120+++ b/drivers/net/liquidio/lio_ethdev.c
4121@@ -1479,6 +1479,11 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
4122 /* Configure RSS if device configured with multiple RX queues. */
4123 lio_dev_mq_rx_configure(eth_dev);
4124
4125+ /* Before update the link info,
4126+ * must set linfo.link.link_status64 to 0.
4127+ */
4128+ lio_dev->linfo.link.link_status64 = 0;
4129+
4130 /* start polling for lsc */
4131 ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
4132 lio_sync_link_state_check,
4133diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
4134index 97dac64..4d7bd5f 100644
4135--- a/drivers/net/mlx4/mlx4.c
4136+++ b/drivers/net/mlx4/mlx4.c
4137@@ -85,6 +85,8 @@ const char *pmd_mlx4_init_params[] = {
4138 NULL,
4139 };
4140
4141+static void mlx4_dev_stop(struct rte_eth_dev *dev);
4142+
4143 /**
4144 * DPDK callback for Ethernet device configuration.
4145 *
4146@@ -108,7 +110,13 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
4147 " flow error type %d, cause %p, message: %s",
4148 -ret, strerror(-ret), error.type, error.cause,
4149 error.message ? error.message : "(unspecified)");
4150+ goto exit;
4151 }
4152+ ret = mlx4_intr_install(priv);
4153+ if (ret)
4154+ ERROR("%p: interrupt handler installation failed",
4155+ (void *)dev);
4156+exit:
4157 return ret;
4158 }
4159
4160@@ -141,7 +149,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
4161 (void *)dev, strerror(-ret));
4162 goto err;
4163 }
4164- ret = mlx4_intr_install(priv);
4165+ ret = mlx4_rxq_intr_enable(priv);
4166 if (ret) {
4167 ERROR("%p: interrupt handler installation failed",
4168 (void *)dev);
4169@@ -161,8 +169,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
4170 dev->rx_pkt_burst = mlx4_rx_burst;
4171 return 0;
4172 err:
4173- /* Rollback. */
4174- priv->started = 0;
4175+ mlx4_dev_stop(dev);
4176 return ret;
4177 }
4178
4179@@ -187,7 +194,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
4180 dev->rx_pkt_burst = mlx4_rx_burst_removed;
4181 rte_wmb();
4182 mlx4_flow_sync(priv, NULL);
4183- mlx4_intr_uninstall(priv);
4184+ mlx4_rxq_intr_disable(priv);
4185 mlx4_rss_deinit(priv);
4186 }
4187
4188@@ -212,6 +219,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
4189 dev->tx_pkt_burst = mlx4_tx_burst_removed;
4190 rte_wmb();
4191 mlx4_flow_clean(priv);
4192+ mlx4_rss_deinit(priv);
4193 for (i = 0; i != dev->data->nb_rx_queues; ++i)
4194 mlx4_rx_queue_release(dev->data->rx_queues[i]);
4195 for (i = 0; i != dev->data->nb_tx_queues; ++i)
4196@@ -336,7 +344,7 @@ mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
4197 return -rte_errno;
4198 }
4199 if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
4200- uint32_t ports = rte_log2_u32(conf->ports.present);
4201+ uint32_t ports = rte_log2_u32(conf->ports.present + 1);
4202
4203 if (tmp >= ports) {
4204 ERROR("port index %lu outside range [0,%" PRIu32 ")",
4205diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
4206index 3aeef87..41d652b 100644
4207--- a/drivers/net/mlx4/mlx4.h
4208+++ b/drivers/net/mlx4/mlx4.h
4209@@ -126,6 +126,7 @@ struct priv {
4210 uint32_t vf:1; /**< This is a VF device. */
4211 uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
4212 uint32_t isolated:1; /**< Toggle isolated mode. */
4213+ uint32_t rss_init:1; /**< Common RSS context is initialized. */
4214 uint32_t hw_csum:1; /* Checksum offload is supported. */
4215 uint32_t hw_csum_l2tun:1; /* Checksum support for L2 tunnels. */
4216 struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
4217@@ -170,6 +171,8 @@ const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
4218
4219 int mlx4_intr_uninstall(struct priv *priv);
4220 int mlx4_intr_install(struct priv *priv);
4221+int mlx4_rxq_intr_enable(struct priv *priv);
4222+void mlx4_rxq_intr_disable(struct priv *priv);
4223 int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
4224 int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
4225
4226diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
4227index 2f69e7d..89f552c 100644
4228--- a/drivers/net/mlx4/mlx4_ethdev.c
4229+++ b/drivers/net/mlx4/mlx4_ethdev.c
4230@@ -159,167 +159,6 @@ try_dev_id:
4231 }
4232
4233 /**
4234- * Read from sysfs entry.
4235- *
4236- * @param[in] priv
4237- * Pointer to private structure.
4238- * @param[in] entry
4239- * Entry name relative to sysfs path.
4240- * @param[out] buf
4241- * Data output buffer.
4242- * @param size
4243- * Buffer size.
4244- *
4245- * @return
4246- * Number of bytes read on success, negative errno value otherwise and
4247- * rte_errno is set.
4248- */
4249-static int
4250-mlx4_sysfs_read(const struct priv *priv, const char *entry,
4251- char *buf, size_t size)
4252-{
4253- char ifname[IF_NAMESIZE];
4254- FILE *file;
4255- int ret;
4256-
4257- ret = mlx4_get_ifname(priv, &ifname);
4258- if (ret)
4259- return ret;
4260-
4261- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
4262- ifname, entry);
4263-
4264- file = fopen(path, "rb");
4265- if (file == NULL) {
4266- rte_errno = errno;
4267- return -rte_errno;
4268- }
4269- ret = fread(buf, 1, size, file);
4270- if ((size_t)ret < size && ferror(file)) {
4271- rte_errno = EIO;
4272- ret = -rte_errno;
4273- } else {
4274- ret = size;
4275- }
4276- fclose(file);
4277- return ret;
4278-}
4279-
4280-/**
4281- * Write to sysfs entry.
4282- *
4283- * @param[in] priv
4284- * Pointer to private structure.
4285- * @param[in] entry
4286- * Entry name relative to sysfs path.
4287- * @param[in] buf
4288- * Data buffer.
4289- * @param size
4290- * Buffer size.
4291- *
4292- * @return
4293- * Number of bytes written on success, negative errno value otherwise and
4294- * rte_errno is set.
4295- */
4296-static int
4297-mlx4_sysfs_write(const struct priv *priv, const char *entry,
4298- char *buf, size_t size)
4299-{
4300- char ifname[IF_NAMESIZE];
4301- FILE *file;
4302- int ret;
4303-
4304- ret = mlx4_get_ifname(priv, &ifname);
4305- if (ret)
4306- return ret;
4307-
4308- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
4309- ifname, entry);
4310-
4311- file = fopen(path, "wb");
4312- if (file == NULL) {
4313- rte_errno = errno;
4314- return -rte_errno;
4315- }
4316- ret = fwrite(buf, 1, size, file);
4317- if ((size_t)ret < size || ferror(file)) {
4318- rte_errno = EIO;
4319- ret = -rte_errno;
4320- } else {
4321- ret = size;
4322- }
4323- fclose(file);
4324- return ret;
4325-}
4326-
4327-/**
4328- * Get unsigned long sysfs property.
4329- *
4330- * @param priv
4331- * Pointer to private structure.
4332- * @param[in] name
4333- * Entry name relative to sysfs path.
4334- * @param[out] value
4335- * Value output buffer.
4336- *
4337- * @return
4338- * 0 on success, negative errno value otherwise and rte_errno is set.
4339- */
4340-static int
4341-mlx4_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
4342-{
4343- int ret;
4344- unsigned long value_ret;
4345- char value_str[32];
4346-
4347- ret = mlx4_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
4348- if (ret < 0) {
4349- DEBUG("cannot read %s value from sysfs: %s",
4350- name, strerror(rte_errno));
4351- return ret;
4352- }
4353- value_str[ret] = '\0';
4354- errno = 0;
4355- value_ret = strtoul(value_str, NULL, 0);
4356- if (errno) {
4357- rte_errno = errno;
4358- DEBUG("invalid %s value `%s': %s", name, value_str,
4359- strerror(rte_errno));
4360- return -rte_errno;
4361- }
4362- *value = value_ret;
4363- return 0;
4364-}
4365-
4366-/**
4367- * Set unsigned long sysfs property.
4368- *
4369- * @param priv
4370- * Pointer to private structure.
4371- * @param[in] name
4372- * Entry name relative to sysfs path.
4373- * @param value
4374- * Value to set.
4375- *
4376- * @return
4377- * 0 on success, negative errno value otherwise and rte_errno is set.
4378- */
4379-static int
4380-mlx4_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
4381-{
4382- int ret;
4383- MKSTR(value_str, "%lu", value);
4384-
4385- ret = mlx4_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
4386- if (ret < 0) {
4387- DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
4388- name, value_str, value, strerror(rte_errno));
4389- return ret;
4390- }
4391- return 0;
4392-}
4393-
4394-/**
4395 * Perform ifreq ioctl() on associated Ethernet device.
4396 *
4397 * @param[in] priv
4398@@ -388,12 +227,12 @@ mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
4399 int
4400 mlx4_mtu_get(struct priv *priv, uint16_t *mtu)
4401 {
4402- unsigned long ulong_mtu = 0;
4403- int ret = mlx4_get_sysfs_ulong(priv, "mtu", &ulong_mtu);
4404+ struct ifreq request;
4405+ int ret = mlx4_ifreq(priv, SIOCGIFMTU, &request);
4406
4407 if (ret)
4408 return ret;
4409- *mtu = ulong_mtu;
4410+ *mtu = request.ifr_mtu;
4411 return 0;
4412 }
4413
4414@@ -412,20 +251,13 @@ int
4415 mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4416 {
4417 struct priv *priv = dev->data->dev_private;
4418- uint16_t new_mtu;
4419- int ret = mlx4_set_sysfs_ulong(priv, "mtu", mtu);
4420+ struct ifreq request = { .ifr_mtu = mtu, };
4421+ int ret = mlx4_ifreq(priv, SIOCSIFMTU, &request);
4422
4423 if (ret)
4424 return ret;
4425- ret = mlx4_mtu_get(priv, &new_mtu);
4426- if (ret)
4427- return ret;
4428- if (new_mtu == mtu) {
4429- priv->mtu = mtu;
4430- return 0;
4431- }
4432- rte_errno = EINVAL;
4433- return -rte_errno;
4434+ priv->mtu = mtu;
4435+ return 0;
4436 }
4437
4438 /**
4439@@ -444,14 +276,14 @@ mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4440 static int
4441 mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
4442 {
4443- unsigned long tmp = 0;
4444- int ret = mlx4_get_sysfs_ulong(priv, "flags", &tmp);
4445+ struct ifreq request;
4446+ int ret = mlx4_ifreq(priv, SIOCGIFFLAGS, &request);
4447
4448 if (ret)
4449 return ret;
4450- tmp &= keep;
4451- tmp |= (flags & (~keep));
4452- return mlx4_set_sysfs_ulong(priv, "flags", tmp);
4453+ request.ifr_flags &= keep;
4454+ request.ifr_flags |= flags & ~keep;
4455+ return mlx4_ifreq(priv, SIOCSIFFLAGS, &request);
4456 }
4457
4458 /**
4459diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
4460index e81e24d..fc0f061 100644
4461--- a/drivers/net/mlx4/mlx4_flow.c
4462+++ b/drivers/net/mlx4/mlx4_flow.c
4463@@ -116,7 +116,7 @@ static uint64_t
4464 mlx4_conv_rss_hf(uint64_t rss_hf)
4465 {
4466 enum { IPV4, IPV6, TCP, UDP, };
4467- const uint64_t in[] = {
4468+ static const uint64_t in[] = {
4469 [IPV4] = (ETH_RSS_IPV4 |
4470 ETH_RSS_FRAG_IPV4 |
4471 ETH_RSS_NONFRAG_IPV4_TCP |
4472@@ -139,7 +139,7 @@ mlx4_conv_rss_hf(uint64_t rss_hf)
4473 */
4474 [UDP] = 0,
4475 };
4476- const uint64_t out[RTE_DIM(in)] = {
4477+ static const uint64_t out[RTE_DIM(in)] = {
4478 [IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
4479 [IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
4480 [TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
4481@@ -379,6 +379,9 @@ error:
4482 * Additional mlx4-specific constraints on supported fields:
4483 *
4484 * - No support for partial masks.
4485+ * - Due to HW/FW limitation, flow rule priority is not taken into account
4486+ * when matching UDP destination ports, doing is therefore only supported
4487+ * at the highest priority level (0).
4488 *
4489 * @param[in, out] flow
4490 * Flow rule handle to update.
4491@@ -410,6 +413,11 @@ mlx4_flow_merge_udp(struct rte_flow *flow,
4492 msg = "mlx4 does not support matching partial UDP fields";
4493 goto error;
4494 }
4495+ if (mask && mask->hdr.dst_port && flow->priority) {
4496+ msg = "combining UDP destination port matching with a nonzero"
4497+ " priority level is not supported";
4498+ goto error;
4499+ }
4500 if (!flow->ibv_attr)
4501 return 0;
4502 ++flow->ibv_attr->num_of_specs;
4503@@ -674,6 +682,7 @@ mlx4_flow_prepare(struct priv *priv,
4504 NULL, "only ingress is supported");
4505 fill:
4506 proc = mlx4_flow_proc_item_list;
4507+ flow->priority = attr->priority;
4508 /* Go over pattern. */
4509 for (item = pattern; item->type; ++item) {
4510 const struct mlx4_flow_proc_item *next = NULL;
4511@@ -839,11 +848,14 @@ fill:
4512 },
4513 };
4514
4515- if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec)))
4516+ if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
4517+ if (temp.rss)
4518+ mlx4_rss_put(temp.rss);
4519 return rte_flow_error_set
4520 (error, -rte_errno,
4521 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4522 "flow rule handle allocation failure");
4523+ }
4524 /* Most fields will be updated by second pass. */
4525 *flow = (struct rte_flow){
4526 .ibv_attr = temp.ibv_attr,
4527@@ -1217,9 +1229,12 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
4528 *
4529 * Various flow rules are created depending on the mode the device is in:
4530 *
4531- * 1. Promiscuous: port MAC + catch-all (VLAN filtering is ignored).
4532- * 2. All multicast: port MAC/VLAN + catch-all multicast.
4533- * 3. Otherwise: port MAC/VLAN + broadcast MAC/VLAN.
4534+ * 1. Promiscuous:
4535+ * port MAC + broadcast + catch-all (VLAN filtering is ignored).
4536+ * 2. All multicast:
4537+ * port MAC/VLAN + broadcast + catch-all multicast.
4538+ * 3. Otherwise:
4539+ * port MAC/VLAN + broadcast MAC/VLAN.
4540 *
4541 * About MAC flow rules:
4542 *
4543@@ -1298,9 +1313,6 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
4544 !priv->dev->data->promiscuous ?
4545 &vlan_spec.tci :
4546 NULL;
4547- int broadcast =
4548- !priv->dev->data->promiscuous &&
4549- !priv->dev->data->all_multicast;
4550 uint16_t vlan = 0;
4551 struct rte_flow *flow;
4552 unsigned int i;
4553@@ -1334,7 +1346,7 @@ next_vlan:
4554 rule_vlan = NULL;
4555 }
4556 }
4557- for (i = 0; i != RTE_DIM(priv->mac) + broadcast; ++i) {
4558+ for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
4559 const struct ether_addr *mac;
4560
4561 /* Broadcasts are handled by an extra iteration. */
4562@@ -1398,7 +1410,7 @@ next_vlan:
4563 goto next_vlan;
4564 }
4565 /* Take care of promiscuous and all multicast flow rules. */
4566- if (!broadcast) {
4567+ if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
4568 for (flow = LIST_FIRST(&priv->flows);
4569 flow && flow->internal;
4570 flow = LIST_NEXT(flow, next)) {
4571diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
4572index 651fd37..5e1f9ea 100644
4573--- a/drivers/net/mlx4/mlx4_flow.h
4574+++ b/drivers/net/mlx4/mlx4_flow.h
4575@@ -70,6 +70,7 @@ struct rte_flow {
4576 uint32_t promisc:1; /**< This rule matches everything. */
4577 uint32_t allmulti:1; /**< This rule matches all multicast traffic. */
4578 uint32_t drop:1; /**< This rule drops packets. */
4579+ uint32_t priority; /**< Flow rule priority. */
4580 struct mlx4_rss *rss; /**< Rx target. */
4581 };
4582
4583diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
4584index 50d1976..2364cb2 100644
4585--- a/drivers/net/mlx4/mlx4_intr.c
4586+++ b/drivers/net/mlx4/mlx4_intr.c
4587@@ -291,7 +291,7 @@ mlx4_intr_uninstall(struct priv *priv)
4588 }
4589 rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
4590 priv->intr_alarm = 0;
4591- mlx4_rx_intr_vec_disable(priv);
4592+ mlx4_rxq_intr_disable(priv);
4593 rte_errno = err;
4594 return 0;
4595 }
4596@@ -313,8 +313,6 @@ mlx4_intr_install(struct priv *priv)
4597 int rc;
4598
4599 mlx4_intr_uninstall(priv);
4600- if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
4601- goto error;
4602 if (intr_conf->lsc | intr_conf->rmv) {
4603 priv->intr_handle.fd = priv->ctx->async_fd;
4604 rc = rte_intr_callback_register(&priv->intr_handle,
4605@@ -395,3 +393,40 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
4606 }
4607 return -ret;
4608 }
4609+
4610+/**
4611+ * Enable datapath interrupts.
4612+ *
4613+ * @param priv
4614+ * Pointer to private structure.
4615+ *
4616+ * @return
4617+ * 0 on success, negative errno value otherwise and rte_errno is set.
4618+ */
4619+int
4620+mlx4_rxq_intr_enable(struct priv *priv)
4621+{
4622+ const struct rte_intr_conf *const intr_conf =
4623+ &priv->dev->data->dev_conf.intr_conf;
4624+
4625+ if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
4626+ goto error;
4627+ return 0;
4628+error:
4629+ return -rte_errno;
4630+}
4631+
4632+/**
4633+ * Disable datapath interrupts, keeping other interrupts intact.
4634+ *
4635+ * @param priv
4636+ * Pointer to private structure.
4637+ */
4638+void
4639+mlx4_rxq_intr_disable(struct priv *priv)
4640+{
4641+ int err = rte_errno; /* Make sure rte_errno remains unchanged. */
4642+
4643+ mlx4_rx_intr_vec_disable(priv);
4644+ rte_errno = err;
4645+}
4646diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
4647index 53313c5..06030c2 100644
4648--- a/drivers/net/mlx4/mlx4_rxq.c
4649+++ b/drivers/net/mlx4/mlx4_rxq.c
4650@@ -363,6 +363,8 @@ mlx4_rss_init(struct priv *priv)
4651 unsigned int i;
4652 int ret;
4653
4654+ if (priv->rss_init)
4655+ return 0;
4656 /* Prepare range for RSS contexts before creating the first WQ. */
4657 ret = mlx4dv_set_context_attr(priv->ctx,
4658 MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
4659@@ -444,6 +446,7 @@ wq_num_check:
4660 }
4661 wq_num_prev = wq_num;
4662 }
4663+ priv->rss_init = 1;
4664 return 0;
4665 error:
4666 ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
4667@@ -472,6 +475,8 @@ mlx4_rss_deinit(struct priv *priv)
4668 {
4669 unsigned int i;
4670
4671+ if (!priv->rss_init)
4672+ return;
4673 for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
4674 struct rxq *rxq = priv->dev->data->rx_queues[i];
4675
4676@@ -480,6 +485,7 @@ mlx4_rss_deinit(struct priv *priv)
4677 mlx4_rxq_detach(rxq);
4678 }
4679 }
4680+ priv->rss_init = 0;
4681 }
4682
4683 /**
4684@@ -622,6 +628,7 @@ error:
4685 claim_zero(ibv_destroy_wq(wq));
4686 if (cq)
4687 claim_zero(ibv_destroy_cq(cq));
4688+ --rxq->usecnt;
4689 rte_errno = ret;
4690 ERROR("error while attaching Rx queue %p: %s: %s",
4691 (void *)rxq, msg, strerror(ret));
4692diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
4693index 92b6257..05c4892 100644
4694--- a/drivers/net/mlx4/mlx4_rxtx.c
4695+++ b/drivers/net/mlx4/mlx4_rxtx.c
4696@@ -961,7 +961,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
4697 /* Update packet information. */
4698 pkt->packet_type =
4699 rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
4700- pkt->ol_flags = 0;
4701+ pkt->ol_flags = PKT_RX_RSS_HASH;
4702+ pkt->hash.rss = cqe->immed_rss_invalid;
4703 pkt->pkt_len = len;
4704 if (rxq->csum | rxq->csum_l2tun) {
4705 uint32_t flags =
4706diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
4707index 463df2b..5a390e8 100644
4708--- a/drivers/net/mlx4/mlx4_rxtx.h
4709+++ b/drivers/net/mlx4/mlx4_rxtx.h
4710@@ -107,7 +107,7 @@ struct txq_elt {
4711 struct rte_mbuf *buf; /**< Buffer. */
4712 };
4713
4714-/** Rx queue counters. */
4715+/** Tx queue counters. */
4716 struct mlx4_txq_stats {
4717 unsigned int idx; /**< Mapping index. */
4718 uint64_t opackets; /**< Total of successfully sent packets. */
4719diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
4720index 45e0e8d..10ce335 100644
4721--- a/drivers/net/mlx5/mlx5.c
4722+++ b/drivers/net/mlx5/mlx5.c
4723@@ -39,6 +39,7 @@
4724 #include <stdlib.h>
4725 #include <errno.h>
4726 #include <net/if.h>
4727+#include <sys/mman.h>
4728
4729 /* Verbs header. */
4730 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
4731@@ -56,6 +57,7 @@
4732 #include <rte_pci.h>
4733 #include <rte_bus_pci.h>
4734 #include <rte_common.h>
4735+#include <rte_eal_memconfig.h>
4736 #include <rte_kvargs.h>
4737
4738 #include "mlx5.h"
4739@@ -117,6 +119,10 @@ struct mlx5_args {
4740 int tx_vec_en;
4741 int rx_vec_en;
4742 };
4743+
4744+/** Driver-specific log messages type. */
4745+int mlx5_logtype;
4746+
4747 /**
4748 * Retrieve integer value from environment variable.
4749 *
4750@@ -148,7 +154,7 @@ mlx5_getenv_int(const char *name)
4751 * A pointer to the callback data.
4752 *
4753 * @return
4754- * a pointer to the allocate space.
4755+ * Allocated buffer, NULL otherwise and rte_errno is set.
4756 */
4757 static void *
4758 mlx5_alloc_verbs_buf(size_t size, void *data)
4759@@ -156,11 +162,22 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
4760 struct priv *priv = data;
4761 void *ret;
4762 size_t alignment = sysconf(_SC_PAGESIZE);
4763+ unsigned int socket = SOCKET_ID_ANY;
4764+
4765+ if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
4766+ const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
4767+
4768+ socket = ctrl->socket;
4769+ } else if (priv->verbs_alloc_ctx.type ==
4770+ MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
4771+ const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
4772
4773+ socket = ctrl->socket;
4774+ }
4775 assert(data != NULL);
4776- ret = rte_malloc_socket(__func__, size, alignment,
4777- priv->dev->device->numa_node);
4778- DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret);
4779+ ret = rte_malloc_socket(__func__, size, alignment, socket);
4780+ if (!ret && size)
4781+ rte_errno = ENOMEM;
4782 return ret;
4783 }
4784
4785@@ -176,7 +193,6 @@ static void
4786 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
4787 {
4788 assert(data != NULL);
4789- DEBUG("Extern free request: %p", ptr);
4790 rte_free(ptr);
4791 }
4792
4793@@ -191,17 +207,16 @@ mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
4794 static void
4795 mlx5_dev_close(struct rte_eth_dev *dev)
4796 {
4797- struct priv *priv = mlx5_get_priv(dev);
4798+ struct priv *priv = dev->data->dev_private;
4799 unsigned int i;
4800 int ret;
4801
4802- priv_lock(priv);
4803- DEBUG("%p: closing device \"%s\"",
4804- (void *)dev,
4805- ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
4806+ DRV_LOG(DEBUG, "port %u closing device \"%s\"",
4807+ dev->data->port_id,
4808+ ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
4809 /* In case mlx5_dev_stop() has not been called. */
4810- priv_dev_interrupt_handler_uninstall(priv, dev);
4811- priv_dev_traffic_disable(priv, dev);
4812+ mlx5_dev_interrupt_handler_uninstall(dev);
4813+ mlx5_traffic_disable(dev);
4814 /* Prevent crashes when queues are still in use. */
4815 dev->rx_pkt_burst = removed_rx_burst;
4816 dev->tx_pkt_burst = removed_tx_burst;
4817@@ -209,7 +224,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
4818 /* XXX race condition if mlx5_rx_burst() is still running. */
4819 usleep(1000);
4820 for (i = 0; (i != priv->rxqs_n); ++i)
4821- mlx5_priv_rxq_release(priv, i);
4822+ mlx5_rxq_release(dev, i);
4823 priv->rxqs_n = 0;
4824 priv->rxqs = NULL;
4825 }
4826@@ -217,7 +232,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
4827 /* XXX race condition if mlx5_tx_burst() is still running. */
4828 usleep(1000);
4829 for (i = 0; (i != priv->txqs_n); ++i)
4830- mlx5_priv_txq_release(priv, i);
4831+ mlx5_txq_release(dev, i);
4832 priv->txqs_n = 0;
4833 priv->txqs = NULL;
4834 }
4835@@ -231,32 +246,40 @@ mlx5_dev_close(struct rte_eth_dev *dev)
4836 rte_free(priv->rss_conf.rss_key);
4837 if (priv->reta_idx != NULL)
4838 rte_free(priv->reta_idx);
4839- priv_socket_uninit(priv);
4840- ret = mlx5_priv_hrxq_ibv_verify(priv);
4841+ if (priv->primary_socket)
4842+ mlx5_socket_uninit(dev);
4843+ ret = mlx5_hrxq_ibv_verify(dev);
4844 if (ret)
4845- WARN("%p: some Hash Rx queue still remain", (void *)priv);
4846- ret = mlx5_priv_ind_table_ibv_verify(priv);
4847+ DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
4848+ dev->data->port_id);
4849+ ret = mlx5_ind_table_ibv_verify(dev);
4850 if (ret)
4851- WARN("%p: some Indirection table still remain", (void *)priv);
4852- ret = mlx5_priv_rxq_ibv_verify(priv);
4853+ DRV_LOG(WARNING, "port %u some indirection table still remain",
4854+ dev->data->port_id);
4855+ ret = mlx5_rxq_ibv_verify(dev);
4856 if (ret)
4857- WARN("%p: some Verbs Rx queue still remain", (void *)priv);
4858- ret = mlx5_priv_rxq_verify(priv);
4859+ DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
4860+ dev->data->port_id);
4861+ ret = mlx5_rxq_verify(dev);
4862 if (ret)
4863- WARN("%p: some Rx Queues still remain", (void *)priv);
4864- ret = mlx5_priv_txq_ibv_verify(priv);
4865+ DRV_LOG(WARNING, "port %u some Rx queues still remain",
4866+ dev->data->port_id);
4867+ ret = mlx5_txq_ibv_verify(dev);
4868 if (ret)
4869- WARN("%p: some Verbs Tx queue still remain", (void *)priv);
4870- ret = mlx5_priv_txq_verify(priv);
4871+ DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
4872+ dev->data->port_id);
4873+ ret = mlx5_txq_verify(dev);
4874 if (ret)
4875- WARN("%p: some Tx Queues still remain", (void *)priv);
4876- ret = priv_flow_verify(priv);
4877+ DRV_LOG(WARNING, "port %u some Tx queues still remain",
4878+ dev->data->port_id);
4879+ ret = mlx5_flow_verify(dev);
4880 if (ret)
4881- WARN("%p: some flows still remain", (void *)priv);
4882- ret = priv_mr_verify(priv);
4883+ DRV_LOG(WARNING, "port %u some flows still remain",
4884+ dev->data->port_id);
4885+ ret = mlx5_mr_verify(dev);
4886 if (ret)
4887- WARN("%p: some Memory Region still remain", (void *)priv);
4888- priv_unlock(priv);
4889+ DRV_LOG(WARNING, "port %u some memory region still remain",
4890+ dev->data->port_id);
4891 memset(priv, 0, sizeof(*priv));
4892 }
4893
4894@@ -394,7 +417,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
4895 * User data.
4896 *
4897 * @return
4898- * 0 on success, negative errno value on failure.
4899+ * 0 on success, a negative errno value otherwise and rte_errno is set.
4900 */
4901 static int
4902 mlx5_args_check(const char *key, const char *val, void *opaque)
4903@@ -405,8 +428,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
4904 errno = 0;
4905 tmp = strtoul(val, NULL, 0);
4906 if (errno) {
4907- WARN("%s: \"%s\" is not a valid integer", key, val);
4908- return errno;
4909+ rte_errno = errno;
4910+ DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
4911+ return -rte_errno;
4912 }
4913 if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
4914 args->cqe_comp = !!tmp;
4915@@ -427,8 +451,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
4916 } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
4917 args->rx_vec_en = !!tmp;
4918 } else {
4919- WARN("%s: unknown parameter", key);
4920- return -EINVAL;
4921+ DRV_LOG(WARNING, "%s: unknown parameter", key);
4922+ rte_errno = EINVAL;
4923+ return -rte_errno;
4924 }
4925 return 0;
4926 }
4927@@ -442,7 +467,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
4928 * Device arguments structure.
4929 *
4930 * @return
4931- * 0 on success, errno value on failure.
4932+ * 0 on success, a negative errno value otherwise and rte_errno is set.
4933 */
4934 static int
4935 mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
4936@@ -474,9 +499,10 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
4937 if (rte_kvargs_count(kvlist, params[i])) {
4938 ret = rte_kvargs_process(kvlist, params[i],
4939 mlx5_args_check, args);
4940- if (ret != 0) {
4941+ if (ret) {
4942+ rte_errno = EINVAL;
4943 rte_kvargs_free(kvlist);
4944- return ret;
4945+ return -rte_errno;
4946 }
4947 }
4948 }
4949@@ -486,6 +512,112 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
4950
4951 static struct rte_pci_driver mlx5_driver;
4952
4953+/*
4954+ * Reserved UAR address space for TXQ UAR(hw doorbell) mapping, process
4955+ * local resource used by both primary and secondary to avoid duplicate
4956+ * reservation.
4957+ * The space has to be available on both primary and secondary process,
4958+ * TXQ UAR maps to this area using fixed mmap w/o double check.
4959+ */
4960+static void *uar_base;
4961+
4962+/**
4963+ * Reserve UAR address space for primary process.
4964+ *
4965+ * @param[in] dev
4966+ * Pointer to Ethernet device.
4967+ *
4968+ * @return
4969+ * 0 on success, a negative errno value otherwise and rte_errno is set.
4970+ */
4971+static int
4972+mlx5_uar_init_primary(struct rte_eth_dev *dev)
4973+{
4974+ struct priv *priv = dev->data->dev_private;
4975+ void *addr = (void *)0;
4976+ int i;
4977+ const struct rte_mem_config *mcfg;
4978+
4979+ if (uar_base) { /* UAR address space mapped. */
4980+ priv->uar_base = uar_base;
4981+ return 0;
4982+ }
4983+ /* find out lower bound of hugepage segments */
4984+ mcfg = rte_eal_get_configuration()->mem_config;
4985+ for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) {
4986+ if (addr)
4987+ addr = RTE_MIN(addr, mcfg->memseg[i].addr);
4988+ else
4989+ addr = mcfg->memseg[i].addr;
4990+ }
4991+ /* keep distance to hugepages to minimize potential conflicts. */
4992+ addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
4993+ /* anonymous mmap, no real memory consumption. */
4994+ addr = mmap(addr, MLX5_UAR_SIZE,
4995+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
4996+ if (addr == MAP_FAILED) {
4997+ DRV_LOG(ERR,
4998+ "port %u failed to reserve UAR address space, please"
4999+ " adjust MLX5_UAR_SIZE or try --base-virtaddr",
5000+ dev->data->port_id);
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches