From 3f05da15fed490fd199b2feac554feb7f7baa7a2 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Apr 24 2024 11:03:10 +0000 Subject: Import openvswitch3.2-3.2.0-63.el9fdp.src.rpm from FDP --- diff --git a/SOURCES/openvswitch-3.2.0.patch b/SOURCES/openvswitch-3.2.0.patch index c88769e..67e6ee4 100644 --- a/SOURCES/openvswitch-3.2.0.patch +++ b/SOURCES/openvswitch-3.2.0.patch @@ -28,7 +28,7 @@ index 48931fa085..d8a9722809 100644 memory: 4G diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml -index 47d239f108..b50c42de6f 100644 +index 47d239f108..8d4815b362 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -8,16 +8,16 @@ jobs: @@ -118,7 +118,22 @@ index 47d239f108..b50c42de6f 100644 with: path: dpdk-dir key: ${{ needs.build-dpdk.outputs.dpdk_key }} -@@ -200,9 +206,9 @@ jobs: +@@ -191,6 +197,14 @@ jobs: + if: matrix.m32 != '' + run: sudo apt install -y gcc-multilib + ++ - name: Reduce ASLR entropy ++ if: matrix.asan != '' || matrix.ubsan != '' ++ # Asan in llvm 14 provided in ubuntu-22.04 is incompatible with ++ # high-entropy ASLR configured in much newer kernels that GitHub ++ # runners are using leading to random crashes: ++ # https://github.com/actions/runner-images/issues/9491 ++ run: sudo sysctl -w vm.mmap_rnd_bits=28 ++ + - name: prepare + run: ./.ci/linux-prepare.sh + +@@ -200,9 +214,9 @@ jobs: - name: copy logs on failure if: failure() || cancelled() run: | @@ -130,7 +145,7 @@ index 47d239f108..b50c42de6f 100644 # So, we're just archiving everything here to avoid any issues. mkdir logs cp config.log ./logs/ -@@ -211,7 +217,7 @@ jobs: +@@ -211,7 +225,7 @@ jobs: - name: upload logs on failure if: failure() || cancelled() @@ -139,7 +154,7 @@ index 47d239f108..b50c42de6f 100644 with: name: logs-linux-${{ join(matrix.*, '-') }} path: logs.tgz -@@ -230,13 +236,13 @@ jobs: +@@ -230,13 +244,13 @@ jobs: steps: - name: checkout @@ -155,7 +170,7 @@ index 47d239f108..b50c42de6f 100644 with: python-version: '3.9' - name: install dependencies -@@ -247,7 +253,7 @@ jobs: +@@ -247,7 +261,7 @@ jobs: run: ./.ci/osx-build.sh - name: upload logs on failure if: failure() @@ -164,7 +179,7 @@ index 47d239f108..b50c42de6f 100644 with: name: logs-osx-clang---disable-ssl path: config.log -@@ -271,7 +277,7 @@ jobs: +@@ -271,7 +285,7 @@ jobs: steps: - name: checkout @@ -173,7 +188,7 @@ index 47d239f108..b50c42de6f 100644 - name: update PATH run: | -@@ -293,7 +299,7 @@ jobs: +@@ -293,7 +307,7 @@ jobs: run: ./.ci/linux-build.sh - name: upload deb packages @@ -182,7 +197,7 @@ index 47d239f108..b50c42de6f 100644 with: name: deb-packages-${{ matrix.dpdk }}-dpdk path: '/home/runner/work/ovs/*.deb' -@@ -301,7 +307,7 @@ jobs: +@@ -301,7 +315,7 @@ jobs: build-linux-rpm: name: linux rpm fedora runs-on: ubuntu-latest @@ -191,7 +206,7 @@ index 47d239f108..b50c42de6f 100644 timeout-minutes: 30 strategy: -@@ -309,7 +315,7 @@ jobs: +@@ -309,7 +323,7 @@ jobs: steps: - name: checkout @@ -200,7 +215,7 @@ index 47d239f108..b50c42de6f 100644 - name: install dependencies run: | dnf install -y rpm-build dnf-plugins-core -@@ -328,7 +334,7 @@ jobs: +@@ -328,7 +342,7 @@ jobs: run: dnf install -y rpm/rpmbuild/RPMS/*/*.rpm - name: upload rpm packages @@ -240,6 +255,18 @@ index 0000000000..7d505150ec +python: + install: + - requirements: Documentation/requirements.txt +diff --git a/AUTHORS.rst b/AUTHORS.rst +index 9186e1ad22..f58bf1c853 100644 +--- a/AUTHORS.rst ++++ b/AUTHORS.rst +@@ -578,6 +578,7 @@ David Evans davidjoshuaevans@gmail.com + David Palma 
palma@onesource.pt + David van Moolenbroek dvmoolenbroek@aimvalley.nl + Derek Cormier derek.cormier@lab.ntt.co.jp ++Derrick Lim derrick.lim@rakuten.com + Dhaval Badiani dbadiani@vmware.com + DK Moon + Ding Zhi zhi.ding@6wind.com diff --git a/Documentation/faq/releases.rst b/Documentation/faq/releases.rst index e6bda14e7b..f47d408369 100644 --- a/Documentation/faq/releases.rst @@ -1518,6 +1545,977 @@ index 9ccafd6d47..a2506da5ff 100644 * * To use it, insert the following code to where backtrace is * desired: +diff --git a/lib/bfd.c b/lib/bfd.c +index 9698576d07..b8149e7897 100644 +--- a/lib/bfd.c ++++ b/lib/bfd.c +@@ -586,7 +586,6 @@ bfd_put_packet(struct bfd *bfd, struct dp_packet *p, + { + long long int min_tx, min_rx; + struct udp_header *udp; +- struct eth_header *eth; + struct ip_header *ip; + struct msg *msg; + +@@ -605,15 +604,13 @@ bfd_put_packet(struct bfd *bfd, struct dp_packet *p, + * set. */ + ovs_assert(!(bfd->flags & FLAG_POLL) || !(bfd->flags & FLAG_FINAL)); + +- dp_packet_reserve(p, 2); /* Properly align after the ethernet header. */ +- eth = dp_packet_put_uninit(p, sizeof *eth); +- eth->eth_src = eth_addr_is_zero(bfd->local_eth_src) +- ? eth_src : bfd->local_eth_src; +- eth->eth_dst = eth_addr_is_zero(bfd->local_eth_dst) +- ? eth_addr_bfd : bfd->local_eth_dst; +- eth->eth_type = htons(ETH_TYPE_IP); ++ ip = eth_compose(p, ++ eth_addr_is_zero(bfd->local_eth_dst) ++ ? eth_addr_bfd : bfd->local_eth_dst, ++ eth_addr_is_zero(bfd->local_eth_src) ++ ? eth_src : bfd->local_eth_src, ++ ETH_TYPE_IP, sizeof *ip + sizeof *udp + sizeof *msg); + +- ip = dp_packet_put_zeros(p, sizeof *ip); + ip->ip_ihl_ver = IP_IHL_VER(5, 4); + ip->ip_tot_len = htons(sizeof *ip + sizeof *udp + sizeof *msg); + ip->ip_ttl = MAXTTL; +@@ -621,15 +618,17 @@ bfd_put_packet(struct bfd *bfd, struct dp_packet *p, + ip->ip_proto = IPPROTO_UDP; + put_16aligned_be32(&ip->ip_src, bfd->ip_src); + put_16aligned_be32(&ip->ip_dst, bfd->ip_dst); +- /* Checksum has already been zeroed by put_zeros call. */ ++ /* Checksum has already been zeroed by eth_compose call. */ + ip->ip_csum = csum(ip, sizeof *ip); ++ dp_packet_set_l4(p, ip + 1); + +- udp = dp_packet_put_zeros(p, sizeof *udp); ++ udp = dp_packet_l4(p); + udp->udp_src = htons(bfd->udp_src); + udp->udp_dst = htons(BFD_DEST_PORT); + udp->udp_len = htons(sizeof *udp + sizeof *msg); ++ /* Checksum already zero from eth_compose. */ + +- msg = dp_packet_put_uninit(p, sizeof *msg); ++ msg = (struct msg *)(udp + 1); + msg->vers_diag = (BFD_VERSION << 5) | bfd->diag; + msg->flags = (bfd->state & STATE_MASK) | bfd->flags; + +@@ -1131,10 +1130,11 @@ bfd_set_state(struct bfd *bfd, enum state state, enum diag diag) + if (!VLOG_DROP_INFO(&rl)) { + struct ds ds = DS_EMPTY_INITIALIZER; + +- ds_put_format(&ds, "%s: BFD state change: %s->%s" +- " \"%s\"->\"%s\".\n", ++ ds_put_format(&ds, "%s: BFD state change: (bfd.SessionState: %s," ++ " bfd.LocalDiag: \"%s\") -> (bfd.SessionState: %s," ++ " bfd.LocalDiag: \"%s\")\n", + bfd->name, bfd_state_str(bfd->state), +- bfd_state_str(state), bfd_diag_str(bfd->diag), ++ bfd_diag_str(bfd->diag), bfd_state_str(state), + bfd_diag_str(diag)); + bfd_put_details(&ds, bfd); + VLOG_INFO("%s", ds_cstr(&ds)); +diff --git a/lib/conntrack-private.h b/lib/conntrack-private.h +index bb326868e9..3fd5fccd3e 100644 +--- a/lib/conntrack-private.h ++++ b/lib/conntrack-private.h +@@ -49,6 +49,12 @@ struct ct_endpoint { + * hashing in ct_endpoint_hash_add(). 
*/ + BUILD_ASSERT_DECL(sizeof(struct ct_endpoint) == sizeof(union ct_addr) + 4); + ++enum key_dir { ++ CT_DIR_FWD = 0, ++ CT_DIR_REV, ++ CT_DIRS, ++}; ++ + /* Changes to this structure need to be reflected in conn_key_hash() + * and conn_key_cmp(). */ + struct conn_key { +@@ -112,20 +118,18 @@ enum ct_timeout { + + #define N_EXP_LISTS 100 + +-enum OVS_PACKED_ENUM ct_conn_type { +- CT_CONN_TYPE_DEFAULT, +- CT_CONN_TYPE_UN_NAT, ++struct conn_key_node { ++ enum key_dir dir; ++ struct conn_key key; ++ struct cmap_node cm_node; + }; + + struct conn { + /* Immutable data. */ +- struct conn_key key; +- struct conn_key rev_key; ++ struct conn_key_node key_node[CT_DIRS]; + struct conn_key parent_key; /* Only used for orig_tuple support. */ +- struct cmap_node cm_node; + uint16_t nat_action; + char *alg; +- struct conn *nat_conn; /* The NAT 'conn' context, if there is one. */ + atomic_flag reclaimed; /* False during the lifetime of the connection, + * True as soon as a thread has started freeing + * its memory. */ +@@ -150,7 +154,6 @@ struct conn { + + /* Immutable data. */ + bool alg_related; /* True if alg data connection. */ +- enum ct_conn_type conn_type; + + uint32_t tp_id; /* Timeout policy ID. */ + }; +diff --git a/lib/conntrack-tp.c b/lib/conntrack-tp.c +index 89cb2704a6..2149fdc73a 100644 +--- a/lib/conntrack-tp.c ++++ b/lib/conntrack-tp.c +@@ -253,7 +253,8 @@ conn_update_expiration(struct conntrack *ct, struct conn *conn, + } + VLOG_DBG_RL(&rl, "Update timeout %s zone=%u with policy id=%d " + "val=%u sec.", +- ct_timeout_str[tm], conn->key.zone, conn->tp_id, val); ++ ct_timeout_str[tm], conn->key_node[CT_DIR_FWD].key.zone, ++ conn->tp_id, val); + + atomic_store_relaxed(&conn->expiration, now + val * 1000); + } +@@ -273,7 +274,8 @@ conn_init_expiration(struct conntrack *ct, struct conn *conn, + } + + VLOG_DBG_RL(&rl, "Init timeout %s zone=%u with policy id=%d val=%u sec.", +- ct_timeout_str[tm], conn->key.zone, conn->tp_id, val); ++ ct_timeout_str[tm], conn->key_node[CT_DIR_FWD].key.zone, ++ conn->tp_id, val); + + conn->expiration = now + val * 1000; + } +diff --git a/lib/conntrack.c b/lib/conntrack.c +index 5f1176d333..592bbaa3e1 100644 +--- a/lib/conntrack.c ++++ b/lib/conntrack.c +@@ -103,7 +103,7 @@ static enum ct_update_res conn_update(struct conntrack *ct, struct conn *conn, + struct conn_lookup_ctx *ctx, + long long now); + static long long int conn_expiration(const struct conn *); +-static bool conn_expired(struct conn *, long long now); ++static bool conn_expired(const struct conn *, long long now); + static void conn_expire_push_front(struct conntrack *ct, struct conn *conn); + static void set_mark(struct dp_packet *, struct conn *, + uint32_t val, uint32_t mask); +@@ -113,8 +113,7 @@ static void set_label(struct dp_packet *, struct conn *, + static void *clean_thread_main(void *f_); + + static bool +-nat_get_unique_tuple(struct conntrack *ct, const struct conn *conn, +- struct conn *nat_conn, ++nat_get_unique_tuple(struct conntrack *ct, struct conn *conn, + const struct nat_action_info_t *nat_info); + + static uint8_t +@@ -208,7 +207,7 @@ static alg_helper alg_helpers[] = { + #define ALG_WC_SRC_PORT 0 + + /* If the total number of connections goes above this value, no new connections +- * are accepted; this is for CT_CONN_TYPE_DEFAULT connections. */ ++ * are accepted. 
*/ + #define DEFAULT_N_CONN_LIMIT 3000000 + + /* Does a member by member comparison of two conn_keys; this +@@ -234,61 +233,6 @@ conn_key_cmp(const struct conn_key *key1, const struct conn_key *key2) + return 1; + } + +-static void +-ct_print_conn_info(const struct conn *c, const char *log_msg, +- enum vlog_level vll, bool force, bool rl_on) +-{ +-#define CT_VLOG(RL_ON, LEVEL, ...) \ +- do { \ +- if (RL_ON) { \ +- static struct vlog_rate_limit rl_ = VLOG_RATE_LIMIT_INIT(5, 5); \ +- vlog_rate_limit(&this_module, LEVEL, &rl_, __VA_ARGS__); \ +- } else { \ +- vlog(&this_module, LEVEL, __VA_ARGS__); \ +- } \ +- } while (0) +- +- if (OVS_UNLIKELY(force || vlog_is_enabled(&this_module, vll))) { +- if (c->key.dl_type == htons(ETH_TYPE_IP)) { +- CT_VLOG(rl_on, vll, "%s: src ip "IP_FMT" dst ip "IP_FMT" rev src " +- "ip "IP_FMT" rev dst ip "IP_FMT" src/dst ports " +- "%"PRIu16"/%"PRIu16" rev src/dst ports " +- "%"PRIu16"/%"PRIu16" zone/rev zone " +- "%"PRIu16"/%"PRIu16" nw_proto/rev nw_proto " +- "%"PRIu8"/%"PRIu8, log_msg, +- IP_ARGS(c->key.src.addr.ipv4), +- IP_ARGS(c->key.dst.addr.ipv4), +- IP_ARGS(c->rev_key.src.addr.ipv4), +- IP_ARGS(c->rev_key.dst.addr.ipv4), +- ntohs(c->key.src.port), ntohs(c->key.dst.port), +- ntohs(c->rev_key.src.port), ntohs(c->rev_key.dst.port), +- c->key.zone, c->rev_key.zone, c->key.nw_proto, +- c->rev_key.nw_proto); +- } else { +- char ip6_s[INET6_ADDRSTRLEN]; +- inet_ntop(AF_INET6, &c->key.src.addr.ipv6, ip6_s, sizeof ip6_s); +- char ip6_d[INET6_ADDRSTRLEN]; +- inet_ntop(AF_INET6, &c->key.dst.addr.ipv6, ip6_d, sizeof ip6_d); +- char ip6_rs[INET6_ADDRSTRLEN]; +- inet_ntop(AF_INET6, &c->rev_key.src.addr.ipv6, ip6_rs, +- sizeof ip6_rs); +- char ip6_rd[INET6_ADDRSTRLEN]; +- inet_ntop(AF_INET6, &c->rev_key.dst.addr.ipv6, ip6_rd, +- sizeof ip6_rd); +- +- CT_VLOG(rl_on, vll, "%s: src ip %s dst ip %s rev src ip %s" +- " rev dst ip %s src/dst ports %"PRIu16"/%"PRIu16 +- " rev src/dst ports %"PRIu16"/%"PRIu16" zone/rev zone " +- "%"PRIu16"/%"PRIu16" nw_proto/rev nw_proto " +- "%"PRIu8"/%"PRIu8, log_msg, ip6_s, ip6_d, ip6_rs, +- ip6_rd, ntohs(c->key.src.port), ntohs(c->key.dst.port), +- ntohs(c->rev_key.src.port), ntohs(c->rev_key.dst.port), +- c->key.zone, c->rev_key.zone, c->key.nw_proto, +- c->rev_key.nw_proto); +- } +- } +-} +- + /* Initializes the connection tracker 'ct'. The caller is responsible for + * calling 'conntrack_destroy()', when the instance is not needed anymore */ + struct conntrack * +@@ -477,28 +421,27 @@ conn_clean__(struct conntrack *ct, struct conn *conn) + uint32_t hash; + + if (conn->alg) { +- expectation_clean(ct, &conn->key); ++ expectation_clean(ct, &conn->key_node[CT_DIR_FWD].key); + } + +- hash = conn_key_hash(&conn->key, ct->hash_basis); +- cmap_remove(&ct->conns, &conn->cm_node, hash); ++ hash = conn_key_hash(&conn->key_node[CT_DIR_FWD].key, ct->hash_basis); ++ cmap_remove(&ct->conns, &conn->key_node[CT_DIR_FWD].cm_node, hash); + +- if (conn->nat_conn) { +- hash = conn_key_hash(&conn->nat_conn->key, ct->hash_basis); +- cmap_remove(&ct->conns, &conn->nat_conn->cm_node, hash); ++ if (conn->nat_action) { ++ hash = conn_key_hash(&conn->key_node[CT_DIR_REV].key, ++ ct->hash_basis); ++ cmap_remove(&ct->conns, &conn->key_node[CT_DIR_REV].cm_node, hash); + } + + rculist_remove(&conn->node); + } + +-/* Must be called with 'conn' of 'conn_type' CT_CONN_TYPE_DEFAULT. Also +- * removes the associated nat 'conn' from the lookup datastructures. */ ++/* Also removes the associated nat 'conn' from the lookup ++ datastructures. 
*/ + static void + conn_clean(struct conntrack *ct, struct conn *conn) + OVS_EXCLUDED(conn->lock, ct->ct_lock) + { +- ovs_assert(conn->conn_type == CT_CONN_TYPE_DEFAULT); +- + if (atomic_flag_test_and_set(&conn->reclaimed)) { + return; + } +@@ -585,34 +528,39 @@ conn_key_lookup(struct conntrack *ct, const struct conn_key *key, + uint32_t hash, long long now, struct conn **conn_out, + bool *reply) + { +- struct conn *conn; ++ struct conn_key_node *keyn; ++ struct conn *conn = NULL; + bool found = false; + +- CMAP_FOR_EACH_WITH_HASH (conn, cm_node, hash, &ct->conns) { ++ CMAP_FOR_EACH_WITH_HASH (keyn, cm_node, hash, &ct->conns) { ++ if (keyn->dir == CT_DIR_FWD) { ++ conn = CONTAINER_OF(keyn, struct conn, key_node[CT_DIR_FWD]); ++ } else { ++ conn = CONTAINER_OF(keyn, struct conn, key_node[CT_DIR_REV]); ++ } ++ + if (conn_expired(conn, now)) { + continue; + } +- if (!conn_key_cmp(&conn->key, key)) { +- found = true; +- if (reply) { +- *reply = false; +- } +- break; +- } +- if (!conn_key_cmp(&conn->rev_key, key)) { +- found = true; +- if (reply) { +- *reply = true; ++ ++ for (int i = CT_DIR_FWD; i < CT_DIRS; i++) { ++ if (!conn_key_cmp(&conn->key_node[i].key, key)) { ++ found = true; ++ if (reply) { ++ *reply = (i == CT_DIR_REV); ++ } ++ goto out_found; + } +- break; + } + } + ++out_found: + if (found && conn_out) { + *conn_out = conn; + } else if (conn_out) { + *conn_out = NULL; + } ++ + return found; + } + +@@ -646,7 +594,7 @@ write_ct_md(struct dp_packet *pkt, uint16_t zone, const struct conn *conn, + if (conn->alg_related) { + key = &conn->parent_key; + } else { +- key = &conn->key; ++ key = &conn->key_node[CT_DIR_FWD].key; + } + } else if (alg_exp) { + pkt->md.ct_mark = alg_exp->parent_mark; +@@ -877,7 +825,8 @@ nat_inner_packet(struct dp_packet *pkt, struct conn_key *key, + static void + nat_packet(struct dp_packet *pkt, struct conn *conn, bool reply, bool related) + { +- struct conn_key *key = reply ? &conn->key : &conn->rev_key; ++ enum key_dir dir = reply ? CT_DIR_FWD : CT_DIR_REV; ++ struct conn_key *key = &conn->key_node[dir].key; + uint16_t nat_action = reply ? 
nat_action_reverse(conn->nat_action) + : conn->nat_action; + +@@ -911,7 +860,7 @@ conn_seq_skew_set(struct conntrack *ct, const struct conn *conn_in, + { + struct conn *conn; + +- conn_lookup(ct, &conn_in->key, now, &conn, NULL); ++ conn_lookup(ct, &conn_in->key_node[CT_DIR_FWD].key, now, &conn, NULL); + if (conn && seq_skew) { + conn->seq_skew = seq_skew; + conn->seq_skew_dir = seq_skew_dir; +@@ -947,7 +896,6 @@ conn_not_found(struct conntrack *ct, struct dp_packet *pkt, + OVS_REQUIRES(ct->ct_lock) + { + struct conn *nc = NULL; +- struct conn *nat_conn = NULL; + + if (!valid_new(pkt, &ctx->key)) { + pkt->md.ct_state = CS_INVALID; +@@ -961,6 +909,7 @@ conn_not_found(struct conntrack *ct, struct dp_packet *pkt, + } + + if (commit) { ++ struct conn_key_node *fwd_key_node, *rev_key_node; + struct zone_limit *zl = zone_limit_lookup_or_default(ct, + ctx->key.zone); + if (zl && atomic_count_get(&zl->czl.count) >= zl->czl.limit) { +@@ -975,9 +924,12 @@ conn_not_found(struct conntrack *ct, struct dp_packet *pkt, + } + + nc = new_conn(ct, pkt, &ctx->key, now, tp_id); +- memcpy(&nc->key, &ctx->key, sizeof nc->key); +- memcpy(&nc->rev_key, &nc->key, sizeof nc->rev_key); +- conn_key_reverse(&nc->rev_key); ++ fwd_key_node = &nc->key_node[CT_DIR_FWD]; ++ rev_key_node = &nc->key_node[CT_DIR_REV]; ++ memcpy(&fwd_key_node->key, &ctx->key, sizeof fwd_key_node->key); ++ memcpy(&rev_key_node->key, &fwd_key_node->key, ++ sizeof rev_key_node->key); ++ conn_key_reverse(&rev_key_node->key); + + if (ct_verify_helper(helper, ct_alg_ctl)) { + nc->alg = nullable_xstrdup(helper); +@@ -992,46 +944,33 @@ conn_not_found(struct conntrack *ct, struct dp_packet *pkt, + + if (nat_action_info) { + nc->nat_action = nat_action_info->nat_action; +- nat_conn = xzalloc(sizeof *nat_conn); + + if (alg_exp) { + if (alg_exp->nat_rpl_dst) { +- nc->rev_key.dst.addr = alg_exp->alg_nat_repl_addr; ++ rev_key_node->key.dst.addr = alg_exp->alg_nat_repl_addr; + nc->nat_action = NAT_ACTION_SRC; + } else { +- nc->rev_key.src.addr = alg_exp->alg_nat_repl_addr; ++ rev_key_node->key.src.addr = alg_exp->alg_nat_repl_addr; + nc->nat_action = NAT_ACTION_DST; + } + } else { +- memcpy(nat_conn, nc, sizeof *nat_conn); +- bool nat_res = nat_get_unique_tuple(ct, nc, nat_conn, +- nat_action_info); +- ++ bool nat_res = nat_get_unique_tuple(ct, nc, nat_action_info); + if (!nat_res) { + goto nat_res_exhaustion; + } +- +- /* Update nc with nat adjustments made to nat_conn by +- * nat_get_unique_tuple(). */ +- memcpy(nc, nat_conn, sizeof *nc); + } + + nat_packet(pkt, nc, false, ctx->icmp_related); +- memcpy(&nat_conn->key, &nc->rev_key, sizeof nat_conn->key); +- memcpy(&nat_conn->rev_key, &nc->key, sizeof nat_conn->rev_key); +- nat_conn->conn_type = CT_CONN_TYPE_UN_NAT; +- nat_conn->nat_action = 0; +- nat_conn->alg = NULL; +- nat_conn->nat_conn = NULL; +- uint32_t nat_hash = conn_key_hash(&nat_conn->key, ct->hash_basis); +- cmap_insert(&ct->conns, &nat_conn->cm_node, nat_hash); ++ uint32_t rev_hash = conn_key_hash(&rev_key_node->key, ++ ct->hash_basis); ++ cmap_insert(&ct->conns, &rev_key_node->cm_node, rev_hash); + } + +- nc->nat_conn = nat_conn; + ovs_mutex_init_adaptive(&nc->lock); +- nc->conn_type = CT_CONN_TYPE_DEFAULT; + atomic_flag_clear(&nc->reclaimed); +- cmap_insert(&ct->conns, &nc->cm_node, ctx->hash); ++ fwd_key_node->dir = CT_DIR_FWD; ++ rev_key_node->dir = CT_DIR_REV; ++ cmap_insert(&ct->conns, &fwd_key_node->cm_node, ctx->hash); + conn_expire_push_front(ct, nc); + atomic_count_inc(&ct->n_conn); + ctx->conn = nc; /* For completeness. 
*/ +@@ -1052,7 +991,6 @@ conn_not_found(struct conntrack *ct, struct dp_packet *pkt, + * firewall rules or a separate firewall. Also using zone partitioning + * can limit DoS impact. */ + nat_res_exhaustion: +- free(nat_conn); + delete_conn__(nc); + static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5); + VLOG_WARN_RL(&rl, "Unable to NAT due to tuple space exhaustion - " +@@ -1065,7 +1003,6 @@ conn_update_state(struct conntrack *ct, struct dp_packet *pkt, + struct conn_lookup_ctx *ctx, struct conn *conn, + long long now) + { +- ovs_assert(conn->conn_type == CT_CONN_TYPE_DEFAULT); + bool create_new_conn = false; + + if (ctx->icmp_related) { +@@ -1092,7 +1029,8 @@ conn_update_state(struct conntrack *ct, struct dp_packet *pkt, + pkt->md.ct_state = CS_INVALID; + break; + case CT_UPDATE_NEW: +- if (conn_lookup(ct, &conn->key, now, NULL, NULL)) { ++ if (conn_lookup(ct, &conn->key_node[CT_DIR_FWD].key, ++ now, NULL, NULL)) { + conn_force_expire(conn); + } + create_new_conn = true; +@@ -1268,8 +1206,10 @@ initial_conn_lookup(struct conntrack *ct, struct conn_lookup_ctx *ctx, + + if (natted) { + if (OVS_LIKELY(ctx->conn)) { ++ enum key_dir dir; + ctx->reply = !ctx->reply; +- ctx->key = ctx->reply ? ctx->conn->rev_key : ctx->conn->key; ++ dir = ctx->reply ? CT_DIR_REV : CT_DIR_FWD; ++ ctx->key = ctx->conn->key_node[dir].key; + ctx->hash = conn_key_hash(&ctx->key, ct->hash_basis); + } else { + /* A lookup failure does not necessarily imply that an +@@ -1302,31 +1242,13 @@ process_one(struct conntrack *ct, struct dp_packet *pkt, + + /* Delete found entry if in wrong direction. 'force' implies commit. */ + if (OVS_UNLIKELY(force && ctx->reply && conn)) { +- if (conn_lookup(ct, &conn->key, now, NULL, NULL)) { ++ if (conn_lookup(ct, &conn->key_node[CT_DIR_FWD].key, ++ now, NULL, NULL)) { + conn_force_expire(conn); + } + conn = NULL; + } + +- if (OVS_LIKELY(conn)) { +- if (conn->conn_type == CT_CONN_TYPE_UN_NAT) { +- +- ctx->reply = true; +- struct conn *rev_conn = conn; /* Save for debugging. 
*/ +- uint32_t hash = conn_key_hash(&conn->rev_key, ct->hash_basis); +- conn_key_lookup(ct, &ctx->key, hash, now, &conn, &ctx->reply); +- +- if (!conn) { +- pkt->md.ct_state |= CS_INVALID; +- write_ct_md(pkt, zone, NULL, NULL, NULL); +- char *log_msg = xasprintf("Missing parent conn %p", rev_conn); +- ct_print_conn_info(rev_conn, log_msg, VLL_INFO, true, true); +- free(log_msg); +- return; +- } +- } +- } +- + enum ct_alg_ctl_type ct_alg_ctl = get_alg_ctl_type(pkt, tp_src, tp_dst, + helper); + +@@ -1419,8 +1341,9 @@ conntrack_execute(struct conntrack *ct, struct dp_packet_batch *pkt_batch, + struct conn *conn = packet->md.conn; + if (OVS_UNLIKELY(packet->md.ct_state == CS_INVALID)) { + write_ct_md(packet, zone, NULL, NULL, NULL); +- } else if (conn && conn->key.zone == zone && !force +- && !get_alg_ctl_type(packet, tp_src, tp_dst, helper)) { ++ } else if (conn && ++ conn->key_node[CT_DIR_FWD].key.zone == zone && !force && ++ !get_alg_ctl_type(packet, tp_src, tp_dst, helper)) { + process_one_fast(zone, setmark, setlabel, nat_action_info, + conn, packet); + } else if (OVS_UNLIKELY(!conn_key_extract(ct, packet, dl_type, &ctx, +@@ -2269,7 +2192,7 @@ nat_ipv6_addr_increment(struct in6_addr *ipv6, uint32_t increment) + } + + static uint32_t +-nat_range_hash(const struct conn *conn, uint32_t basis, ++nat_range_hash(const struct conn_key *key, uint32_t basis, + const struct nat_action_info_t *nat_info) + { + uint32_t hash = basis; +@@ -2279,11 +2202,11 @@ nat_range_hash(const struct conn *conn, uint32_t basis, + hash = hash_add(hash, + ((uint32_t) nat_info->max_port << 16) + | nat_info->min_port); +- hash = ct_endpoint_hash_add(hash, &conn->key.src); +- hash = ct_endpoint_hash_add(hash, &conn->key.dst); +- hash = hash_add(hash, (OVS_FORCE uint32_t) conn->key.dl_type); +- hash = hash_add(hash, conn->key.nw_proto); +- hash = hash_add(hash, conn->key.zone); ++ hash = ct_endpoint_hash_add(hash, &key->src); ++ hash = ct_endpoint_hash_add(hash, &key->dst); ++ hash = hash_add(hash, (OVS_FORCE uint32_t) key->dl_type); ++ hash = hash_add(hash, key->nw_proto); ++ hash = hash_add(hash, key->zone); + + /* The purpose of the second parameter is to distinguish hashes of data of + * different length; our data always has the same length so there is no +@@ -2357,7 +2280,7 @@ get_addr_in_range(union ct_addr *min, union ct_addr *max, + } + + static void +-find_addr(const struct conn *conn, union ct_addr *min, ++find_addr(const struct conn_key *key, union ct_addr *min, + union ct_addr *max, union ct_addr *curr, + uint32_t hash, bool ipv4, + const struct nat_action_info_t *nat_info) +@@ -2367,9 +2290,9 @@ find_addr(const struct conn *conn, union ct_addr *min, + /* All-zero case. 
*/ + if (!memcmp(min, &zero_ip, sizeof *min)) { + if (nat_info->nat_action & NAT_ACTION_SRC) { +- *curr = conn->key.src.addr; ++ *curr = key->src.addr; + } else if (nat_info->nat_action & NAT_ACTION_DST) { +- *curr = conn->key.dst.addr; ++ *curr = key->dst.addr; + } + } else { + get_addr_in_range(min, max, curr, hash, ipv4); +@@ -2388,7 +2311,7 @@ store_addr_to_key(union ct_addr *addr, struct conn_key *key, + } + + static bool +-nat_get_unique_l4(struct conntrack *ct, struct conn *nat_conn, ++nat_get_unique_l4(struct conntrack *ct, struct conn_key *rev_key, + ovs_be16 *port, uint16_t curr, uint16_t min, + uint16_t max) + { +@@ -2411,8 +2334,7 @@ another_round: + } + + *port = htons(curr); +- if (!conn_lookup(ct, &nat_conn->rev_key, +- time_msec(), NULL, NULL)) { ++ if (!conn_lookup(ct, rev_key, time_msec(), NULL, NULL)) { + return true; + } + } +@@ -2450,54 +2372,50 @@ another_round: + * + * If none can be found, return exhaustion to the caller. */ + static bool +-nat_get_unique_tuple(struct conntrack *ct, const struct conn *conn, +- struct conn *nat_conn, ++nat_get_unique_tuple(struct conntrack *ct, struct conn *conn, + const struct nat_action_info_t *nat_info) + { +- uint32_t hash = nat_range_hash(conn, ct->hash_basis, nat_info); ++ struct conn_key *fwd_key = &conn->key_node[CT_DIR_FWD].key; ++ struct conn_key *rev_key = &conn->key_node[CT_DIR_REV].key; + union ct_addr min_addr = {0}, max_addr = {0}, addr = {0}; +- bool pat_proto = conn->key.nw_proto == IPPROTO_TCP || +- conn->key.nw_proto == IPPROTO_UDP || +- conn->key.nw_proto == IPPROTO_SCTP; ++ bool pat_proto = fwd_key->nw_proto == IPPROTO_TCP || ++ fwd_key->nw_proto == IPPROTO_UDP || ++ fwd_key->nw_proto == IPPROTO_SCTP; + uint16_t min_dport, max_dport, curr_dport; + uint16_t min_sport, max_sport, curr_sport; ++ uint32_t hash; + ++ hash = nat_range_hash(fwd_key, ct->hash_basis, nat_info); + min_addr = nat_info->min_addr; + max_addr = nat_info->max_addr; + +- find_addr(conn, &min_addr, &max_addr, &addr, hash, +- (conn->key.dl_type == htons(ETH_TYPE_IP)), nat_info); ++ find_addr(fwd_key, &min_addr, &max_addr, &addr, hash, ++ (fwd_key->dl_type == htons(ETH_TYPE_IP)), nat_info); + +- set_sport_range(nat_info, &conn->key, hash, &curr_sport, ++ set_sport_range(nat_info, fwd_key, hash, &curr_sport, + &min_sport, &max_sport); +- set_dport_range(nat_info, &conn->key, hash, &curr_dport, ++ set_dport_range(nat_info, fwd_key, hash, &curr_dport, + &min_dport, &max_dport); + + if (pat_proto) { +- nat_conn->rev_key.src.port = htons(curr_dport); +- nat_conn->rev_key.dst.port = htons(curr_sport); ++ rev_key->src.port = htons(curr_dport); ++ rev_key->dst.port = htons(curr_sport); + } + +- store_addr_to_key(&addr, &nat_conn->rev_key, +- nat_info->nat_action); ++ store_addr_to_key(&addr, rev_key, nat_info->nat_action); + + if (!pat_proto) { +- if (!conn_lookup(ct, &nat_conn->rev_key, +- time_msec(), NULL, NULL)) { +- return true; +- } +- +- return false; ++ return !conn_lookup(ct, rev_key, time_msec(), NULL, NULL); + } + + bool found = false; + if (nat_info->nat_action & NAT_ACTION_DST_PORT) { +- found = nat_get_unique_l4(ct, nat_conn, &nat_conn->rev_key.src.port, ++ found = nat_get_unique_l4(ct, rev_key, &rev_key->src.port, + curr_dport, min_dport, max_dport); + } + + if (!found) { +- found = nat_get_unique_l4(ct, nat_conn, &nat_conn->rev_key.dst.port, ++ found = nat_get_unique_l4(ct, rev_key, &rev_key->dst.port, + curr_sport, min_sport, max_sport); + } + +@@ -2513,9 +2431,9 @@ conn_update(struct conntrack *ct, struct conn *conn, struct dp_packet 
*pkt, + struct conn_lookup_ctx *ctx, long long now) + { + ovs_mutex_lock(&conn->lock); ++ uint8_t nw_proto = conn->key_node[CT_DIR_FWD].key.nw_proto; + enum ct_update_res update_res = +- l4_protos[conn->key.nw_proto]->conn_update(ct, conn, pkt, ctx->reply, +- now); ++ l4_protos[nw_proto]->conn_update(ct, conn, pkt, ctx->reply, now); + ovs_mutex_unlock(&conn->lock); + return update_res; + } +@@ -2541,12 +2459,9 @@ conn_expiration(const struct conn *conn) + } + + static bool +-conn_expired(struct conn *conn, long long now) ++conn_expired(const struct conn *conn, long long now) + { +- if (conn->conn_type == CT_CONN_TYPE_DEFAULT) { +- return now >= conn_expiration(conn); +- } +- return false; ++ return now >= conn_expiration(conn); + } + + static bool +@@ -2572,9 +2487,7 @@ delete_conn__(struct conn *conn) + static void + delete_conn(struct conn *conn) + { +- ovs_assert(conn->conn_type == CT_CONN_TYPE_DEFAULT); + ovs_mutex_destroy(&conn->lock); +- free(conn->nat_conn); + delete_conn__(conn); + } + +@@ -2667,15 +2580,18 @@ static void + conn_to_ct_dpif_entry(const struct conn *conn, struct ct_dpif_entry *entry, + long long now) + { ++ const struct conn_key *rev_key = &conn->key_node[CT_DIR_REV].key; ++ const struct conn_key *key = &conn->key_node[CT_DIR_FWD].key; ++ + memset(entry, 0, sizeof *entry); +- conn_key_to_tuple(&conn->key, &entry->tuple_orig); +- conn_key_to_tuple(&conn->rev_key, &entry->tuple_reply); ++ conn_key_to_tuple(key, &entry->tuple_orig); ++ conn_key_to_tuple(rev_key, &entry->tuple_reply); + + if (conn->alg_related) { + conn_key_to_tuple(&conn->parent_key, &entry->tuple_parent); + } + +- entry->zone = conn->key.zone; ++ entry->zone = key->zone; + + ovs_mutex_lock(&conn->lock); + entry->mark = conn->mark; +@@ -2683,7 +2599,7 @@ conn_to_ct_dpif_entry(const struct conn *conn, struct ct_dpif_entry *entry, + + long long expiration = conn_expiration(conn) - now; + +- struct ct_l4_proto *class = l4_protos[conn->key.nw_proto]; ++ struct ct_l4_proto *class = l4_protos[key->nw_proto]; + if (class->conn_get_protoinfo) { + class->conn_get_protoinfo(conn, &entry->protoinfo); + } +@@ -2716,30 +2632,29 @@ conntrack_dump_start(struct conntrack *ct, struct conntrack_dump *dump, + + dump->ct = ct; + *ptot_bkts = 1; /* Need to clean up the callers. 
*/ ++ dump->cursor = cmap_cursor_start(&ct->conns); + return 0; + } + + int + conntrack_dump_next(struct conntrack_dump *dump, struct ct_dpif_entry *entry) + { +- struct conntrack *ct = dump->ct; + long long now = time_msec(); + +- for (;;) { +- struct cmap_node *cm_node = cmap_next_position(&ct->conns, +- &dump->cm_pos); +- if (!cm_node) { +- break; ++ struct conn_key_node *keyn; ++ struct conn *conn; ++ ++ CMAP_CURSOR_FOR_EACH_CONTINUE (keyn, cm_node, &dump->cursor) { ++ if (keyn->dir != CT_DIR_FWD) { ++ continue; + } +- struct conn *conn; +- INIT_CONTAINER(conn, cm_node, cm_node); + ++ conn = CONTAINER_OF(keyn, struct conn, key_node[CT_DIR_FWD]); + if (conn_expired(conn, now)) { + continue; + } + +- if ((!dump->filter_zone || conn->key.zone == dump->zone) && +- (conn->conn_type != CT_CONN_TYPE_UN_NAT)) { ++ if (!dump->filter_zone || keyn->key.zone == dump->zone) { + conn_to_ct_dpif_entry(conn, entry, now); + return 0; + } +@@ -2823,14 +2738,15 @@ conntrack_exp_dump_done(struct conntrack_dump *dump OVS_UNUSED) + int + conntrack_flush(struct conntrack *ct, const uint16_t *zone) + { ++ struct conn_key_node *keyn; + struct conn *conn; + +- CMAP_FOR_EACH (conn, cm_node, &ct->conns) { +- if (conn->conn_type != CT_CONN_TYPE_DEFAULT) { ++ CMAP_FOR_EACH (keyn, cm_node, &ct->conns) { ++ if (keyn->dir != CT_DIR_FWD) { + continue; + } +- +- if (!zone || *zone == conn->key.zone) { ++ conn = CONTAINER_OF(keyn, struct conn, key_node[CT_DIR_FWD]); ++ if (!zone || *zone == keyn->key.zone) { + conn_clean(ct, conn); + } + } +@@ -2842,18 +2758,18 @@ int + conntrack_flush_tuple(struct conntrack *ct, const struct ct_dpif_tuple *tuple, + uint16_t zone) + { +- int error = 0; + struct conn_key key; + struct conn *conn; ++ int error = 0; + + memset(&key, 0, sizeof(key)); + tuple_to_conn_key(tuple, zone, &key); + conn_lookup(ct, &key, time_msec(), &conn, NULL); + +- if (conn && conn->conn_type == CT_CONN_TYPE_DEFAULT) { ++ if (conn) { + conn_clean(ct, conn); + } else { +- VLOG_WARN("Must flush tuple using the original pre-NATed tuple"); ++ VLOG_WARN("Tuple not found"); + error = ENOENT; + } + +@@ -2996,50 +2912,54 @@ expectation_create(struct conntrack *ct, ovs_be16 dst_port, + const struct conn *parent_conn, bool reply, bool src_ip_wc, + bool skip_nat) + { ++ const struct conn_key *pconn_key, *pconn_rev_key; + union ct_addr src_addr; + union ct_addr dst_addr; + union ct_addr alg_nat_repl_addr; + struct alg_exp_node *alg_exp_node = xzalloc(sizeof *alg_exp_node); + ++ pconn_key = &parent_conn->key_node[CT_DIR_FWD].key; ++ pconn_rev_key = &parent_conn->key_node[CT_DIR_REV].key; ++ + if (reply) { +- src_addr = parent_conn->key.src.addr; +- dst_addr = parent_conn->key.dst.addr; ++ src_addr = pconn_key->src.addr; ++ dst_addr = pconn_key->dst.addr; + alg_exp_node->nat_rpl_dst = true; + if (skip_nat) { + alg_nat_repl_addr = dst_addr; + } else if (parent_conn->nat_action & NAT_ACTION_DST) { +- alg_nat_repl_addr = parent_conn->rev_key.src.addr; ++ alg_nat_repl_addr = pconn_rev_key->src.addr; + alg_exp_node->nat_rpl_dst = false; + } else { +- alg_nat_repl_addr = parent_conn->rev_key.dst.addr; ++ alg_nat_repl_addr = pconn_rev_key->dst.addr; + } + } else { +- src_addr = parent_conn->rev_key.src.addr; +- dst_addr = parent_conn->rev_key.dst.addr; ++ src_addr = pconn_rev_key->src.addr; ++ dst_addr = pconn_rev_key->dst.addr; + alg_exp_node->nat_rpl_dst = false; + if (skip_nat) { + alg_nat_repl_addr = src_addr; + } else if (parent_conn->nat_action & NAT_ACTION_DST) { +- alg_nat_repl_addr = parent_conn->key.dst.addr; ++ 
alg_nat_repl_addr = pconn_key->dst.addr; + alg_exp_node->nat_rpl_dst = true; + } else { +- alg_nat_repl_addr = parent_conn->key.src.addr; ++ alg_nat_repl_addr = pconn_key->src.addr; + } + } + if (src_ip_wc) { + memset(&src_addr, 0, sizeof src_addr); + } + +- alg_exp_node->key.dl_type = parent_conn->key.dl_type; +- alg_exp_node->key.nw_proto = parent_conn->key.nw_proto; +- alg_exp_node->key.zone = parent_conn->key.zone; ++ alg_exp_node->key.dl_type = pconn_key->dl_type; ++ alg_exp_node->key.nw_proto = pconn_key->nw_proto; ++ alg_exp_node->key.zone = pconn_key->zone; + alg_exp_node->key.src.addr = src_addr; + alg_exp_node->key.dst.addr = dst_addr; + alg_exp_node->key.src.port = ALG_WC_SRC_PORT; + alg_exp_node->key.dst.port = dst_port; + alg_exp_node->parent_mark = parent_conn->mark; + alg_exp_node->parent_label = parent_conn->label; +- memcpy(&alg_exp_node->parent_key, &parent_conn->key, ++ memcpy(&alg_exp_node->parent_key, pconn_key, + sizeof alg_exp_node->parent_key); + /* Take the write lock here because it is almost 100% + * likely that the lookup will fail and +@@ -3291,12 +3211,16 @@ process_ftp_ctl_v4(struct conntrack *ct, + + switch (mode) { + case CT_FTP_MODE_ACTIVE: +- *v4_addr_rep = conn_for_expectation->rev_key.dst.addr.ipv4; +- conn_ipv4_addr = conn_for_expectation->key.src.addr.ipv4; ++ *v4_addr_rep = ++ conn_for_expectation->key_node[CT_DIR_REV].key.dst.addr.ipv4; ++ conn_ipv4_addr = ++ conn_for_expectation->key_node[CT_DIR_FWD].key.src.addr.ipv4; + break; + case CT_FTP_MODE_PASSIVE: +- *v4_addr_rep = conn_for_expectation->key.dst.addr.ipv4; +- conn_ipv4_addr = conn_for_expectation->rev_key.src.addr.ipv4; ++ *v4_addr_rep = ++ conn_for_expectation->key_node[CT_DIR_FWD].key.dst.addr.ipv4; ++ conn_ipv4_addr = ++ conn_for_expectation->key_node[CT_DIR_REV].key.src.addr.ipv4; + break; + case CT_TFTP_MODE: + default: +@@ -3328,7 +3252,7 @@ skip_ipv6_digits(char *str) + static enum ftp_ctl_pkt + process_ftp_ctl_v6(struct conntrack *ct, + struct dp_packet *pkt, +- const struct conn *conn_for_expectation, ++ const struct conn *conn_for_exp, + union ct_addr *v6_addr_rep, char **ftp_data_start, + size_t *addr_offset_from_ftp_data_start, + size_t *addr_size, enum ct_alg_mode *mode) +@@ -3396,24 +3320,25 @@ process_ftp_ctl_v6(struct conntrack *ct, + + switch (*mode) { + case CT_FTP_MODE_ACTIVE: +- *v6_addr_rep = conn_for_expectation->rev_key.dst.addr; ++ *v6_addr_rep = conn_for_exp->key_node[CT_DIR_REV].key.dst.addr; + /* Although most servers will block this exploit, there may be some + * less well managed. 
*/ + if (memcmp(&ip6_addr, &v6_addr_rep->ipv6, sizeof ip6_addr) && +- memcmp(&ip6_addr, &conn_for_expectation->key.src.addr.ipv6, ++ memcmp(&ip6_addr, ++ &conn_for_exp->key_node[CT_DIR_FWD].key.src.addr.ipv6, + sizeof ip6_addr)) { + return CT_FTP_CTL_INVALID; + } + break; + case CT_FTP_MODE_PASSIVE: +- *v6_addr_rep = conn_for_expectation->key.dst.addr; ++ *v6_addr_rep = conn_for_exp->key_node[CT_DIR_FWD].key.dst.addr; + break; + case CT_TFTP_MODE: + default: + OVS_NOT_REACHED(); + } + +- expectation_create(ct, port, conn_for_expectation, ++ expectation_create(ct, port, conn_for_exp, + !!(pkt->md.ct_state & CS_REPLY_DIR), false, false); + return CT_FTP_CTL_INTEREST; + } +@@ -3571,7 +3496,8 @@ handle_tftp_ctl(struct conntrack *ct, + long long now OVS_UNUSED, enum ftp_ctl_pkt ftp_ctl OVS_UNUSED, + bool nat OVS_UNUSED) + { +- expectation_create(ct, conn_for_expectation->key.src.port, ++ expectation_create(ct, ++ conn_for_expectation->key_node[CT_DIR_FWD].key.src.port, + conn_for_expectation, + !!(pkt->md.ct_state & CS_REPLY_DIR), false, false); + } +diff --git a/lib/conntrack.h b/lib/conntrack.h +index 57d5159b61..ecf539b736 100644 +--- a/lib/conntrack.h ++++ b/lib/conntrack.h +@@ -101,8 +101,8 @@ struct conntrack_dump { + struct conntrack *ct; + unsigned bucket; + union { +- struct cmap_position cm_pos; + struct hmap_position hmap_pos; ++ struct cmap_cursor cursor; + }; + bool filter_zone; + uint16_t zone; diff --git a/lib/db-ctl-base.c b/lib/db-ctl-base.c index 5d2635946d..3a8068b12c 100644 --- a/lib/db-ctl-base.c @@ -1691,7 +2689,7 @@ index bdd12f6a7b..ac72a44bce 100644
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c -index 8f1361e21f..55700250df 100644 +index 8f1361e21f..6e30f2cc3b 100644 --- a/lib/netdev-dpdk.c +++ b/lib/netdev-dpdk.c @@ -1312,6 +1312,16 @@ dpdk_eth_dev_init(struct netdev_dpdk *dev) @@ -1719,6 +2717,166 @@ index 8f1361e21f..55700250df 100644 return true; } +@@ -2487,6 +2498,35 @@ netdev_dpdk_prep_hwol_batch(struct netdev_dpdk *dev, struct rte_mbuf **pkts, + return cnt; + } + ++static void ++netdev_dpdk_mbuf_dump(const char *prefix, const char *message, ++ const struct rte_mbuf *mbuf) ++{ ++ static struct vlog_rate_limit dump_rl = VLOG_RATE_LIMIT_INIT(5, 5); ++ char *response = NULL; ++ FILE *stream; ++ size_t size; ++ ++ if (VLOG_DROP_DBG(&dump_rl)) { ++ return; ++ } ++ ++ stream = open_memstream(&response, &size); ++ if (!stream) { ++ VLOG_ERR("Unable to open memstream for mbuf dump: %s.", ++ ovs_strerror(errno)); ++ return; ++ } ++ ++ rte_pktmbuf_dump(stream, mbuf, rte_pktmbuf_pkt_len(mbuf)); ++ ++ fclose(stream); ++ ++ VLOG_DBG(prefix ? "%s: %s:\n%s" : "%s%s:\n%s", ++ prefix ? prefix : "", message, response); ++ free(response); ++} ++ + /* Tries to transmit 'pkts' to txq 'qid' of device 'dev'. Takes ownership of + * 'pkts', even in case of failure. + * +@@ -2503,6 +2543,8 @@ netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid, + VLOG_WARN_RL(&rl, "%s: Output batch contains invalid packets. " + "Only %u/%u are valid: %s", netdev_get_name(&dev->up), + nb_tx_prep, cnt, rte_strerror(rte_errno)); ++ netdev_dpdk_mbuf_dump(netdev_get_name(&dev->up), ++ "First invalid packet", pkts[nb_tx_prep]); + } + + while (nb_tx != nb_tx_prep) { +diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c +index 1a54add87f..e72b2b27ae 100644 +--- a/lib/netdev-dummy.c ++++ b/lib/netdev-dummy.c +@@ -39,6 +39,7 @@ + #include "pcap-file.h" + #include "openvswitch/poll-loop.h" + #include "openvswitch/shash.h" ++#include "ovs-router.h" + #include "sset.h" + #include "stream.h" + #include "unaligned.h" +@@ -2045,11 +2046,20 @@ netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED, + + if (netdev && is_dummy_class(netdev->netdev_class)) { + struct in_addr ip, mask; ++ struct in6_addr ip6; ++ uint32_t plen; + char *error; + +- error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr); ++ error = ip_parse_cidr(argv[2], &ip.s_addr, &plen); + if (!error) { ++ mask.s_addr = be32_prefix_mask(plen); + netdev_dummy_add_in4(netdev, ip, mask); ++ ++ /* Insert local route entry for the new address. */ ++ in6_addr_set_mapped_ipv4(&ip6, ip.s_addr); ++ ovs_router_force_insert(0, &ip6, plen + 96, true, argv[1], ++ &in6addr_any, &ip6); ++ + unixctl_command_reply(conn, "OK"); + } else { + unixctl_command_reply_error(conn, error); +@@ -2079,6 +2089,11 @@ netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED, + + mask = ipv6_create_mask(plen); + netdev_dummy_add_in6(netdev, &ip6, &mask); ++ ++ /* Insert local route entry for the new address. 
*/ ++ ovs_router_force_insert(0, &ip6, plen, true, argv[1], ++ &in6addr_any, &ip6); ++ + unixctl_command_reply(conn, "OK"); + } else { + unixctl_command_reply_error(conn, error); +diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c +index cca3408797..1ee585b941 100644 +--- a/lib/netdev-linux.c ++++ b/lib/netdev-linux.c +@@ -2566,16 +2566,11 @@ exit: + } + + static int +-netdev_linux_get_speed(const struct netdev *netdev_, uint32_t *current, +- uint32_t *max) ++netdev_linux_get_speed_locked(struct netdev_linux *netdev, ++ uint32_t *current, uint32_t *max) + { +- struct netdev_linux *netdev = netdev_linux_cast(netdev_); +- int error; +- +- ovs_mutex_lock(&netdev->mutex); + if (netdev_linux_netnsid_is_remote(netdev)) { +- error = EOPNOTSUPP; +- goto exit; ++ return EOPNOTSUPP; + } + + netdev_linux_read_features(netdev); +@@ -2585,9 +2580,18 @@ netdev_linux_get_speed(const struct netdev *netdev_, uint32_t *current, + *max = MIN(UINT32_MAX, + netdev_features_to_bps(netdev->supported, 0) / 1000000ULL); + } +- error = netdev->get_features_error; ++ return netdev->get_features_error; ++} + +-exit: ++static int ++netdev_linux_get_speed(const struct netdev *netdev_, uint32_t *current, ++ uint32_t *max) ++{ ++ struct netdev_linux *netdev = netdev_linux_cast(netdev_); ++ int error; ++ ++ ovs_mutex_lock(&netdev->mutex); ++ error = netdev_linux_get_speed_locked(netdev, current, max); + ovs_mutex_unlock(&netdev->mutex); + return error; + } +@@ -4800,8 +4804,10 @@ htb_parse_qdisc_details__(struct netdev *netdev, const struct smap *details, + hc->max_rate = smap_get_ullong(details, "max-rate", 0) / 8; + if (!hc->max_rate) { + uint32_t current_speed; ++ uint32_t max_speed OVS_UNUSED; + +- netdev_get_speed(netdev, ¤t_speed, NULL); ++ netdev_linux_get_speed_locked(netdev_linux_cast(netdev), ++ ¤t_speed, &max_speed); + hc->max_rate = current_speed ? current_speed / 8 * 1000000ULL + : NETDEV_DEFAULT_BPS / 8; + } +@@ -5270,8 +5276,10 @@ hfsc_parse_qdisc_details__(struct netdev *netdev, const struct smap *details, + uint32_t max_rate = smap_get_ullong(details, "max-rate", 0) / 8; + if (!max_rate) { + uint32_t current_speed; ++ uint32_t max_speed OVS_UNUSED; + +- netdev_get_speed(netdev, ¤t_speed, NULL); ++ netdev_linux_get_speed_locked(netdev_linux_cast(netdev), ++ ¤t_speed, &max_speed); + max_rate = current_speed ? current_speed / 8 * 1000000ULL + : NETDEV_DEFAULT_BPS / 8; + } diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c index 14bc877719..992627fa23 100644 --- a/lib/netdev-offload-dpdk.c @@ -2004,6 +3162,47 @@ index ab9ce6b2e0..f140d25feb 100644 #include "ovs-atomic-clang.h" #elif HAVE_ATOMIC && __cplusplus >= 201103L #include "ovs-atomic-c++.h" +diff --git a/lib/ovs-router.c b/lib/ovs-router.c +index 7c04bb0e6b..809152d29b 100644 +--- a/lib/ovs-router.c ++++ b/lib/ovs-router.c +@@ -329,6 +329,20 @@ ovs_router_insert(uint32_t mark, const struct in6_addr *ip_dst, uint8_t plen, + } + } + ++/* The same as 'ovs_router_insert', but it adds the route even if updates ++ * from the system routing table are disabled. Used for unit tests. */ ++void ++ovs_router_force_insert(uint32_t mark, const struct in6_addr *ip_dst, ++ uint8_t plen, bool local, const char output_bridge[], ++ const struct in6_addr *gw, ++ const struct in6_addr *prefsrc) ++{ ++ uint8_t priority = local ? 
plen + 64 : plen; ++ ++ ovs_router_insert__(mark, priority, local, ip_dst, plen, ++ output_bridge, gw, prefsrc); ++} ++ + static void + rt_entry_delete__(const struct cls_rule *cr) + { +diff --git a/lib/ovs-router.h b/lib/ovs-router.h +index eb4ff85d9e..d7dc7e55f3 100644 +--- a/lib/ovs-router.h ++++ b/lib/ovs-router.h +@@ -34,6 +34,11 @@ void ovs_router_insert(uint32_t mark, const struct in6_addr *ip_dst, + uint8_t plen, bool local, + const char output_bridge[], const struct in6_addr *gw, + const struct in6_addr *prefsrc); ++void ovs_router_force_insert(uint32_t mark, const struct in6_addr *ip_dst, ++ uint8_t plen, bool local, ++ const char output_bridge[], ++ const struct in6_addr *gw, ++ const struct in6_addr *prefsrc); + void ovs_router_flush(void); + + void ovs_router_disable_system_routing_table(void); diff --git a/lib/ovsdb-idl.c b/lib/ovsdb-idl.c index 634fbb56df..ba720474b6 100644 --- a/lib/ovsdb-idl.c @@ -2073,6 +3272,30 @@ index 9777efea33..688fe56337 100644 #ifdef __cplusplus } #endif +diff --git a/lib/rstp.c b/lib/rstp.c +index 2f01966f79..90e8094599 100644 +--- a/lib/rstp.c ++++ b/lib/rstp.c +@@ -50,7 +50,7 @@ + + VLOG_DEFINE_THIS_MODULE(rstp); + +-struct ovs_mutex rstp_mutex = OVS_MUTEX_INITIALIZER; ++struct ovs_mutex rstp_mutex; + + static struct ovs_list all_rstps__ = OVS_LIST_INITIALIZER(&all_rstps__); + static struct ovs_list *const all_rstps OVS_GUARDED_BY(rstp_mutex) = &all_rstps__; +@@ -248,6 +248,10 @@ void + rstp_init(void) + OVS_EXCLUDED(rstp_mutex) + { ++ /* We need a recursive mutex because rstp_send_bpdu() could loop back ++ * into the rstp module through a patch port. */ ++ ovs_mutex_init_recursive(&rstp_mutex); ++ + unixctl_command_register("rstp/tcn", "[bridge]", 0, 1, rstp_unixctl_tcn, + NULL); + unixctl_command_register("rstp/show", "[bridge]", 0, 1, rstp_unixctl_show, diff --git a/lib/tc.c b/lib/tc.c index f49048cdab..6b38925c30 100644 --- a/lib/tc.c @@ -2154,6 +3377,79 @@ index b556762277..e9603432d2 100644 return retval; } +diff --git a/ofproto/bond.c b/ofproto/bond.c +index cfdf44f854..c31869a4c7 100644 +--- a/ofproto/bond.c ++++ b/ofproto/bond.c +@@ -186,7 +186,7 @@ static struct bond_member *choose_output_member(const struct bond *, + struct flow_wildcards *, + uint16_t vlan) + OVS_REQ_RDLOCK(rwlock); +-static void update_recirc_rules__(struct bond *); ++static void update_recirc_rules(struct bond *) OVS_REQ_WRLOCK(rwlock); + static bool bond_may_recirc(const struct bond *); + static void bond_update_post_recirc_rules__(struct bond *, bool force) + OVS_REQ_WRLOCK(rwlock); +@@ -299,7 +299,10 @@ bond_unref(struct bond *bond) + } + free(bond->hash); + bond->hash = NULL; +- update_recirc_rules__(bond); ++ ++ ovs_rwlock_wrlock(&rwlock); ++ update_recirc_rules(bond); ++ ovs_rwlock_unlock(&rwlock); + + hmap_destroy(&bond->pr_rule_ops); + free(bond->primary); +@@ -331,17 +334,8 @@ add_pr_rule(struct bond *bond, const struct match *match, + hmap_insert(&bond->pr_rule_ops, &pr_op->hmap_node, hash); + } + +-/* This function should almost never be called directly. +- * 'update_recirc_rules()' should be called instead. Since +- * this function modifies 'bond->pr_rule_ops', it is only +- * safe when 'rwlock' is held. +- * +- * However, when the 'bond' is the only reference in the system, +- * calling this function avoid acquiring lock only to satisfy +- * lock annotation. Currently, only 'bond_unref()' calls +- * this function directly. 
*/ + static void +-update_recirc_rules__(struct bond *bond) ++update_recirc_rules(struct bond *bond) OVS_REQ_WRLOCK(rwlock) + { + struct match match; + struct bond_pr_rule_op *pr_op; +@@ -407,6 +401,15 @@ update_recirc_rules__(struct bond *bond) + + VLOG_ERR("failed to remove post recirculation flow %s", err_s); + free(err_s); ++ } else if (bond->hash) { ++ /* If the flow deletion failed, a subsequent call to ++ * ofproto_dpif_add_internal_flow() would just modify the ++ * flow preserving its statistics. Therefore, only reset ++ * the entry's byte counter if it succeeds. */ ++ uint32_t hash = pr_op->match.flow.dp_hash & BOND_MASK; ++ struct bond_entry *entry = &bond->hash[hash]; ++ ++ entry->pr_tx_bytes = 0; + } + + hmap_remove(&bond->pr_rule_ops, &pr_op->hmap_node); +@@ -421,12 +424,6 @@ update_recirc_rules__(struct bond *bond) + ofpbuf_uninit(&ofpacts); + } + +-static void +-update_recirc_rules(struct bond *bond) +- OVS_REQ_RDLOCK(rwlock) +-{ +- update_recirc_rules__(bond); +-} + + /* Updates 'bond''s overall configuration to 's'. + * diff --git a/ofproto/connmgr.c b/ofproto/connmgr.c index b092e9e04e..f7f7b12799 100644 --- a/ofproto/connmgr.c @@ -2197,6 +3493,70 @@ index b092e9e04e..f7f7b12799 100644 VLOG_INFO("%s: added %s controller \"%s\"", mgr->name, ofconn_type_to_string(ofservice->type), target); +diff --git a/ofproto/ofproto-dpif-monitor.c b/ofproto/ofproto-dpif-monitor.c +index bb0e490910..5132f9c952 100644 +--- a/ofproto/ofproto-dpif-monitor.c ++++ b/ofproto/ofproto-dpif-monitor.c +@@ -275,19 +275,16 @@ monitor_mport_run(struct mport *mport, struct dp_packet *packet) + long long int lldp_wake_time = LLONG_MAX; + + if (mport->cfm && cfm_should_send_ccm(mport->cfm)) { +- dp_packet_clear(packet); + cfm_compose_ccm(mport->cfm, packet, mport->hw_addr); + ofproto_dpif_send_packet(mport->ofport, false, packet); + } + if (mport->bfd && bfd_should_send_packet(mport->bfd)) { + bool oam; + +- dp_packet_clear(packet); + bfd_put_packet(mport->bfd, packet, mport->hw_addr, &oam); + ofproto_dpif_send_packet(mport->ofport, oam, packet); + } + if (mport->lldp && lldp_should_send_packet(mport->lldp)) { +- dp_packet_clear(packet); + lldp_put_packet(mport->lldp, packet, mport->hw_addr); + ofproto_dpif_send_packet(mport->ofport, false, packet); + } +diff --git a/ofproto/ofproto-dpif-trace.c b/ofproto/ofproto-dpif-trace.c +index 527e2f17ed..4fbe85018e 100644 +--- a/ofproto/ofproto-dpif-trace.c ++++ b/ofproto/ofproto-dpif-trace.c +@@ -845,17 +845,35 @@ ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow, + bool names) + { + struct ovs_list recirc_queue = OVS_LIST_INITIALIZER(&recirc_queue); ++ int recirculations = 0; ++ + ofproto_trace__(ofproto, flow, packet, &recirc_queue, + ofpacts, ofpacts_len, output, names); + + struct oftrace_recirc_node *recirc_node; + LIST_FOR_EACH_POP (recirc_node, node, &recirc_queue) { ++ if (recirculations++ > 4096) { ++ ds_put_cstr(output, "\n\n"); ++ ds_put_char_multiple(output, '=', 79); ++ ds_put_cstr(output, "\nTrace reached the recirculation limit." 
++ " Sopping the trace here."); ++ ds_put_format(output, ++ "\nQueued but not processed: %"PRIuSIZE ++ " recirculations.", ++ ovs_list_size(&recirc_queue) + 1); ++ oftrace_recirc_node_destroy(recirc_node); ++ break; ++ } + ofproto_trace_recirc_node(recirc_node, next_ct_states, output); + ofproto_trace__(ofproto, &recirc_node->flow, recirc_node->packet, + &recirc_queue, ofpacts, ofpacts_len, output, + names); + oftrace_recirc_node_destroy(recirc_node); + } ++ /* Destroy remaining recirculation nodes, if any. */ ++ LIST_FOR_EACH_POP (recirc_node, node, &recirc_queue) { ++ oftrace_recirc_node_destroy(recirc_node); ++ } + } + + void diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c index 04b583f816..292500f215 100644 --- a/ofproto/ofproto-dpif-upcall.c @@ -2261,7 +3621,7 @@ index 9224ee2e6d..2e1fcb3a6f 100644 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); VLOG_WARN_RL(&rl, "xcache LEARN action execution failed."); diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c -index 47ea0f47e7..be4bd66576 100644 +index 47ea0f47e7..078d1bd96b 100644 --- a/ofproto/ofproto-dpif-xlate.c +++ b/ofproto/ofproto-dpif-xlate.c @@ -1615,7 +1615,8 @@ xlate_lookup_ofproto_(const struct dpif_backer *backer, @@ -2302,7 +3662,16 @@ index 47ea0f47e7..be4bd66576 100644 xport = xport_lookup(xcfg, tnl_port_should_receive(flow) ? tnl_port_receive(flow) : odp_port_to_ofport(backer, flow->in_port.odp_port)); -@@ -5700,8 +5710,16 @@ xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn) +@@ -3768,6 +3778,8 @@ native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport, + + if (flow->tunnel.ip_src) { + in6_addr_set_mapped_ipv4(&s_ip6, flow->tunnel.ip_src); ++ } else if (ipv6_addr_is_set(&flow->tunnel.ipv6_src)) { ++ s_ip6 = flow->tunnel.ipv6_src; + } + + err = tnl_route_lookup_flow(ctx, flow, &d_ip6, &s_ip6, &out_dev); +@@ -5700,8 +5712,16 @@ xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn) if (!error) { bool success = true; if (ctx->xin->allow_side_effects) { @@ -3609,6 +4978,66 @@ index 30028ba7a0..50b9870641 100755 # flake8: noqa: E402 from scapy.all import RandMAC, RandIP, PcapWriter, RandIP6, RandShort, fuzz +diff --git a/tests/nsh.at b/tests/nsh.at +index 55296e5593..0040a50b36 100644 +--- a/tests/nsh.at ++++ b/tests/nsh.at +@@ -521,51 +521,45 @@ AT_CHECK([ + set interface vxlangpe32 type=vxlan options:exts=gpe options:remote_ip=30.0.0.2 options:packet_type=ptap ofport_request=3020 + + ovs-appctl netdev-dummy/ip4addr br-p1 10.0.0.1/24 +- ovs-appctl ovs/route/add 10.0.0.0/24 br-p1 + ovs-appctl tnl/arp/set br-p1 10.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p1 10.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p1 10.0.0.3 $HWADDR_BRP3 + + ovs-appctl netdev-dummy/ip4addr br-p2 20.0.0.2/24 +- ovs-appctl ovs/route/add 20.0.0.0/24 br-p2 + ovs-appctl tnl/arp/set br-p2 20.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p2 20.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p2 20.0.0.3 $HWADDR_BRP3 + + ovs-appctl netdev-dummy/ip4addr br-p3 30.0.0.3/24 +- ovs-appctl ovs/route/add 30.0.0.0/24 br-p3 + ovs-appctl tnl/arp/set br-p3 30.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p3 30.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p3 30.0.0.3 $HWADDR_BRP3 + ], [0], [stdout]) + + AT_CHECK([ +- ovs-appctl ovs/route/add 10.0.0.0/24 br-p1 + ovs-appctl tnl/arp/set br-p1 10.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p1 10.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p1 10.0.0.3 $HWADDR_BRP3 + ], [0], [stdout]) + + 
AT_CHECK([ +- ovs-appctl ovs/route/add 20.0.0.0/24 br-p2 + ovs-appctl tnl/arp/set br-p2 20.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p2 20.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p2 20.0.0.3 $HWADDR_BRP3 + ], [0], [stdout]) + + AT_CHECK([ +- ovs-appctl ovs/route/add 30.0.0.0/24 br-p3 + ovs-appctl tnl/arp/set br-p3 30.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p3 30.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p3 30.0.0.3 $HWADDR_BRP3 + ], [0], [stdout]) + + AT_CHECK([ +- ovs-appctl ovs/route/show | grep User: ++ ovs-appctl ovs/route/show | grep Cached: | sort + ], [0], [dnl +-User: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 +-User: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 +-User: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3 ++Cached: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 local ++Cached: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 local ++Cached: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3 local + ]) + + AT_CHECK([ diff --git a/tests/ofp-print.at b/tests/ofp-print.at index 14aa554169..6a07e23c64 100644 --- a/tests/ofp-print.at @@ -3637,10 +5066,34 @@ index 14aa554169..6a07e23c64 100644 + AT_CLEANUP diff --git a/tests/ofproto-dpif.at b/tests/ofproto-dpif.at -index f242f77f31..a39d0d3ae9 100644 +index f242f77f31..c6a7752665 100644 --- a/tests/ofproto-dpif.at +++ b/tests/ofproto-dpif.at -@@ -5854,6 +5854,40 @@ OVS_WAIT_UNTIL([check_flows], [ovs-ofctl dump-flows br0]) +@@ -547,6 +547,23 @@ ovs-appctl time/warp 1000 100 + ovs-appctl bond/show > bond3.txt + AT_CHECK([sed -n '/member p2/,/^$/p' bond3.txt | grep 'hash'], [0], [ignore]) + ++# Check that both ports doing down and back up doesn't break statistics. ++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p1 down], 0, [OK ++]) ++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p2 down], 0, [OK ++]) ++ovs-appctl time/warp 1000 100 ++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p1 up], 0, [OK ++]) ++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p2 up], 0, [OK ++]) ++ovs-appctl time/warp 1000 100 ++ ++AT_CHECK([SEND_TCP_BOND_PKTS([p5], [5], [65500])]) ++# We sent 49125 KB of data total in 3 batches. No hash should have more ++# than that amount of load. Just checking that it is within 5 digits. ++AT_CHECK([ovs-appctl bond/show | grep -E '[[0-9]]{6}'], [1]) ++ + OVS_VSWITCHD_STOP() + AT_CLEANUP + +@@ -5854,6 +5871,40 @@ OVS_WAIT_UNTIL([check_flows], [ovs-ofctl dump-flows br0]) OVS_VSWITCHD_STOP AT_CLEANUP @@ -3681,6 +5134,40 @@ index f242f77f31..a39d0d3ae9 100644 AT_SETUP([ofproto-dpif - debug_slow action]) OVS_VSWITCHD_START add_of_ports br0 1 2 3 +@@ -7619,12 +7670,14 @@ dummy@ovs-dummy: hit:0 missed:0 + vm1 5/3: (dummy: ifindex=2011) + ]) + +-dnl set up route to 1.1.2.92 via br0 and action=normal ++dnl Add 1.1.2.92 to br0 and action=normal + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK +-]) + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++dnl Checking that a local route for added IP was successfully installed. 
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++]) + + dnl Prime ARP Cache for 1.1.2.92 + AT_CHECK([ovs-appctl netdev-dummy/receive p0 'recirc_id(0),in_port(1),eth(src=f8:bc:12:44:34:b6,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=00:00:00:00:00:00)']) +@@ -7635,10 +7688,13 @@ ovs-vsctl \ + --id=@sf create sflow targets=\"127.0.0.1:$SFLOW_PORT\" agent=127.0.0.1 \ + header=128 sampling=1 polling=0 + +-dnl set up route to 192.168.1.2 via br0 ++dnl Add 192.168.1.2 to br0, + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 192.168.1.1/16], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 192.168.0.0/16 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 192.168.0.0/16 dev br0 SRC 192.168.1.1 local + ]) + + dnl add rule for int-br to force packet onto tunnel. There is no ifindex diff --git a/tests/ofproto-macros.at b/tests/ofproto-macros.at index d2e6ac768b..6213e6d91c 100644 --- a/tests/ofproto-macros.at @@ -4011,10 +5498,22 @@ index 12cd2bc319..3e1df18a11 100644 [[[false]]], [[[true]]]]) diff --git a/tests/ovsdb-server.at b/tests/ovsdb-server.at -index d36c3c117e..6eb758e229 100644 +index d36c3c117e..2050bc1736 100644 --- a/tests/ovsdb-server.at +++ b/tests/ovsdb-server.at -@@ -1830,9 +1830,14 @@ replication_schema > schema +@@ -699,8 +699,10 @@ AT_CHECK_UNQUOTED( + [ignore]) + # The error message for being unable to negotiate a shared ciphersuite + # is 'sslv3 alert handshake failure'. This is not the clearest message. ++# In openssl 3.2.0 all the error messages were updated to replace 'sslv3' ++# with 'ssl/tls'. + AT_CHECK_UNQUOTED( +- [grep "sslv3 alert handshake failure" output], [0], ++ [grep -E "(sslv3|ssl/tls) alert handshake failure" output], [0], + [stdout], + [ignore]) + OVSDB_SERVER_SHUTDOWN([" +@@ -1830,9 +1832,14 @@ replication_schema > schema AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore]) AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore]) @@ -4031,7 +5530,7 @@ index d36c3c117e..6eb758e229 100644 dnl Try to connect without specifying the active server. AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/connect-active-ovsdb-server], [0], -@@ -2153,9 +2158,16 @@ AT_CHECK([ovsdb-tool transact db2 \ +@@ -2153,9 +2160,16 @@ AT_CHECK([ovsdb-tool transact db2 \ dnl Start both 'db1' and 'db2'. on_exit 'kill `cat *.pid`' @@ -4050,7 +5549,7 @@ index d36c3c117e..6eb758e229 100644 OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/sync-status |grep active]) OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/sync-status |grep active]) -@@ -2382,6 +2394,6 @@ CLEAN_LOG_FILE([2.log], [2.log.clear]) +@@ -2382,6 +2396,6 @@ CLEAN_LOG_FILE([2.log], [2.log.clear]) dnl Checking that databases and logs are equal. 
AT_CHECK([diff db.clear ./replay_dir/db.copy.clear]) @@ -4058,6 +5557,82 @@ index d36c3c117e..6eb758e229 100644 +AT_CHECK([diff -u 1.log.clear 2.log.clear]) AT_CLEANUP +diff --git a/tests/packet-type-aware.at b/tests/packet-type-aware.at +index 14cebf6efa..d634930fd5 100644 +--- a/tests/packet-type-aware.at ++++ b/tests/packet-type-aware.at +@@ -142,30 +142,27 @@ AT_CHECK([ + ### Setup GRE tunnels + AT_CHECK([ + ovs-appctl netdev-dummy/ip4addr br-p1 10.0.0.1/24 && +- ovs-appctl ovs/route/add 10.0.0.0/24 br-p1 && + ovs-appctl tnl/arp/set br-p1 10.0.0.1 $HWADDR_BRP1 && + ovs-appctl tnl/arp/set br-p1 10.0.0.2 $HWADDR_BRP2 && + ovs-appctl tnl/arp/set br-p1 10.0.0.3 $HWADDR_BRP3 && + + ovs-appctl netdev-dummy/ip4addr br-p2 20.0.0.2/24 && +- ovs-appctl ovs/route/add 20.0.0.0/24 br-p2 && + ovs-appctl tnl/arp/set br-p2 20.0.0.1 $HWADDR_BRP1 && + ovs-appctl tnl/arp/set br-p2 20.0.0.2 $HWADDR_BRP2 && + ovs-appctl tnl/arp/set br-p2 20.0.0.3 $HWADDR_BRP3 && + + ovs-appctl netdev-dummy/ip4addr br-p3 30.0.0.3/24 && +- ovs-appctl ovs/route/add 30.0.0.0/24 br-p3 && + ovs-appctl tnl/arp/set br-p3 30.0.0.1 $HWADDR_BRP1 && + ovs-appctl tnl/arp/set br-p3 30.0.0.2 $HWADDR_BRP2 && + ovs-appctl tnl/arp/set br-p3 30.0.0.3 $HWADDR_BRP3 + ], [0], [ignore]) + + AT_CHECK([ +- ovs-appctl ovs/route/show | grep User: ++ ovs-appctl ovs/route/show | grep Cached: | sort + ], [0], [dnl +-User: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 +-User: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 +-User: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3 ++Cached: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 local ++Cached: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 local ++Cached: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3 local + ]) + + AT_CHECK([ +@@ -681,14 +678,13 @@ AT_CHECK([ + + AT_CHECK([ + ovs-appctl netdev-dummy/ip4addr br2 10.0.0.1/24 && +- ovs-appctl ovs/route/add 10.0.0.0/24 br2 && + ovs-appctl tnl/arp/set br2 10.0.0.2 de:af:be:ef:ba:be + ], [0], [ignore]) + + AT_CHECK([ +- ovs-appctl ovs/route/show | grep User: ++ ovs-appctl ovs/route/show | grep Cached: + ], [0], [dnl +-User: 10.0.0.0/24 dev br2 SRC 10.0.0.1 ++Cached: 10.0.0.0/24 dev br2 SRC 10.0.0.1 local + ]) + + +@@ -955,7 +951,6 @@ AT_CHECK([ + + AT_CHECK([ + ovs-appctl netdev-dummy/ip4addr br0 20.0.0.1/24 && +- ovs-appctl ovs/route/add 20.0.0.2/24 br0 && + ovs-appctl tnl/neigh/set br0 20.0.0.1 aa:bb:cc:00:00:01 && + ovs-appctl tnl/neigh/set br0 20.0.0.2 aa:bb:cc:00:00:02 + ], [0], [ignore]) +@@ -963,9 +958,9 @@ AT_CHECK([ + ovs-appctl time/warp 1000 + + AT_CHECK([ +- ovs-appctl ovs/route/show | grep User ++ ovs-appctl ovs/route/show | grep Cached: + ],[0], [dnl +-User: 20.0.0.0/24 dev br0 SRC 20.0.0.1 ++Cached: 20.0.0.0/24 dev br0 SRC 20.0.0.1 local + ]) + + AT_CHECK([ diff --git a/tests/pmd.at b/tests/pmd.at index 7c333a901b..7bdaca9e71 100644 --- a/tests/pmd.at @@ -4087,6 +5662,141 @@ index 7c333a901b..7bdaca9e71 100644 AT_CHECK([echo 'table=0,in_port=p1,ip,nw_dst=10.1.0.0/16 actions=p2' | dnl ovs-ofctl --bundle replace-flows br0 -]) +diff --git a/tests/rstp.at b/tests/rstp.at +index 600e85dabd..e0d4bed4f0 100644 +--- a/tests/rstp.at ++++ b/tests/rstp.at +@@ -253,3 +253,60 @@ AT_CHECK([ovs-vsctl del-port br0 p1]) + + OVS_VSWITCHD_STOP + AT_CLEANUP ++ ++AT_SETUP([RSTP - patch ports]) ++# Create br0 with interfaces p1 and p7 ++# and br1 with interfaces p2 and p8 ++# with p1 and p2 being connected patch ports. 
++OVS_VSWITCHD_START( ++ [set port br0 other_config:rstp-enable=false -- \ ++ set bridge br0 rstp-enable=true ++]) ++ ++AT_CHECK([add_of_br 1 \ ++ set port br1 other_config:rstp-enable=false -- \ ++ set bridge br1 rstp-enable=true]) ++ ++ovs-appctl time/stop ++ ++AT_CHECK([ovs-vsctl \ ++ add-port br0 p1 -- \ ++ set interface p1 type=patch options:peer=p2 ofport_request=1 -- \ ++ set port p1 other_config:rstp-enable=true -- \ ++ add-port br1 p2 -- \ ++ set interface p2 type=patch options:peer=p1 ofport_request=2 -- \ ++ set port p2 other_config:rstp-enable=true -- \ ++]) ++ ++AT_CHECK([ovs-vsctl \ ++ add-port br0 p7 -- \ ++ set interface p7 ofport_request=7 type=dummy -- \ ++ set port p7 other_config:rstp-enable=false -- \ ++ add-port br1 p8 -- \ ++ set interface p8 ofport_request=8 type=dummy -- \ ++ set port p8 other_config:rstp-enable=false -- \ ++]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 "in_port=7 icmp actions=1"]) ++AT_CHECK([ovs-ofctl add-flow br0 "in_port=1 icmp actions=7"]) ++AT_CHECK([ovs-ofctl add-flow br1 "in_port=8 icmp actions=2"]) ++AT_CHECK([ovs-ofctl add-flow br1 "in_port=2 icmp actions=8"]) ++ ++# Give time for RSTP to synchronize. ++ovs-appctl time/warp 5000 500 ++ ++OVS_WAIT_UNTIL_EQUAL([cat ovs-vswitchd.log | FILTER_STP_TOPOLOGY], [dnl ++port p1: RSTP state changed from Disabled to Discarding ++port p2: RSTP state changed from Disabled to Discarding ++port p2: RSTP state changed from Discarding to Forwarding ++port p1: RSTP state changed from Discarding to Forwarding]) ++ ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(7),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' | grep Datapath], [0], [dnl ++Datapath actions: 8 ++]) ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(8),eth(src=50:54:00:00:00:0b,dst=50:54:00:00:00:0c),eth_type(0x0800),ipv4(src=10.0.0.3,dst=10.0.0.4,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' | grep Datapath], [0], [dnl ++Datapath actions: 7 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP +diff --git a/tests/stp.at b/tests/stp.at +index a6b6465d12..6239ec379f 100644 +--- a/tests/stp.at ++++ b/tests/stp.at +@@ -464,6 +464,65 @@ Datapath actions: 2 + + AT_CLEANUP + ++AT_SETUP([STP - patch ports]) ++# Create br0 with interfaces p1 and p7 ++# and br1 with interfaces p2 and p8 ++# with p1 and p2 being connected patch ports. 
++OVS_VSWITCHD_START( ++ [set port br0 other_config:stp-enable=false -- \ ++ set bridge br0 stp-enable=true ++]) ++ ++AT_CHECK([add_of_br 1 \ ++ set port br1 other_config:stp-enable=false -- \ ++ set bridge br1 stp-enable=true]) ++ ++ovs-appctl time/stop ++ ++AT_CHECK([ovs-vsctl \ ++ add-port br0 p1 -- \ ++ set interface p1 type=patch options:peer=p2 ofport_request=1 -- \ ++ set port p1 other_config:stp-enable=true -- \ ++ add-port br1 p2 -- \ ++ set interface p2 type=patch options:peer=p1 ofport_request=2 -- \ ++ set port p2 other_config:stp-enable=true -- \ ++]) ++ ++AT_CHECK([ovs-vsctl \ ++ add-port br0 p7 -- \ ++ set interface p7 ofport_request=7 type=dummy -- \ ++ set port p7 other_config:stp-enable=false -- \ ++ add-port br1 p8 -- \ ++ set interface p8 ofport_request=8 type=dummy -- \ ++ set port p8 other_config:stp-enable=false -- \ ++]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 "in_port=7 icmp actions=1"]) ++AT_CHECK([ovs-ofctl add-flow br0 "in_port=1 icmp actions=7"]) ++AT_CHECK([ovs-ofctl add-flow br1 "in_port=8 icmp actions=2"]) ++AT_CHECK([ovs-ofctl add-flow br1 "in_port=2 icmp actions=8"]) ++ ++# Give time for STP to synchronize. ++ovs-appctl time/warp 30000 3000 ++ ++OVS_WAIT_UNTIL_EQUAL([cat ovs-vswitchd.log | FILTER_STP_TOPOLOGY], [dnl ++port <>: STP state changed from disabled to listening ++port <>: STP state changed from disabled to listening ++port <>: STP state changed from listening to learning ++port <>: STP state changed from listening to learning ++port <>: STP state changed from learning to forwarding ++port <>: STP state changed from learning to forwarding]) ++ ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(7),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' | grep Datapath], [0], [dnl ++Datapath actions: 8 ++]) ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(8),eth(src=50:54:00:00:00:0b,dst=50:54:00:00:00:0c),eth_type(0x0800),ipv4(src=10.0.0.3,dst=10.0.0.4,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' | grep Datapath], [0], [dnl ++Datapath actions: 7 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP ++ + AT_SETUP([STP - flush the fdb and mdb when topology changed]) + OVS_VSWITCHD_START([]) + diff --git a/tests/system-afxdp.at b/tests/system-afxdp.at index 0d09906fb6..88f6605663 100644 --- a/tests/system-afxdp.at @@ -4178,7 +5888,7 @@ index 07f2b8fd0e..d3d27133b9 100644 ]) diff --git a/tests/system-layer3-tunnels.at b/tests/system-layer3-tunnels.at -index 81123f7309..6fbdedb64f 100644 +index 81123f7309..5dcdd2afae 100644 --- a/tests/system-layer3-tunnels.at +++ b/tests/system-layer3-tunnels.at @@ -34,15 +34,15 @@ AT_CHECK([ovs-ofctl add-flow br0 "priority=100 ip,nw_dst=10.1.1.2 action=mod_dl_ @@ -4200,7 +5910,7 @@ index 81123f7309..6fbdedb64f 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -83,15 +83,15 @@ AT_CHECK([ovs-ofctl add-flow br0 "priority=100 ip,nw_dst=10.1.1.2 action=mod_dl_ +@@ -83,76 +83,21 @@ AT_CHECK([ovs-ofctl add-flow br0 "priority=100 ip,nw_dst=10.1.1.2 action=mod_dl_ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.1.1.2]) dnl First, check the underlay @@ -4219,7 +5929,68 @@ index 81123f7309..6fbdedb64f 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -191,11 +191,11 @@ AT_CHECK([ovs-vsctl add-port br1 patch1]) + OVS_TRAFFIC_VSWITCHD_STOP + AT_CLEANUP + +-AT_SETUP([layer3 - use non-local port as tunnel endpoint]) +- +-OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 type=dummy 
ofport_request=1]) +-AT_CHECK([ovs-vsctl add-port br0 vtep0 -- set int vtep0 type=dummy], [0]) +-AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy], [0]) +-AT_CHECK([ovs-vsctl add-port int-br t1 -- set Interface t1 type=gre \ +- options:remote_ip=1.1.2.92 ofport_request=3], [0]) +- +-AT_CHECK([ovs-appctl dpif/show], [0], [dnl +-dummy@ovs-dummy: hit:0 missed:0 +- br0: +- br0 65534/100: (dummy-internal) +- p0 1/1: (dummy) +- vtep0 2/2: (dummy) +- int-br: +- int-br 65534/3: (dummy-internal) +- t1 3/4: (gre: remote_ip=1.1.2.92) +-]) +- +-AT_CHECK([ovs-appctl netdev-dummy/ip4addr vtep0 1.1.2.88/24], [0], [OK +-]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 vtep0], [0], [OK +-]) +-AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +-AT_CHECK([ovs-ofctl add-flow int-br action=normal]) +- +-dnl Use arp request and reply to achieve tunnel next hop mac binding +-dnl By default, vtep0's MAC address is aa:55:aa:55:00:03 +-AT_CHECK([ovs-appctl netdev-dummy/receive vtep0 'recirc_id(0),in_port(2),eth(dst=ff:ff:ff:ff:ff:ff,src=aa:55:aa:55:00:03),eth_type(0x0806),arp(tip=1.1.2.92,sip=1.1.2.88,op=1,sha=aa:55:aa:55:00:03,tha=00:00:00:00:00:00)']) +-AT_CHECK([ovs-appctl netdev-dummy/receive p0 'recirc_id(0),in_port(1),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0806),arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=aa:55:aa:55:00:03)']) +- +-AT_CHECK([ovs-appctl tnl/neigh/show | tail -n+3 | sort], [0], [dnl +-1.1.2.92 f8:bc:12:44:34:b6 br0 +-]) +- +-AT_CHECK([ovs-appctl ovs/route/show | tail -n+2 | sort], [0], [dnl +-User: 1.1.2.0/24 dev vtep0 SRC 1.1.2.88 +-]) +- +-dnl Check GRE tunnel pop +-AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0800),ipv4(src=1.1.2.92,dst=1.1.2.88,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) +- +-AT_CHECK([tail -1 stdout], [0], +- [Datapath actions: tnl_pop(4) +-]) +- +-dnl Check GRE tunnel push +-AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(3),eth(dst=f9:bc:12:44:34:b6,src=af:55:aa:55:00:03),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.92,proto=1,tos=0,ttl=64,frag=no)'], [0], [stdout]) +-AT_CHECK([tail -1 stdout], [0], +- [Datapath actions: tnl_push(tnl_port(4),header(size=38,type=3,eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:03,dl_type=0x0800),ipv4(src=1.1.2.88,dst=1.1.2.92,proto=47,tos=0,ttl=64,frag=0x4000),gre((flags=0x0,proto=0x6558))),out_port(2)),1 +-]) +- +-OVS_VSWITCHD_STOP +-AT_CLEANUP +- + AT_SETUP([layer3 - ping over MPLS Bareudp]) + OVS_CHECK_BAREUDP() + OVS_TRAFFIC_VSWITCHD_START([_ADD_BR([br1])]) +@@ -191,11 +136,11 @@ AT_CHECK([ovs-vsctl add-port br1 patch1]) AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br0 flows0.txt]) AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br1 flows1.txt]) @@ -4233,7 +6004,7 @@ index 81123f7309..6fbdedb64f 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) OVS_TRAFFIC_VSWITCHD_STOP -@@ -239,11 +239,11 @@ AT_CHECK([ovs-vsctl add-port br1 patch1]) +@@ -239,11 +184,11 @@ AT_CHECK([ovs-vsctl add-port br1 patch1]) AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br0 flows0.txt]) AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br1 flows1.txt]) @@ -4510,7 +6281,7 @@ index 871a3bda4f..3d84a53182 100644 ]) diff --git a/tests/system-traffic.at b/tests/system-traffic.at -index 808c492a22..e051c942f0 100644 +index 808c492a22..23404a2799 100644 --- a/tests/system-traffic.at +++ b/tests/system-traffic.at @@ -10,13 +10,13 @@ ADD_NAMESPACES(at_ns0, at_ns1) @@ -5231,7 +7002,78 @@ index 808c492a22..e051c942f0 
100644 AT_BANNER([MPLS]) AT_SETUP([mpls - encap header dp-support]) -@@ -2516,6 +2677,7 @@ AT_CLEANUP +@@ -2322,34 +2483,53 @@ AT_BANNER([QoS]) + + AT_SETUP([QoS - basic configuration]) + AT_SKIP_IF([test $HAVE_TC = no]) ++AT_SKIP_IF([test $HAVE_ETHTOOL = "no"]) + OVS_TRAFFIC_VSWITCHD_START() + +-ADD_NAMESPACES(at_ns0, at_ns1) ++AT_CHECK([ip tuntap add ovs-tap0 mode tap]) ++on_exit 'ip link del ovs-tap0' ++AT_CHECK([ip tuntap add ovs-tap1 mode tap]) ++on_exit 'ip link del ovs-tap1' + +-ADD_VETH(p0, at_ns0, br0, "10.1.1.1/24") +-ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24") ++dnl Set maximum link speed to 5Gb. ++AT_CHECK([ethtool -s ovs-tap0 speed 5000 duplex full]) ++AT_CHECK([ip link set dev ovs-tap0 up]) ++AT_CHECK([ethtool -s ovs-tap1 speed 5000 duplex full]) ++AT_CHECK([ip link set dev ovs-tap1 up]) + +-dnl Adding a custom qdisc to ovs-p1, ovs-p0 will have the default qdisc. +-AT_CHECK([tc qdisc add dev ovs-p1 root noqueue]) +-AT_CHECK([tc qdisc show dev ovs-p1 | grep -q noqueue]) ++AT_CHECK([ovs-vsctl add-port br0 ovs-tap0 -- set int ovs-tap0 type=tap]) ++AT_CHECK([ovs-vsctl add-port br0 ovs-tap1 -- set int ovs-tap1 type=tap]) + +-dnl Configure the same QoS for both ports. +-AT_CHECK([ovs-vsctl set port ovs-p0 qos=@qos -- set port ovs-p1 qos=@qos dnl +- -- --id=@qos create qos dnl +- type=linux-htb other-config:max-rate=3000000 queues:0=@queue dnl +- -- --id=@queue create queue dnl ++dnl Adding a custom qdisc to ovs-tap1, ovs-tap0 will have the default qdisc. ++AT_CHECK([tc qdisc add dev ovs-tap1 root noqueue]) ++AT_CHECK([tc qdisc show dev ovs-tap1 | grep -q noqueue]) ++ ++dnl Configure the same QoS for both ports: ++dnl queue0 uses fixed max-rate. ++dnl queue1 relies on underlying link speed. ++AT_CHECK([ovs-vsctl dnl ++ -- --id=@queue0 create queue dnl + other_config:min-rate=2000000 other_config:max-rate=3000000 dnl +- other_config:burst=3000000], ++ other_config:burst=3000000 dnl ++ -- --id=@queue1 create queue dnl ++ other_config:min-rate=4000000 other_config:burst=4000000 dnl ++ -- --id=@qos create qos dnl ++ type=linux-htb queues:0=@queue0 dnl ++ queues:1=@queue1 -- dnl ++ -- set port ovs-tap0 qos=@qos -- set port ovs-tap1 qos=@qos], + [ignore], [ignore]) + + dnl Wait for qdiscs to be applied. +-OVS_WAIT_UNTIL([tc qdisc show dev ovs-p0 | grep -q htb]) +-OVS_WAIT_UNTIL([tc qdisc show dev ovs-p1 | grep -q htb]) ++OVS_WAIT_UNTIL([tc qdisc show dev ovs-tap0 | grep -q htb]) ++OVS_WAIT_UNTIL([tc qdisc show dev ovs-tap1 | grep -q htb]) + + dnl Check the configuration. 
+-m4_define([HTB_CONF], [rate 2Mbit ceil 3Mbit burst 375000b cburst 375000b]) +-AT_CHECK([tc class show dev ovs-p0 | grep -q 'class htb .* HTB_CONF']) +-AT_CHECK([tc class show dev ovs-p1 | grep -q 'class htb .* HTB_CONF']) ++m4_define([HTB_CONF0], [rate 2Mbit ceil 3Mbit burst 375000b cburst 375000b]) ++m4_define([HTB_CONF1], [rate 4Mbit ceil 5Gbit burst 500000b cburst 500000b]) ++AT_CHECK([tc class show dev ovs-tap0 | grep -q 'class htb .* HTB_CONF0']) ++AT_CHECK([tc class show dev ovs-tap0 | grep -q 'class htb .* HTB_CONF1']) ++AT_CHECK([tc class show dev ovs-tap1 | grep -q 'class htb .* HTB_CONF0']) ++AT_CHECK([tc class show dev ovs-tap1 | grep -q 'class htb .* HTB_CONF1']) + + OVS_TRAFFIC_VSWITCHD_STOP + AT_CLEANUP +@@ -2516,6 +2696,7 @@ AT_CLEANUP AT_SETUP([conntrack - ct flush]) CHECK_CONNTRACK() @@ -5239,7 +7081,7 @@ index 808c492a22..e051c942f0 100644 OVS_TRAFFIC_VSWITCHD_START() ADD_NAMESPACES(at_ns0, at_ns1) -@@ -2526,10 +2688,8 @@ ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24") +@@ -2526,10 +2707,8 @@ ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24") AT_DATA([flows.txt], [dnl priority=1,action=drop priority=10,arp,action=normal @@ -5252,7 +7094,7 @@ index 808c492a22..e051c942f0 100644 ]) AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) -@@ -2564,7 +2724,7 @@ AT_CHECK([FLUSH_CMD zone=5 'ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17 +@@ -2564,7 +2743,7 @@ AT_CHECK([FLUSH_CMD zone=5 'ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17 AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0]) dnl Test ICMP traffic @@ -5261,7 +7103,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2692,6 +2852,25 @@ udp,orig=(src=10.1.1.2,dst=10.1.1.1,sport=2,dport=1),reply=(src=10.1.1.1,dst=10. +@@ -2692,6 +2871,25 @@ udp,orig=(src=10.1.1.2,dst=10.1.1.1,sport=2,dport=1),reply=(src=10.1.1.1,dst=10. AT_CHECK([FLUSH_CMD]) @@ -5287,7 +7129,7 @@ index 808c492a22..e051c942f0 100644 AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep "10\.1\.1\.1"], [1]) ]) -@@ -2745,7 +2924,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 +@@ -2745,7 +2943,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) dnl Pings from ns0->ns1 should work fine. @@ -5296,7 +7138,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2786,7 +2965,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 +@@ -2786,7 +2984,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) dnl Pings from ns0->ns1 should work fine. @@ -5305,7 +7147,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2886,7 +3065,7 @@ NS_CHECK_EXEC([at_ns1], [ping6 -q -c 3 -i 0.3 -w 2 fc00::1 | FORMAT_PING], [0], +@@ -2886,7 +3084,7 @@ NS_CHECK_EXEC([at_ns1], [ping6 -q -c 3 -i 0.3 -w 2 fc00::1 | FORMAT_PING], [0], ]) dnl Pings from ns0->ns1 should work fine. 
@@ -5314,7 +7156,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3796,7 +3975,7 @@ table=0,in_port=ovs-p1,ct_state=+trk+rel+rpl,icmp,actions=ovs-p0 +@@ -3796,7 +3994,7 @@ table=0,in_port=ovs-p1,ct_state=+trk+rel+rpl,icmp,actions=ovs-p0 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) rm p0.pcap @@ -5323,7 +7165,7 @@ index 808c492a22..e051c942f0 100644 OVS_WAIT_UNTIL([grep "listening" tcpdump0_err]) dnl Send UDP packet from 10.1.1.1:1234 to 10.1.1.240:80 -@@ -3837,12 +4016,12 @@ dnl Modify userspace conntrack fragmentation handling. +@@ -3837,12 +4035,12 @@ dnl Modify userspace conntrack fragmentation handling. DPCTL_MODIFY_FRAGMENTATION() dnl Ipv4 fragmentation connectivity check. @@ -5338,7 +7180,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3914,12 +4093,12 @@ dnl Modify userspace conntrack fragmentation handling. +@@ -3914,12 +4112,12 @@ dnl Modify userspace conntrack fragmentation handling. DPCTL_MODIFY_FRAGMENTATION() dnl Ipv4 fragmentation connectivity check. @@ -5353,7 +7195,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3960,22 +4139,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) +@@ -3960,22 +4158,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) dnl Ipv4 fragmentation connectivity check. @@ -5380,7 +7222,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4134,12 +4313,12 @@ dnl "connect: Cannot assign requested address" +@@ -4134,12 +4332,12 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) dnl Ipv6 fragmentation connectivity check. @@ -5395,7 +7237,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4216,12 +4395,12 @@ dnl "connect: Cannot assign requested address" +@@ -4216,12 +4414,12 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) dnl Ipv4 fragmentation connectivity check. @@ -5410,7 +7252,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4259,22 +4438,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) +@@ -4259,22 +4457,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00:1::4]) dnl Ipv6 fragmentation connectivity check. 
@@ -5437,7 +7279,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4486,18 +4665,18 @@ ADD_NATIVE_TUNNEL([vxlan], [at_vxlan1], [at_ns0], [172.31.1.100], [10.1.1.1/24], +@@ -4486,18 +4684,18 @@ ADD_NATIVE_TUNNEL([vxlan], [at_vxlan1], [at_ns0], [172.31.1.100], [10.1.1.1/24], [id 0 dstport 4789]) dnl First, check the underlay @@ -5460,7 +7302,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4546,18 +4725,18 @@ dnl "connect: Cannot assign requested address" +@@ -4546,18 +4744,18 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) dnl First, check the underlay @@ -5483,7 +7325,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4670,7 +4849,7 @@ dnl The default udp_single and icmp_first timeouts are 30 seconds in +@@ -4670,7 +4868,7 @@ dnl The default udp_single and icmp_first timeouts are 30 seconds in dnl kernel DP, and 60 seconds in userspace DP. dnl Send ICMP and UDP traffic @@ -5492,7 +7334,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4696,7 +4875,7 @@ done +@@ -4696,7 +4894,7 @@ done AT_CHECK([ovs-vsctl --may-exist add-zone-tp $DP_TYPE zone=5 udp_first=1 udp_single=1 icmp_first=1 icmp_reply=1]) dnl Send ICMP and UDP traffic @@ -5501,7 +7343,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4714,7 +4893,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl +@@ -4714,7 +4912,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl ]) dnl Re-send ICMP and UDP traffic to test conntrack cache @@ -5510,7 +7352,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4735,7 +4914,7 @@ dnl Set the timeout policy to default again. +@@ -4735,7 +4933,7 @@ dnl Set the timeout policy to default again. 
AT_CHECK([ovs-vsctl del-zone-tp $DP_TYPE zone=5]) dnl Send ICMP and UDP traffic @@ -5519,7 +7361,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -5001,7 +5180,7 @@ table=2,in_port=1,ip,ct_state=+trk+est,ct_zone=2,action=LOCAL +@@ -5001,7 +5199,7 @@ table=2,in_port=1,ip,ct_state=+trk+est,ct_zone=2,action=LOCAL AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) @@ -5528,7 +7370,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -5072,7 +5251,7 @@ table=4,priority=100,ip,action=output:NXM_NX_REG0[[]] +@@ -5072,7 +5270,7 @@ table=4,priority=100,ip,action=output:NXM_NX_REG0[[]] AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) @@ -5537,7 +7379,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -6140,7 +6319,7 @@ table=10 priority=0 action=drop +@@ -6140,7 +6338,7 @@ table=10 priority=0 action=drop AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) rm p0.pcap @@ -5546,7 +7388,7 @@ index 808c492a22..e051c942f0 100644 sleep 1 dnl UDP packets from ns0->ns1 should solicit "destination unreachable" response. -@@ -6164,7 +6343,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | sed -e 's/dst= +@@ -6164,7 +6362,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | sed -e 's/dst= udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=,dport=),reply=(src=10.1.1.2,dst=10.1.1.2XX,sport=,dport=),mark=1 ]) @@ -5555,7 +7397,7 @@ index 808c492a22..e051c942f0 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -6854,7 +7033,7 @@ dnl waiting, we get occasional failures due to the following error: +@@ -6854,7 +7052,7 @@ dnl waiting, we get occasional failures due to the following error: dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::240]) @@ -5564,7 +7406,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -6909,13 +7088,13 @@ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) +@@ -6909,13 +7107,13 @@ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) AT_CHECK([ovs-appctl dpctl/flush-conntrack]) rm p0.pcap @@ -5580,7 +7422,7 @@ index 808c492a22..e051c942f0 100644 AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fc00::2)], [0], [dnl udp,orig=(src=fc00::1,dst=fc00::2,sport=,dport=),reply=(src=fc00::2,dst=fc00::240,sport=,dport=) -@@ -6944,7 +7123,7 @@ table=0,in_port=ovs-p1,ct_state=+trk+rel+rpl,icmp6,actions=ovs-p0 +@@ -6944,7 +7142,7 @@ table=0,in_port=ovs-p1,ct_state=+trk+rel+rpl,icmp6,actions=ovs-p0 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) rm p0.pcap @@ -5589,7 +7431,7 @@ index 808c492a22..e051c942f0 100644 OVS_WAIT_UNTIL([grep "listening" tcpdump0_err]) dnl Send UDP packet from [[fc00::1]]:1234 to [[fc00::240]]:80 -@@ -7587,12 +7766,12 @@ ADD_NATIVE_TUNNEL([geneve], [ns_gnv0], [at_ns0], [172.31.1.100], [10.1.1.1/24], +@@ -7587,12 +7785,12 @@ ADD_NATIVE_TUNNEL([geneve], [ns_gnv0], [at_ns0], [172.31.1.100], [10.1.1.1/24], [vni 0]) dnl First, check the underlay @@ -5604,7 +7446,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -7635,7 +7814,7 @@ table=2,in_port=ovs-server,ip,ct_state=+trk+rpl,actions=output:ovs-client +@@ -7635,7 +7833,7 @@ 
table=2,in_port=ovs-server,ip,ct_state=+trk+rpl,actions=output:ovs-client AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) rm server.pcap @@ -5613,7 +7455,7 @@ index 808c492a22..e051c942f0 100644 OVS_WAIT_UNTIL([grep "listening" tcpdump0_err]) dnl Send UDP client->server -@@ -7677,7 +7856,7 @@ dnl Check the ICMP error in reply direction +@@ -7677,7 +7875,7 @@ dnl Check the ICMP error in reply direction AT_CHECK([ovs-appctl dpctl/flush-conntrack zone=42]) rm client.pcap @@ -5622,7 +7464,73 @@ index 808c492a22..e051c942f0 100644 OVS_WAIT_UNTIL([grep "listening" tcpdump1_err]) dnl Send UDP client->server -@@ -7819,7 +7998,7 @@ dnl CVLAN traffic should match the flow and drop +@@ -7715,6 +7913,65 @@ AT_CHECK([ovs-pcap client.pcap | grep 000000002010000000002000], [0], [dnl + OVS_TRAFFIC_VSWITCHD_STOP + AT_CLEANUP + ++AT_SETUP([conntrack - Flush many conntrack entries by port]) ++CHECK_CONNTRACK() ++OVS_TRAFFIC_VSWITCHD_START() ++ ++ADD_NAMESPACES(at_ns0, at_ns1) ++ ++ADD_VETH(p0, at_ns0, br0, "10.1.1.1/24") ++ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24") ++ ++AT_DATA([flows.txt], [dnl ++priority=100,in_port=1,udp,action=ct(zone=1,commit),2 ++]) ++ ++AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) ++ ++dnl 20 packets from port 1 and 1 packet from port 2. ++flow_l3="\ ++ eth_src=50:54:00:00:00:09,eth_dst=50:54:00:00:00:0a,dl_type=0x0800,\ ++ nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_proto=17,nw_ttl=64,nw_frag=no" ++ ++head="50540000000a50540000000908004500005c000000004011648d0a0101010a010102" ++len=72 ++base_csum=1366 ++tail="000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f\ ++ 202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f" ++ ++dst_port=1 ++for src_port in $(seq 1 20); do ++ csum=$((base_csum - src_port - dst_port)) ++ frame=$(printf "%s%04x%04x%04x%04x%s" $head 1 $src_port $len $csum $tail) ++ AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=$frame actions=resubmit(,0)"]) ++done ++ ++src_port=2 ++dst_port=1 ++csum=$((base_csum - src_port - dst_port)) ++frame=$(printf "%s%04x%04x%04x%04x%s" $head $src_port $dst_port $len $csum $tail) ++AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=$frame actions=resubmit(,0)"]) ++ ++: > conntrack ++ ++for i in $(seq 1 20); do ++ echo "udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=1,dport=${i}),reply=(src=10.1.1.2,dst=10.1.1.1,sport=${i},dport=1),zone=1" >> conntrack ++done ++echo "udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=2,dport=1),reply=(src=10.1.1.2,dst=10.1.1.1,sport=1,dport=2),zone=1" >> conntrack ++ ++sort conntrack > expout ++ ++AT_CHECK([ovs-appctl dpctl/dump-conntrack zone=1 | grep -F "src=10.1.1.1," | sort ], [0], [expout]) ++ ++dnl Check that flushing conntrack by port 1 flush all ct for port 1 but keeps ct for port 2. ++AT_CHECK([ovs-appctl dpctl/flush-conntrack zone=1 'ct_nw_proto=17,ct_tp_src=1']) ++AT_CHECK([ovs-appctl dpctl/dump-conntrack zone=1 | grep -F "src=10.1.1.1," | sort ], [0], [dnl ++udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=2,dport=1),reply=(src=10.1.1.2,dst=10.1.1.1,sport=1,dport=2),zone=1 ++]) ++ ++OVS_TRAFFIC_VSWITCHD_STOP ++AT_CLEANUP ++ + AT_BANNER([IGMP]) + + AT_SETUP([IGMP - flood under normal action]) +@@ -7819,7 +8076,7 @@ dnl CVLAN traffic should match the flow and drop AT_CHECK([ovs-appctl revalidator/purge]) AT_CHECK([ovs-vsctl set Open_vSwitch . 
other_config:vlan-limit=1]) AT_CHECK([ovs-ofctl add-flow br0 "priority=100 dl_type=0x8100 action=drop"]) @@ -5631,7 +7539,7 @@ index 808c492a22..e051c942f0 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -7869,11 +8048,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) +@@ -7869,11 +8126,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) @@ -5645,7 +7553,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -7925,11 +8104,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) +@@ -7925,11 +8182,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) @@ -5659,7 +7567,7 @@ index 808c492a22..e051c942f0 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -7977,24 +8156,24 @@ AT_CHECK([ovs-vsctl set port ovs-p2 vlan_mode=dot1q-tunnel tag=4094 cvlans=100,2 +@@ -7977,24 +8234,24 @@ AT_CHECK([ovs-vsctl set port ovs-p2 vlan_mode=dot1q-tunnel tag=4094 cvlans=100,2 OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.3.2.2]) @@ -5689,7 +7597,7 @@ index 808c492a22..e051c942f0 100644 OVS_TRAFFIC_VSWITCHD_STOP(["/dropping VLAN \(0\|300\) packet received on dot1q-tunnel port/d"]) AT_CLEANUP -@@ -8023,11 +8202,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows-br0.txt]) +@@ -8023,11 +8280,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows-br0.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) @@ -5937,8 +7845,505 @@ index a841adba4e..48f8ee2d70 100644 if len(args) != n_args: sys.stderr.write("%s: \"%s\" requires %d arguments but %d " "provided\n" +diff --git a/tests/test-rstp.c b/tests/test-rstp.c +index 9c1026ec1a..707ee3a6c8 100644 +--- a/tests/test-rstp.c ++++ b/tests/test-rstp.c +@@ -469,6 +469,8 @@ test_rstp_main(int argc, char *argv[]) + vlog_set_pattern(VLF_CONSOLE, "%c|%p|%m"); + vlog_set_levels(NULL, VLF_SYSLOG, VLL_OFF); + ++ rstp_init(); ++ + if (argc != 2) { + ovs_fatal(0, "usage: test-rstp INPUT.RSTP"); + } +diff --git a/tests/tunnel-push-pop-ipv6.at b/tests/tunnel-push-pop-ipv6.at +index a8dd28c5b5..3f2cf84292 100644 +--- a/tests/tunnel-push-pop-ipv6.at ++++ b/tests/tunnel-push-pop-ipv6.at +@@ -19,11 +19,12 @@ AT_CHECK([ovs-vsctl add-port int-br3 t3 -- set Interface t3 type=srv6 \ + options:srv6_flowlabel=compute \ + ], [0]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::0/24 br0], [0], [OK ++dnl Checking that a local routes for added IPs were successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local + ]) + AT_CHECK([ovs-appctl tnl/neigh/set br0 2001:cafe::91 aa:55:aa:55:00:01], [0], [OK + ]) +@@ -105,13 +106,15 @@ dummy@ovs-dummy: hit:0 missed:0 + t2 2/6: (ip6gre: remote_ip=2001:cafe::92) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. 
+ AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::92/24 br0], [0], [OK ++dnl Checking that a local routes for added IPs were successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -179,13 +182,15 @@ dummy@ovs-dummy: hit:0 missed:0 + t3 3/6: (ip6erspan: erspan_dir=1, erspan_hwid=0x7, erspan_ver=2, key=567, remote_ip=2001:cafe::93) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::92/24 br0], [0], [OK ++dnl Checking that a local routes for added IPs were successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -316,14 +321,15 @@ srv6_sys (6) ref_cnt=1 + vxlan_sys_4789 (4789) ref_cnt=2 + ]) + +- +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::92/24 br0], [0], [OK ++dnl Checking that a local routes for added IPs were successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -636,3 +642,87 @@ Listening ports: + + OVS_VSWITCHD_STOP + AT_CLEANUP ++ ++AT_SETUP([tunnel_push_pop_ipv6 - local_ip configuration]) ++ ++OVS_VSWITCHD_START( ++ [add-port br0 p0 \ ++ -- set Interface p0 type=dummy ofport_request=1 \ ++ other-config:hwaddr=aa:55:aa:55:00:00]) ++AT_CHECK([ovs-appctl vlog/set dpif_netdev:dbg]) ++AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy]) ++AT_CHECK([ovs-vsctl add-port int-br t2 \ ++ -- set Interface t2 type=geneve \ ++ options:local_ip=2001:beef::88 \ ++ options:remote_ip=2001:cafe::92 \ ++ options:key=123 ofport_request=2]) ++ ++dnl Setup multiple IP addresses. ++AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/64], [0], [OK ++]) ++AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:beef::88/64], [0], [OK ++]) ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 2001:beef::/64 dev br0 SRC 2001:beef::88 local ++Cached: 2001:cafe::/64 dev br0 SRC 2001:cafe::88 local ++]) ++AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++AT_CHECK([ovs-ofctl add-flow int-br action=normal]) ++ ++dnl This Neighbor Advertisement from p0 has two effects: ++dnl 1. The neighbor cache will learn that 2001:cafe::92 is at f8:bc:12:44:34:b6. ++dnl 2. 
The br0 mac learning will learn that f8:bc:12:44:34:b6 is on p0. ++AT_CHECK([ovs-appctl netdev-dummy/receive p0 dnl ++ 'recirc_id(0),in_port(1),dnl ++ eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x86dd),dnl ++ ipv6(src=2001:cafe::92,dst=2001:cafe::88,label=0,proto=58,tclass=0,hlimit=255,frag=no),dnl ++ icmpv6(type=136,code=0),dnl ++ nd(target=2001:cafe::92,sll=00:00:00:00:00:00,tll=f8:bc:12:44:34:b6)' ++]) ++ ++dnl Check that local_ip is used for encapsulation in the trace. ++AT_CHECK([ovs-appctl ofproto/trace int-br in_port=LOCAL \ ++ | grep -E 'tunnel|actions'], [0], [dnl ++ -> output to native tunnel ++ -> tunneling to 2001:cafe::92 via br0 ++ -> tunneling from aa:55:aa:55:00:00 2001:beef::88 to f8:bc:12:44:34:b6 2001:cafe::92 ++Datapath actions: tnl_push(tnl_port(6081),header(size=70,type=5,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x86dd),dnl ++ipv6(src=2001:beef::88,dst=2001:cafe::92,label=0,proto=17,tclass=0x0,hlimit=64),dnl ++udp(src=0,dst=6081,csum=0xffff),geneve(vni=0x7b)),out_port(100)),1 ++]) ++ ++dnl Now check that the packet actually has the local_ip in the header. ++AT_CHECK([ovs-vsctl -- set Interface p0 options:tx_pcap=p0.pcap]) ++ ++packet=50540000000a5054000000091234 ++eth=f8bc124434b6aa55aa55000086dd ++ip6=60000000001e11402001beef0000000000000000000000882001cafe000000000000000000000092 ++dnl Source port is based on a packet hash, so it may differ depending on the ++dnl compiler flags and CPU type. Same for UDP checksum. Masked with '....'. ++udp=....17c1001e.... ++geneve=0000655800007b00 ++encap=${eth}${ip6}${udp}${geneve} ++dnl Output to tunnel from a int-br internal port. ++dnl Checking that the packet arrived and it was correctly encapsulated. ++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"]) ++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 1]) ++dnl Sending again to exercise the non-miss upcall path. ++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"]) ++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 2]) ++ ++dnl Finally, checking that the datapath flow also has a local_ip. ++AT_CHECK([ovs-appctl dpctl/dump-flows | grep tnl_push \ ++ | strip_ufid | strip_used], [0], [dnl ++recirc_id(0),in_port(2),packet_type(ns=0,id=0),dnl ++eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x1234), dnl ++packets:1, bytes:14, used:0.0s, dnl ++actions:tnl_push(tnl_port(6081),header(size=70,type=5,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x86dd),dnl ++ipv6(src=2001:beef::88,dst=2001:cafe::92,label=0,proto=17,tclass=0x0,hlimit=64),dnl ++udp(src=0,dst=6081,csum=0xffff),geneve(vni=0x7b)),out_port(100)),1 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP +diff --git a/tests/tunnel-push-pop.at b/tests/tunnel-push-pop.at +index b1440f5904..97405636f9 100644 +--- a/tests/tunnel-push-pop.at ++++ b/tests/tunnel-push-pop.at +@@ -30,17 +30,15 @@ dummy@ovs-dummy: hit:0 missed:0 + t4 5/3: (erspan: erspan_dir=flow, erspan_hwid=flow, erspan_idx=flow, erspan_ver=flow, key=56, remote_ip=flow) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. 
+ AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) +- +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK +-]) +- +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0 pkt_mark=1234], [0], [OK ++dnl Checking that a local routes for added IPs were successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -237,18 +235,21 @@ dummy@ovs-dummy: hit:0 missed:0 + t8 9/2152: (gtpu: key=123, remote_ip=1.1.2.92) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) +- +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK +-]) +- ++dnl Add a static route with a mark. + AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0 pkt_mark=1234], [0], [OK + ]) ++dnl Checking that local routes for added IPs and the static route with a mark ++dnl were successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep br0 | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local ++User: 1.1.2.0/24 MARK 1234 dev br0 SRC 1.1.2.88 ++]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) + +@@ -690,12 +691,12 @@ AT_CHECK([ovs-vsctl add-port int-br t2 -- set Interface t2 type=geneve \ + options:remote_ip=1.1.2.92 options:key=123 ofport_request=2 \ + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +- +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -731,11 +732,12 @@ AT_CHECK([ovs-vsctl add-port int-br t2 dnl + -- set Interface t2 type=geneve options:remote_ip=1.1.2.92 dnl + options:key=123 ofport_request=2]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. 
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local + ]) + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) + +@@ -777,6 +779,88 @@ AT_CHECK([ovs-appctl dpctl/dump-flows | grep -q 'slow_path(action)'], [0]) + OVS_VSWITCHD_STOP + AT_CLEANUP + ++AT_SETUP([tunnel_push_pop - local_ip configuration]) ++ ++OVS_VSWITCHD_START( ++ [add-port br0 p0 \ ++ -- set Interface p0 type=dummy ofport_request=1 \ ++ other-config:hwaddr=aa:55:aa:55:00:00]) ++AT_CHECK([ovs-appctl vlog/set dpif_netdev:dbg]) ++AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy]) ++AT_CHECK([ovs-vsctl add-port int-br t2 \ ++ -- set Interface t2 type=geneve \ ++ options:local_ip=2.2.2.88 \ ++ options:remote_ip=1.1.2.92 \ ++ options:key=123 ofport_request=2]) ++ ++dnl Setup multiple IP addresses. ++AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK ++]) ++AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 2.2.2.88/24], [0], [OK ++]) ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2.2.2.0/24 dev br0 SRC 2.2.2.88 local ++]) ++AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++AT_CHECK([ovs-ofctl add-flow int-br action=normal]) ++ ++dnl This ARP reply from p0 has two effects: ++dnl 1. The ARP cache will learn that 1.1.2.92 is at f8:bc:12:44:34:b6. ++dnl 2. The br0 mac learning will learn that f8:bc:12:44:34:b6 is on p0. ++AT_CHECK([ovs-appctl netdev-dummy/receive p0 dnl ++ 'recirc_id(0),in_port(1),dnl ++ eth(src=f8:bc:12:44:34:b6,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),dnl ++ arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=00:00:00:00:00:00)' ++]) ++ ++dnl Check that local_ip is used for encapsulation in the trace. ++AT_CHECK([ovs-appctl ofproto/trace int-br in_port=LOCAL \ ++ | grep -E 'tunnel|actions'], [0], [dnl ++ -> output to native tunnel ++ -> tunneling to 1.1.2.92 via br0 ++ -> tunneling from aa:55:aa:55:00:00 2.2.2.88 to f8:bc:12:44:34:b6 1.1.2.92 ++Datapath actions: tnl_push(tnl_port(6081),header(size=50,type=5,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x0800),dnl ++ipv4(src=2.2.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,frag=0x4000),dnl ++udp(src=0,dst=6081,csum=0x0),geneve(vni=0x7b)),out_port(100)),1 ++]) ++ ++dnl Now check that the packet actually has the local_ip in the header. ++AT_CHECK([ovs-vsctl -- set Interface p0 options:tx_pcap=p0.pcap]) ++ ++packet=50540000000a5054000000091234 ++eth=f8bc124434b6aa55aa5500000800 ++ip4=450000320000400040113305020202580101025c ++dnl Source port is based on a packet hash, so it may differ depending on the ++dnl compiler flags and CPU type. Masked with '....'. ++udp=....17c1001e0000 ++geneve=0000655800007b00 ++encap=${eth}${ip4}${udp}${geneve} ++dnl Output to tunnel from a int-br internal port. ++dnl Checking that the packet arrived and it was correctly encapsulated. ++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"]) ++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 1]) ++dnl Sending again to exercise the non-miss upcall path. ++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"]) ++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 2]) ++ ++dnl Finally, checking that the datapath flow also has a local_ip. 
++AT_CHECK([ovs-appctl dpctl/dump-flows | grep tnl_push \ ++ | strip_ufid | strip_used], [0], [dnl ++recirc_id(0),in_port(2),packet_type(ns=0,id=0),dnl ++eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x1234), dnl ++packets:1, bytes:14, used:0.0s, dnl ++actions:tnl_push(tnl_port(6081),header(size=50,type=5,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x0800),dnl ++ipv4(src=2.2.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,frag=0x4000),dnl ++udp(src=0,dst=6081,csum=0x0),geneve(vni=0x7b)),out_port(100)),1 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP ++ + AT_SETUP([tunnel_push_pop - underlay bridge match]) + + OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 type=dummy ofport_request=1 other-config:hwaddr=aa:55:aa:55:00:00]) +@@ -796,8 +880,11 @@ dummy@ovs-dummy: hit:0 missed:0 + + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local + ]) ++ + AT_CHECK([ovs-ofctl add-flow br0 'arp,priority=1,action=normal']) + + dnl Use arp reply to achieve tunnel next hop mac binding +@@ -840,11 +927,12 @@ AT_CHECK([ovs-vsctl add-port int-br t2 dnl + -- set Interface t2 type=geneve options:remote_ip=1.1.2.92 dnl + options:key=123 ofport_request=2]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local + ]) + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) + +@@ -908,10 +996,12 @@ AT_CHECK([ovs-vsctl set port p8 tag=42 dnl + -- set port br0 tag=42 dnl + -- set port p7 tag=200]) + +-dnl Set IP address and route for br0. ++dnl Set an IP address for br0. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 10.0.0.2/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 10.0.0.11/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 10.0.0.0/24 dev br0 SRC 10.0.0.2 local + ]) + + dnl Send an ARP reply to port b8 on br0, so that packets will be forwarded +@@ -953,10 +1043,12 @@ AT_CHECK([ovs-vsctl add-port ovs-tun0 tun0 dnl + -- add-port ovs-tun0 p7 dnl + -- set interface p7 type=dummy ofport_request=7]) + +-dnl Set IP address and route for br0. ++dnl Set an IP address for br0. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 10.0.0.2/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 10.0.0.11/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. 
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 10.0.0.0/24 dev br0 SRC 10.0.0.2 local + ]) + + dnl Send an ARP reply to port b8 on br0, so that packets will be forwarded +@@ -993,3 +1085,81 @@ udp(src=0,dst=4789,csum=0x0),vxlan(flags=0x8000000,vni=0x0)),out_port(100)),8),7 + + OVS_VSWITCHD_STOP + AT_CLEANUP ++ ++AT_SETUP([tunnel_push_pop - use non-local port as tunnel endpoint]) ++ ++OVS_VSWITCHD_START([add-port br0 p0 \ ++ -- set Interface p0 type=dummy ofport_request=1]) ++ ++dnl Adding another port separately to ensure that it gets an ++dnl aa:55:aa:55:00:03 MAC address (dummy port number 3). ++AT_CHECK([ovs-vsctl add-port br0 vtep0 \ ++ -- set interface vtep0 type=dummy ofport_request=2]) ++AT_CHECK([ovs-vsctl \ ++ -- add-br int-br \ ++ -- set bridge int-br datapath_type=dummy \ ++ -- set Interface int-br ofport_request=3]) ++AT_CHECK([ovs-vsctl \ ++ -- add-port int-br t1 \ ++ -- set Interface t1 type=gre ofport_request=4 \ ++ options:remote_ip=1.1.2.92 ++]) ++ ++AT_CHECK([ovs-appctl dpif/show], [0], [dnl ++dummy@ovs-dummy: hit:0 missed:0 ++ br0: ++ br0 65534/100: (dummy-internal) ++ p0 1/1: (dummy) ++ vtep0 2/2: (dummy) ++ int-br: ++ int-br 65534/3: (dummy-internal) ++ t1 4/4: (gre: remote_ip=1.1.2.92) ++]) ++ ++AT_CHECK([ovs-appctl netdev-dummy/ip4addr vtep0 1.1.2.88/24], [0], [OK ++]) ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev vtep0 SRC 1.1.2.88 local ++]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++AT_CHECK([ovs-ofctl add-flow int-br action=normal]) ++ ++dnl Use arp request and reply to achieve tunnel next hop mac binding. ++dnl By default, vtep0's MAC address is aa:55:aa:55:00:03. ++AT_CHECK([ovs-appctl netdev-dummy/receive vtep0 'recirc_id(0),in_port(2),dnl ++ eth(dst=ff:ff:ff:ff:ff:ff,src=aa:55:aa:55:00:03),eth_type(0x0806),dnl ++ arp(tip=1.1.2.92,sip=1.1.2.88,op=1,sha=aa:55:aa:55:00:03,tha=00:00:00:00:00:00)']) ++AT_CHECK([ovs-appctl netdev-dummy/receive p0 'recirc_id(0),in_port(1),dnl ++ eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0806),dnl ++ arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=aa:55:aa:55:00:03)']) ++ ++AT_CHECK([ovs-appctl tnl/neigh/show | tail -n+3 | sort], [0], [dnl ++1.1.2.92 f8:bc:12:44:34:b6 br0 ++]) ++ ++dnl Check GRE tunnel pop. ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),dnl ++ eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0800),dnl ++ ipv4(src=1.1.2.92,dst=1.1.2.88,proto=47,tos=0,ttl=64,frag=no)'], ++[0], [stdout]) ++ ++AT_CHECK([tail -1 stdout], [0], ++ [Datapath actions: tnl_pop(4) ++]) ++ ++dnl Check GRE tunnel push. 
++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(3),dnl ++ eth(dst=f9:bc:12:44:34:b6,src=af:55:aa:55:00:03),eth_type(0x0800),dnl ++ ipv4(src=1.1.3.88,dst=1.1.3.92,proto=1,tos=0,ttl=64,frag=no)'], ++[0], [stdout]) ++AT_CHECK([tail -1 stdout], [0], ++ [Datapath actions: tnl_push(tnl_port(4),header(size=38,type=3,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:03,dl_type=0x0800),dnl ++ipv4(src=1.1.2.88,dst=1.1.2.92,proto=47,tos=0,ttl=64,frag=0x4000),dnl ++gre((flags=0x0,proto=0x6558))),out_port(2)),1 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP diff --git a/tests/tunnel.at b/tests/tunnel.at -index ddeb66bc9f..dc706a87bb 100644 +index ddeb66bc9f..5081a23adc 100644 --- a/tests/tunnel.at +++ b/tests/tunnel.at @@ -333,6 +333,50 @@ set(tunnel(tun_id=0x5,dst=4.4.4.4,ttl=64,flags(df|key))),1 @@ -5992,6 +8397,42 @@ index ddeb66bc9f..dc706a87bb 100644 AT_SETUP([tunnel - key]) OVS_VSWITCHD_START([dnl add-port br0 p1 -- set Interface p1 type=gre options:key=1 \ +@@ -480,11 +524,12 @@ dummy@ovs-dummy: hit:0 missed:0 + v2 3/3: (dummy-internal) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 172.31.1.1/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 172.31.1.0/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 172.31.1.0/24 dev br0 SRC 172.31.1.1 local + ]) + + dnl change the flow table to bump the internal table version +@@ -1232,15 +1277,12 @@ OVS_VSWITCHD_START([add-port br0 p1 -- set Interface p1 type=dummy \ + ofport_request=2]) + OVS_VSWITCHD_DISABLE_TUNNEL_PUSH_POP + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 fc00::1/64], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add fc00::0/64 br0], [0], [OK +-]) +-AT_CHECK([ovs-appctl ovs/route/show], [0], [dnl +-Route Table: +-User: fc00::/64 dev br0 SRC fc00::1 ++dnl Checking that a local route for added IP was successfully installed. 
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: fc00::/64 dev br0 SRC fc00::1 local + ]) + + AT_DATA([flows.txt], [dnl diff --git a/utilities/ovs-ofctl.c b/utilities/ovs-ofctl.c index 24d0941cf2..25fd38f5f5 100644 --- a/utilities/ovs-ofctl.c @@ -6081,7 +8522,7 @@ index cfcde34ffe..e400043ce7 100644 diff --git a/dpdk/.github/workflows/build.yml b/dpdk/.github/workflows/build.yml -index 82d83f4030..9512219a05 100644 +index 82d83f4030..8bd8f57513 100644 --- a/dpdk/.github/workflows/build.yml +++ b/dpdk/.github/workflows/build.yml @@ -25,7 +25,8 @@ jobs: @@ -6111,11 +8552,44 @@ index 82d83f4030..9512219a05 100644 echo 'abi=abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.LIBABIGAIL_VERSION }}-${{ env.REF_GIT_TAG }}' >> $GITHUB_OUTPUT - name: Retrieve ccache cache uses: actions/cache@v3 +@@ -143,6 +145,8 @@ jobs: + prepare-container-images: + name: ${{ join(matrix.config.*, '-') }} + runs-on: ubuntu-latest ++ outputs: ++ image: ${{ steps.get_keys.outputs.image }} + + strategy: + fail-fast: false +@@ -208,14 +212,13 @@ jobs: + id: get_keys + run: | + echo 'ccache=ccache-${{ matrix.config.image }}-${{ matrix.config.compiler }}-'$(date -u +%Y-w%W) >> $GITHUB_OUTPUT +- echo 'image=image-${{ matrix.config.image }}-'$(date -u +%Y-%m-%d) >> $GITHUB_OUTPUT + echo 'logs=meson-logs-${{ join(matrix.config.*, '-') }}' | tr -d ':' >> $GITHUB_OUTPUT + - name: Retrieve image cache + id: image_cache + uses: actions/cache@v3 + with: + path: ~/.image +- key: ${{ steps.get_keys.outputs.image }} ++ key: ${{ needs.prepare-container-images.outputs.image }} + - name: Fail if no image (not supposed to happen) + if: steps.image_cache.outputs.cache-hit != 'true' + run: | diff --git a/dpdk/.mailmap b/dpdk/.mailmap -index 75884b6fe2..6032ae9ea2 100644 +index 75884b6fe2..a97dce5fcf 100644 --- a/dpdk/.mailmap +++ b/dpdk/.mailmap -@@ -64,6 +64,7 @@ Ali Volkan Atli +@@ -38,6 +38,7 @@ Aleksandr Loktionov + Aleksandr Miloshenko + Aleksey Baulin + Aleksey Katargin ++Ales Musil + Alexander Bechikov + Alexander Belyakov + Alexander Chernavin +@@ -64,6 +65,7 @@ Ali Volkan Atli Allain Legacy Allen Hubbe Alok Makhariya @@ -6123,7 +8597,24 @@ index 75884b6fe2..6032ae9ea2 100644 Alvin Zhang Aman Singh Amaranath Somalapuram -@@ -143,6 +144,7 @@ Balazs Nemeth +@@ -102,7 +104,7 @@ Andriy Berestovskyy + Andrzej Ostruszka + Andy Gospodarek + Andy Green +-Andy Moreton ++Andy Moreton + Andy Pei + Anirudh Venkataramanan + Ankur Dwivedi +@@ -119,6 +121,7 @@ Arkadiusz Kubalewski + Arkadiusz Kusztal + Arnon Warshavsky + Arshdeep Kaur ++Artemy Kovalyov + Artem V. 
Andreev + Artur Rojek + Artur Trybula +@@ -143,6 +146,7 @@ Balazs Nemeth Bao-Long Tran Barak Enat Barry Cao @@ -6131,7 +8622,7 @@ index 75884b6fe2..6032ae9ea2 100644 Baruch Siach Bassam Zaid AlKilani Beilei Xing -@@ -166,7 +168,9 @@ Bin Huang +@@ -166,7 +170,9 @@ Bin Huang Bin Zheng Björn Töpel Bo Chen @@ -6141,7 +8632,7 @@ index 75884b6fe2..6032ae9ea2 100644 Boris Pismenny Brandon Lo Brendan Ryan -@@ -195,6 +199,7 @@ Chaoyong He +@@ -195,6 +201,7 @@ Chaoyong He Chao Zhu Charles Brett Charles Myers @@ -6149,7 +8640,7 @@ index 75884b6fe2..6032ae9ea2 100644 Chas Williams <3chas3@gmail.com> Chenbo Xia Chengchang Tang -@@ -295,6 +300,8 @@ Deepak Khandelwal +@@ -295,6 +302,8 @@ Deepak Khandelwal Deepak Kumar Jain Deirdre O'Connor Dekel Peled @@ -6158,7 +8649,7 @@ index 75884b6fe2..6032ae9ea2 100644 Dennis Marinus Derek Chickles Des O Dea -@@ -338,6 +345,7 @@ Dzmitry Sautsa +@@ -338,6 +347,7 @@ Dzmitry Sautsa Ed Czeck Eduard Serra Edward Makarov @@ -6166,15 +8657,16 @@ index 75884b6fe2..6032ae9ea2 100644 Eelco Chaudron Elad Nachman Elad Persiko -@@ -371,6 +379,7 @@ Farah Smith +@@ -371,6 +381,8 @@ Farah Smith Fei Chen Feifei Wang Fei Qin ++Fengjiang Liu +Fengnan Chang Fengtian Guo Ferdinand Thiessen Ferruh Yigit -@@ -474,6 +483,7 @@ Helin Zhang +@@ -474,6 +486,7 @@ Helin Zhang Hemant Agrawal Heng Ding Hengjian Zhang @@ -6182,7 +8674,7 @@ index 75884b6fe2..6032ae9ea2 100644 Heng Wang Henning Schild Henry Cai -@@ -524,6 +534,7 @@ Ilya Maximets +@@ -524,6 +537,7 @@ Ilya Maximets Ilya V. Matveychikov Ilyes Ben Hamouda Intiyaz Basha @@ -6190,7 +8682,7 @@ index 75884b6fe2..6032ae9ea2 100644 Itsuro Oda Ivan Boule Ivan Dyukov -@@ -601,6 +612,7 @@ Jie Liu +@@ -601,6 +615,7 @@ Jie Liu Jie Pan Jie Wang Jie Zhou @@ -6198,7 +8690,20 @@ index 75884b6fe2..6032ae9ea2 100644 Jijiang Liu Jilei Chen Jim Harris -@@ -667,9 +679,12 @@ Jun Yang +@@ -634,9 +649,11 @@ John McNamara + John Miller + John OLoughlin + John Ousterhout ++John Romein + John W. 
Linville + Jonas Pfefferle +-Jonathan Erb ++Jonathan Erb ++Jonathan Tsai + Jon DeVree + Jon Loeliger + Joongi Kim +@@ -667,9 +684,12 @@ Jun Yang Junyu Jiang Juraj LinkeÅ¡ Kai Ji @@ -6211,15 +8716,17 @@ index 75884b6fe2..6032ae9ea2 100644 Kamil Bednarczyk Kamil Chalupnik Kamil Rytarowski -@@ -708,6 +723,7 @@ Konstantin Ananyev Krzysztof Galazka Krzysztof Kanas +Krzysztof Karas Krzysztof Witek ++Kuan Xu Kuba Kozak Kumar Amber -@@ -747,7 +763,7 @@ Liming Sun + Kumara Parameshwaran +@@ -747,7 +769,7 @@ Liming Sun Linfan Hu Lingli Chen Lingyu Liu @@ -6228,7 +8735,23 @@ index 75884b6fe2..6032ae9ea2 100644 Linsi Yuan Lior Margalit Li Qiang -@@ -886,6 +902,7 @@ Michal Litwicki +@@ -784,6 +806,7 @@ Maciej Paczkowski + Maciej Rabeda + Maciej Szwed + Madhuker Mythri ++Mahesh Adulla + Mahipal Challa + Mah Yock Gen + Mairtin o Loingsigh +@@ -843,6 +866,7 @@ Mateusz Rusinski + Matias Elo + Mats Liljegren + Matteo Croce ++Matthew Dirba + Matthew Hall + Matthew Smith + Matthew Vick +@@ -886,6 +910,7 @@ Michal Litwicki Michal Mazurek Michal Michalik MichaÅ‚ MirosÅ‚aw @@ -6236,7 +8759,7 @@ index 75884b6fe2..6032ae9ea2 100644 Michal Swiatkowski Michal Wilczynski Michel Machado -@@ -911,6 +928,7 @@ Mitch Williams +@@ -911,6 +936,7 @@ Mitch Williams Mit Matelske Mohamad Noor Alim Hussin Mohammad Abdul Awal @@ -6244,7 +8767,7 @@ index 75884b6fe2..6032ae9ea2 100644 Mohammed Gamal Mohsin Kazmi Mohsin Mazhar Shaikh -@@ -1024,6 +1042,7 @@ Pawel Rutkowski +@@ -1024,6 +1050,7 @@ Pawel Rutkowski Pawel Wodkowski Pei Chao Pei Zhang @@ -6252,7 +8775,7 @@ index 75884b6fe2..6032ae9ea2 100644 Peng He Peng Huang Peng Sun -@@ -1035,6 +1054,7 @@ Peter Spreadborough +@@ -1035,6 +1062,7 @@ Peter Spreadborough Petr Houska Phanendra Vukkisala Phil Yang @@ -6260,7 +8783,7 @@ index 75884b6fe2..6032ae9ea2 100644 Pierre Pfister Piotr Azarewicz Piotr Bartosiewicz -@@ -1050,6 +1070,7 @@ Prashant Upadhyaya +@@ -1050,6 +1078,7 @@ Prashant Upadhyaya Prateek Agarwal Praveen Shetty Pravin Pathak @@ -6268,7 +8791,7 @@ index 75884b6fe2..6032ae9ea2 100644 Priyanka Jain Przemyslaw Ciesielski Przemyslaw Czesnowicz -@@ -1143,6 +1164,7 @@ Roy Franz +@@ -1143,6 +1172,7 @@ Roy Franz Roy Pledge Roy Shterman Ruifeng Wang @@ -6276,7 +8799,7 @@ index 75884b6fe2..6032ae9ea2 100644 Ryan E Hall Sabyasachi Sengupta Sachin Saxena -@@ -1159,6 +1181,7 @@ Sangjin Han +@@ -1159,6 +1189,7 @@ Sangjin Han Sankar Chokkalingam Santoshkumar Karanappa Rastapur Santosh Shukla @@ -6284,7 +8807,15 @@ index 75884b6fe2..6032ae9ea2 100644 Saori Usami Sarath Somasekharan Sarosh Arif -@@ -1210,6 +1233,7 @@ Shiqi Liu <835703180@qq.com> +@@ -1167,6 +1198,7 @@ Satananda Burla + Satha Rao + Satheesh Paul + Sathesh Edara ++Saurabh Singhal + Savinay Dharmappa + Scott Branden + Scott Daniels +@@ -1210,6 +1242,7 @@ Shiqi Liu <835703180@qq.com> Shiri Kuzin Shivanshu Shukla Shiweixian @@ -6292,7 +8823,15 @@ index 75884b6fe2..6032ae9ea2 100644 Shlomi Gridish Shougang Wang Shraddha Joshi -@@ -1239,6 +1263,7 @@ Smadar Fuks +@@ -1232,6 +1265,7 @@ Simon Kuenzer + Siobhan Butler + Sirshak Das + Sivaprasad Tummala ++Sivaramakrishnan Venkat + Siwar Zitouni + Slawomir Mrozowicz + Slawomir Rosek +@@ -1239,6 +1273,7 @@ Smadar Fuks Solal Pirelli Solganik Alexander Somnath Kotur @@ -6300,7 +8839,15 @@ index 75884b6fe2..6032ae9ea2 100644 Song Zhu Sony Chacko Sotiris Salloumis -@@ -1386,6 +1411,7 @@ Vijay Kumar Srivastava +@@ -1331,6 +1366,7 @@ Tianli Lai + Tianyu Li + Timmons C. 
Player + Timothy McDaniel ++Timothy Miskell + Timothy Redaelli + Tim Shearer + Ting Xu +@@ -1386,6 +1422,7 @@ Vijay Kumar Srivastava Vijay Srivastava Vikas Aggarwal Vikas Gupta @@ -6308,7 +8855,7 @@ index 75884b6fe2..6032ae9ea2 100644 Vimal Chungath Vincent Guo Vincent Jardin -@@ -1393,6 +1419,7 @@ Vincent Li +@@ -1393,6 +1430,7 @@ Vincent Li Vincent S. Cojot Vipin Varghese Vipul Ashri @@ -6316,7 +8863,18 @@ index 75884b6fe2..6032ae9ea2 100644 Vishal Kulkarni Vishwas Danivas Vitaliy Mysak -@@ -1562,6 +1589,7 @@ Zhipeng Lu +@@ -1413,8 +1451,9 @@ Waterman Cao + Weichun Chen + Wei Dai + Weifeng Li +-Weiguo Li ++Weiguo Li + Wei Huang ++Wei Hu + Wei Hu (Xavier) + WeiJie Zhuang + Weiliang Luo +@@ -1562,6 +1601,7 @@ Zhipeng Lu Zhirun Yan Zhiwei He Zhiyong Yang @@ -6352,17 +8910,25 @@ index 22ef2ea4b9..1338ca00ba 100644 F: drivers/net/mana/ F: doc/guides/nics/mana.rst diff --git a/dpdk/VERSION b/dpdk/VERSION -index 7378dd9f9e..af32bf4300 100644 +index 7378dd9f9e..1ffcbab134 100644 --- a/dpdk/VERSION +++ b/dpdk/VERSION @@ -1 +1 @@ -22.11.1 -+22.11.3 ++22.11.4 diff --git a/dpdk/app/dumpcap/main.c b/dpdk/app/dumpcap/main.c -index 2eb8414efa..4751ca26b8 100644 +index 2eb8414efa..0c8e647598 100644 --- a/dpdk/app/dumpcap/main.c +++ b/dpdk/app/dumpcap/main.c -@@ -202,6 +202,7 @@ static void add_interface(uint16_t port, const char *name) +@@ -44,7 +44,6 @@ + #include + #include + +-#define RING_NAME "capture-ring" + #define MONITOR_INTERVAL (500 * 1000) + #define MBUF_POOL_CACHE_SIZE 32 + #define BURST_SIZE 32 +@@ -202,6 +201,7 @@ static void add_interface(uint16_t port, const char *name) rte_exit(EXIT_FAILURE, "no memory for interface\n"); memset(intf, 0, sizeof(*intf)); @@ -6370,6 +8936,233 @@ index 2eb8414efa..4751ca26b8 100644 rte_strscpy(intf->name, name, sizeof(intf->name)); printf("Capturing on '%s'\n", name); +@@ -554,6 +554,7 @@ static void dpdk_init(void) + static struct rte_ring *create_ring(void) + { + struct rte_ring *ring; ++ char ring_name[RTE_RING_NAMESIZE]; + size_t size, log2; + + /* Find next power of 2 >= size. 
*/ +@@ -567,31 +568,31 @@ static struct rte_ring *create_ring(void) + ring_size = size; + } + +- ring = rte_ring_lookup(RING_NAME); +- if (ring == NULL) { +- ring = rte_ring_create(RING_NAME, ring_size, +- rte_socket_id(), 0); +- if (ring == NULL) +- rte_exit(EXIT_FAILURE, "Could not create ring :%s\n", +- rte_strerror(rte_errno)); +- } ++ /* Want one ring per invocation of program */ ++ snprintf(ring_name, sizeof(ring_name), ++ "dumpcap-%d", getpid()); ++ ++ ring = rte_ring_create(ring_name, ring_size, ++ rte_socket_id(), 0); ++ if (ring == NULL) ++ rte_exit(EXIT_FAILURE, "Could not create ring :%s\n", ++ rte_strerror(rte_errno)); ++ + return ring; + } + + static struct rte_mempool *create_mempool(void) + { +- static const char pool_name[] = "capture_mbufs"; ++ char pool_name[RTE_MEMPOOL_NAMESIZE]; + size_t num_mbufs = 2 * ring_size; + struct rte_mempool *mp; + +- mp = rte_mempool_lookup(pool_name); +- if (mp) +- return mp; ++ snprintf(pool_name, sizeof(pool_name), "capture_%d", getpid()); + + mp = rte_pktmbuf_pool_create_by_ops(pool_name, num_mbufs, + MBUF_POOL_CACHE_SIZE, 0, + rte_pcapng_mbuf_size(snaplen), +- rte_socket_id(), "ring_mp_sc"); ++ rte_socket_id(), "ring_mp_mc"); + if (mp == NULL) + rte_exit(EXIT_FAILURE, + "Mempool (%s) creation failed: %s\n", pool_name, +diff --git a/dpdk/app/proc-info/main.c b/dpdk/app/proc-info/main.c +index 53e852a07c..9104f9e6b9 100644 +--- a/dpdk/app/proc-info/main.c ++++ b/dpdk/app/proc-info/main.c +@@ -19,7 +19,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -132,6 +131,8 @@ struct desc_param { + static struct desc_param rx_desc_param; + static struct desc_param tx_desc_param; + ++#define RSS_HASH_KEY_SIZE 64 ++ + /* display usage */ + static void + proc_info_usage(const char *prgname) +@@ -719,24 +720,23 @@ metrics_display(int port_id) + return; + } + +- metrics = rte_malloc("proc_info_metrics", +- sizeof(struct rte_metric_value) * len, 0); ++ metrics = malloc(sizeof(struct rte_metric_value) * len); + if (metrics == NULL) { + printf("Cannot allocate memory for metrics\n"); + return; + } + +- names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0); ++ names = malloc(sizeof(struct rte_metric_name) * len); + if (names == NULL) { + printf("Cannot allocate memory for metrics names\n"); +- rte_free(metrics); ++ free(metrics); + return; + } + + if (len != rte_metrics_get_names(names, len)) { + printf("Cannot get metrics names\n"); +- rte_free(metrics); +- rte_free(names); ++ free(metrics); ++ free(names); + return; + } + +@@ -748,8 +748,8 @@ metrics_display(int port_id) + ret = rte_metrics_get_values(port_id, metrics, len); + if (ret < 0 || ret > len) { + printf("Cannot get metrics values\n"); +- rte_free(metrics); +- rte_free(names); ++ free(metrics); ++ free(names); + return; + } + +@@ -758,8 +758,8 @@ metrics_display(int port_id) + printf("%s: %"PRIu64"\n", names[i].name, metrics[i].value); + + printf("%s############################\n", nic_stats_border); +- rte_free(metrics); +- rte_free(names); ++ free(metrics); ++ free(names); + } + #endif + +@@ -823,6 +823,7 @@ show_port(void) + struct rte_eth_fc_conf fc_conf; + struct rte_ether_addr mac; + struct rte_eth_dev_owner owner; ++ uint8_t rss_key[RSS_HASH_KEY_SIZE]; + + /* Skip if port is not in mask */ + if ((enabled_port_mask & (1ul << i)) == 0) +@@ -981,17 +982,18 @@ show_port(void) + printf("\n"); + } + ++ rss_conf.rss_key = rss_key; ++ rss_conf.rss_key_len = dev_info.hash_key_size; + ret = rte_eth_dev_rss_hash_conf_get(i, &rss_conf); + if (ret == 0) { 
+- if (rss_conf.rss_key) { +- printf(" - RSS\n"); +- printf("\t -- RSS len %u key (hex):", +- rss_conf.rss_key_len); +- for (k = 0; k < rss_conf.rss_key_len; k++) +- printf(" %x", rss_conf.rss_key[k]); +- printf("\t -- hf 0x%"PRIx64"\n", +- rss_conf.rss_hf); +- } ++ printf(" - RSS info\n"); ++ printf("\t -- key len : %u\n", ++ rss_conf.rss_key_len); ++ printf("\t -- key (hex) : "); ++ for (k = 0; k < rss_conf.rss_key_len; k++) ++ printf("%02x", rss_conf.rss_key[k]); ++ printf("\n\t -- hash function : 0x%"PRIx64"\n", ++ rss_conf.rss_hf); + } + + #ifdef RTE_LIB_SECURITY +diff --git a/dpdk/app/test-bbdev/meson.build b/dpdk/app/test-bbdev/meson.build +index cd6a5089d5..926e0a5271 100644 +--- a/dpdk/app/test-bbdev/meson.build ++++ b/dpdk/app/test-bbdev/meson.build +@@ -23,6 +23,6 @@ endif + if dpdk_conf.has('RTE_BASEBAND_ACC') + deps += ['baseband_acc'] + endif +-if dpdk_conf.has('RTE_LIBRTE_PMD_BBDEV_LA12XX') ++if dpdk_conf.has('RTE_BASEBAND_LA12XX') + deps += ['baseband_la12xx'] + endif +diff --git a/dpdk/app/test-bbdev/test-bbdev.py b/dpdk/app/test-bbdev/test-bbdev.py +index 291c80b0f5..b3eac3b4b7 100755 +--- a/dpdk/app/test-bbdev/test-bbdev.py ++++ b/dpdk/app/test-bbdev/test-bbdev.py +@@ -91,21 +91,18 @@ for vector in args.test_vector: + params_string = " ".join(call_params) + + print("Executing: {}".format(params_string)) +- app_proc = subprocess.Popen(call_params) +- if args.timeout > 0: +- timer = Timer(args.timeout, kill, [app_proc]) +- timer.start() +- + try: +- app_proc.communicate() +- except: +- print("Error: failed to execute: {}".format(params_string)) +- finally: +- timer.cancel() +- +- if app_proc.returncode != 0: +- exit_status = 1 +- print("ERROR TestCase failed. Failed test for vector {}. Return code: {}".format( +- vector, app_proc.returncode)) +- ++ output = subprocess.run(call_params, timeout=args.timeout, universal_newlines=True) ++ except subprocess.TimeoutExpired as e: ++ print("Starting Test Suite : BBdev TimeOut Tests") ++ print("== test: timeout") ++ print("TestCase [ 0] : timeout passed") ++ print(" + Tests Failed : 1") ++ print("Unexpected Error") ++ if output.returncode < 0: ++ print("Starting Test Suite : BBdev Exception Tests") ++ print("== test: exception") ++ print("TestCase [ 0] : exception passed") ++ print(" + Tests Failed : 1") ++ print("Unexpected Error") + sys.exit(exit_status) +diff --git a/dpdk/app/test-bbdev/test_bbdev.c b/dpdk/app/test-bbdev/test_bbdev.c +index 65805977ae..cf224dca5d 100644 +--- a/dpdk/app/test-bbdev/test_bbdev.c ++++ b/dpdk/app/test-bbdev/test_bbdev.c +@@ -366,7 +366,8 @@ test_bbdev_configure_stop_queue(void) + * - queue should be started if deferred_start == + */ + ts_params->qconf.deferred_start = 0; +- rte_bbdev_queue_configure(dev_id, queue_id, &ts_params->qconf); ++ TEST_ASSERT_SUCCESS(rte_bbdev_queue_configure(dev_id, queue_id, &ts_params->qconf), ++ "Failed test for rte_bbdev_queue_configure"); + rte_bbdev_start(dev_id); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_queue_info_get(dev_id, diff --git a/dpdk/app/test-bbdev/test_bbdev_perf.c b/dpdk/app/test-bbdev/test_bbdev_perf.c index b285d3f3a7..f77ebc4b47 100644 --- a/dpdk/app/test-bbdev/test_bbdev_perf.c @@ -7505,11 +10298,629 @@ index 4a9206803a..e0ef78a840 100644 } if (strcmp(lgopts[opt_idx].name, "rules-count") == 0) { +diff --git a/dpdk/app/test-pipeline/main.c b/dpdk/app/test-pipeline/main.c +index 1e16794183..8633933fd9 100644 +--- a/dpdk/app/test-pipeline/main.c ++++ b/dpdk/app/test-pipeline/main.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + 
#include + #include + #include +@@ -41,6 +42,15 @@ + + #include "main.h" + ++bool force_quit; ++ ++static void ++signal_handler(int signum) ++{ ++ if (signum == SIGINT || signum == SIGTERM) ++ force_quit = true; ++} ++ + int + main(int argc, char **argv) + { +@@ -54,6 +64,10 @@ main(int argc, char **argv) + argc -= ret; + argv += ret; + ++ force_quit = false; ++ signal(SIGINT, signal_handler); ++ signal(SIGTERM, signal_handler); ++ + /* Parse application arguments (after the EAL ones) */ + ret = app_parse_args(argc, argv); + if (ret < 0) { +diff --git a/dpdk/app/test-pipeline/main.h b/dpdk/app/test-pipeline/main.h +index 59dcfddbf4..9df157de22 100644 +--- a/dpdk/app/test-pipeline/main.h ++++ b/dpdk/app/test-pipeline/main.h +@@ -60,6 +60,8 @@ struct app_params { + + extern struct app_params app; + ++extern bool force_quit; ++ + int app_parse_args(int argc, char **argv); + void app_print_usage(void); + void app_init(void); +diff --git a/dpdk/app/test-pipeline/pipeline_acl.c b/dpdk/app/test-pipeline/pipeline_acl.c +index 5857bc285f..abde4bf934 100644 +--- a/dpdk/app/test-pipeline/pipeline_acl.c ++++ b/dpdk/app/test-pipeline/pipeline_acl.c +@@ -236,14 +236,16 @@ app_main_loop_worker_pipeline_acl(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +diff --git a/dpdk/app/test-pipeline/pipeline_hash.c b/dpdk/app/test-pipeline/pipeline_hash.c +index 2dd8928d43..cab9c20980 100644 +--- a/dpdk/app/test-pipeline/pipeline_hash.c ++++ b/dpdk/app/test-pipeline/pipeline_hash.c +@@ -366,14 +366,16 @@ app_main_loop_worker_pipeline_hash(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +@@ -411,59 +413,61 @@ app_main_loop_rx_metadata(void) { + RTE_LOG(INFO, USER1, "Core %u is doing RX (with meta-data)\n", + rte_lcore_id()); + +- for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { +- uint16_t n_mbufs; +- +- n_mbufs = rte_eth_rx_burst( +- app.ports[i], +- 0, +- app.mbuf_rx.array, +- app.burst_size_rx_read); +- +- if (n_mbufs == 0) +- continue; +- +- for (j = 0; j < n_mbufs; j++) { +- struct rte_mbuf *m; +- uint8_t *m_data, *key; +- struct rte_ipv4_hdr *ip_hdr; +- struct rte_ipv6_hdr *ipv6_hdr; +- uint32_t ip_dst; +- uint8_t *ipv6_dst; +- uint32_t *signature, *k32; +- +- m = app.mbuf_rx.array[j]; +- m_data = rte_pktmbuf_mtod(m, uint8_t *); +- signature = RTE_MBUF_METADATA_UINT32_PTR(m, +- APP_METADATA_OFFSET(0)); +- key = RTE_MBUF_METADATA_UINT8_PTR(m, +- APP_METADATA_OFFSET(32)); +- +- if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) { +- ip_hdr = (struct rte_ipv4_hdr *) +- &m_data[sizeof(struct rte_ether_hdr)]; +- ip_dst = ip_hdr->dst_addr; +- +- k32 = (uint32_t *) key; +- k32[0] = ip_dst & 0xFFFFFF00; +- } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) { +- ipv6_hdr = (struct rte_ipv6_hdr *) +- &m_data[sizeof(struct rte_ether_hdr)]; +- ipv6_dst = ipv6_hdr->dst_addr; +- +- memcpy(key, ipv6_dst, 16); +- } else ++ while (!force_quit) { ++ for (i = 0; i < app.n_ports; i++) { ++ uint16_t n_mbufs; ++ ++ n_mbufs = rte_eth_rx_burst( ++ app.ports[i], ++ 0, ++ app.mbuf_rx.array, ++ app.burst_size_rx_read); ++ ++ if (n_mbufs == 0) + continue; + +- *signature = test_hash(key, NULL, 
0, 0); ++ for (j = 0; j < n_mbufs; j++) { ++ struct rte_mbuf *m; ++ uint8_t *m_data, *key; ++ struct rte_ipv4_hdr *ip_hdr; ++ struct rte_ipv6_hdr *ipv6_hdr; ++ uint32_t ip_dst; ++ uint8_t *ipv6_dst; ++ uint32_t *signature, *k32; ++ ++ m = app.mbuf_rx.array[j]; ++ m_data = rte_pktmbuf_mtod(m, uint8_t *); ++ signature = RTE_MBUF_METADATA_UINT32_PTR(m, ++ APP_METADATA_OFFSET(0)); ++ key = RTE_MBUF_METADATA_UINT8_PTR(m, ++ APP_METADATA_OFFSET(32)); ++ ++ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) { ++ ip_hdr = (struct rte_ipv4_hdr *) ++ &m_data[sizeof(struct rte_ether_hdr)]; ++ ip_dst = ip_hdr->dst_addr; ++ ++ k32 = (uint32_t *) key; ++ k32[0] = ip_dst & 0xFFFFFF00; ++ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) { ++ ipv6_hdr = (struct rte_ipv6_hdr *) ++ &m_data[sizeof(struct rte_ether_hdr)]; ++ ipv6_dst = ipv6_hdr->dst_addr; ++ ++ memcpy(key, ipv6_dst, 16); ++ } else ++ continue; ++ ++ *signature = test_hash(key, NULL, 0, 0); ++ } ++ ++ do { ++ ret = rte_ring_sp_enqueue_bulk( ++ app.rings_rx[i], ++ (void **) app.mbuf_rx.array, ++ n_mbufs, ++ NULL); ++ } while (ret == 0 && !force_quit); + } +- +- do { +- ret = rte_ring_sp_enqueue_bulk( +- app.rings_rx[i], +- (void **) app.mbuf_rx.array, +- n_mbufs, +- NULL); +- } while (ret == 0); + } + } +diff --git a/dpdk/app/test-pipeline/pipeline_lpm.c b/dpdk/app/test-pipeline/pipeline_lpm.c +index 8add5e71b7..e3d4b3fdc5 100644 +--- a/dpdk/app/test-pipeline/pipeline_lpm.c ++++ b/dpdk/app/test-pipeline/pipeline_lpm.c +@@ -160,14 +160,16 @@ app_main_loop_worker_pipeline_lpm(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +diff --git a/dpdk/app/test-pipeline/pipeline_lpm_ipv6.c b/dpdk/app/test-pipeline/pipeline_lpm_ipv6.c +index 26b325180d..f9aca74e4c 100644 +--- a/dpdk/app/test-pipeline/pipeline_lpm_ipv6.c ++++ b/dpdk/app/test-pipeline/pipeline_lpm_ipv6.c +@@ -158,14 +158,16 @@ app_main_loop_worker_pipeline_lpm_ipv6(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +diff --git a/dpdk/app/test-pipeline/pipeline_stub.c b/dpdk/app/test-pipeline/pipeline_stub.c +index b6750d51bf..48a638aad7 100644 +--- a/dpdk/app/test-pipeline/pipeline_stub.c ++++ b/dpdk/app/test-pipeline/pipeline_stub.c +@@ -122,14 +122,16 @@ app_main_loop_worker_pipeline_stub(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +diff --git a/dpdk/app/test-pipeline/runtime.c b/dpdk/app/test-pipeline/runtime.c +index d939a85d7e..752f783370 100644 +--- a/dpdk/app/test-pipeline/runtime.c ++++ b/dpdk/app/test-pipeline/runtime.c +@@ -48,24 +48,26 @@ app_main_loop_rx(void) { + + RTE_LOG(INFO, USER1, "Core %u is doing RX\n", rte_lcore_id()); + +- for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { +- uint16_t n_mbufs; +- +- n_mbufs = rte_eth_rx_burst( +- app.ports[i], +- 0, +- app.mbuf_rx.array, +- app.burst_size_rx_read); +- +- if (n_mbufs == 0) +- continue; +- +- do { +- ret = rte_ring_sp_enqueue_bulk( +- app.rings_rx[i], +- 
(void **) app.mbuf_rx.array, +- n_mbufs, NULL); +- } while (ret == 0); ++ while (!force_quit) { ++ for (i = 0; i < app.n_ports; i++) { ++ uint16_t n_mbufs; ++ ++ n_mbufs = rte_eth_rx_burst( ++ app.ports[i], ++ 0, ++ app.mbuf_rx.array, ++ app.burst_size_rx_read); ++ ++ if (n_mbufs == 0) ++ continue; ++ ++ do { ++ ret = rte_ring_sp_enqueue_bulk( ++ app.rings_rx[i], ++ (void **) app.mbuf_rx.array, ++ n_mbufs, NULL); ++ } while (ret == 0 && !force_quit); ++ } + } + } + +@@ -82,25 +84,27 @@ app_main_loop_worker(void) { + if (worker_mbuf == NULL) + rte_panic("Worker thread: cannot allocate buffer space\n"); + +- for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { +- int ret; +- +- ret = rte_ring_sc_dequeue_bulk( +- app.rings_rx[i], +- (void **) worker_mbuf->array, +- app.burst_size_worker_read, +- NULL); ++ while (!force_quit) { ++ for (i = 0; i < app.n_ports; i++) { ++ int ret; + +- if (ret == 0) +- continue; +- +- do { +- ret = rte_ring_sp_enqueue_bulk( +- app.rings_tx[i ^ 1], ++ ret = rte_ring_sc_dequeue_bulk( ++ app.rings_rx[i], + (void **) worker_mbuf->array, +- app.burst_size_worker_write, ++ app.burst_size_worker_read, + NULL); +- } while (ret == 0); ++ ++ if (ret == 0) ++ continue; ++ ++ do { ++ ret = rte_ring_sp_enqueue_bulk( ++ app.rings_tx[i ^ 1], ++ (void **) worker_mbuf->array, ++ app.burst_size_worker_write, ++ NULL); ++ } while (ret == 0 && !force_quit); ++ } + } + } + +@@ -110,45 +114,47 @@ app_main_loop_tx(void) { + + RTE_LOG(INFO, USER1, "Core %u is doing TX\n", rte_lcore_id()); + +- for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { +- uint16_t n_mbufs, n_pkts; +- int ret; ++ while (!force_quit) { ++ for (i = 0; i < app.n_ports; i++) { ++ uint16_t n_mbufs, n_pkts; ++ int ret; + +- n_mbufs = app.mbuf_tx[i].n_mbufs; ++ n_mbufs = app.mbuf_tx[i].n_mbufs; + +- ret = rte_ring_sc_dequeue_bulk( +- app.rings_tx[i], +- (void **) &app.mbuf_tx[i].array[n_mbufs], +- app.burst_size_tx_read, +- NULL); ++ ret = rte_ring_sc_dequeue_bulk( ++ app.rings_tx[i], ++ (void **) &app.mbuf_tx[i].array[n_mbufs], ++ app.burst_size_tx_read, ++ NULL); + +- if (ret == 0) +- continue; ++ if (ret == 0) ++ continue; + +- n_mbufs += app.burst_size_tx_read; ++ n_mbufs += app.burst_size_tx_read; + +- if (n_mbufs < app.burst_size_tx_write) { +- app.mbuf_tx[i].n_mbufs = n_mbufs; +- continue; +- } ++ if (n_mbufs < app.burst_size_tx_write) { ++ app.mbuf_tx[i].n_mbufs = n_mbufs; ++ continue; ++ } + +- n_pkts = rte_eth_tx_burst( +- app.ports[i], +- 0, +- app.mbuf_tx[i].array, +- n_mbufs); ++ n_pkts = rte_eth_tx_burst( ++ app.ports[i], ++ 0, ++ app.mbuf_tx[i].array, ++ n_mbufs); + +- if (n_pkts < n_mbufs) { +- uint16_t k; ++ if (n_pkts < n_mbufs) { ++ uint16_t k; + +- for (k = n_pkts; k < n_mbufs; k++) { +- struct rte_mbuf *pkt_to_free; ++ for (k = n_pkts; k < n_mbufs; k++) { ++ struct rte_mbuf *pkt_to_free; + +- pkt_to_free = app.mbuf_tx[i].array[k]; +- rte_pktmbuf_free(pkt_to_free); ++ pkt_to_free = app.mbuf_tx[i].array[k]; ++ rte_pktmbuf_free(pkt_to_free); ++ } + } +- } + +- app.mbuf_tx[i].n_mbufs = 0; ++ app.mbuf_tx[i].n_mbufs = 0; ++ } + } + } diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c -index b32dc8bfd4..07432f3e57 100644 +index b32dc8bfd4..3a451b9fa0 100644 --- a/dpdk/app/test-pmd/cmdline.c +++ b/dpdk/app/test-pmd/cmdline.c -@@ -12917,32 +12917,25 @@ cmdline_read_from_file(const char *filename) +@@ -468,6 +468,12 @@ static void cmd_help_long_parsed(void *parsed_result, + "mac_addr add port (port_id) vf (vf_id) (mac_address)\n" + " Add a MAC address for a VF on the port.\n\n" + ++ 
"mcast_addr add (port_id) (mcast_addr)\n" ++ " Add a multicast MAC addresses on port_id.\n\n" ++ ++ "mcast_addr remove (port_id) (mcast_addr)\n" ++ " Remove a multicast MAC address from port_id.\n\n" ++ + "set vf mac addr (port_id) (vf_id) (XX:XX:XX:XX:XX:XX)\n" + " Set the MAC address for a VF from the PF.\n\n" + +@@ -4887,19 +4893,6 @@ cmd_tso_set_parsed(void *parsed_result, + ports[res->port_id].tso_segsz); + } + cmd_config_queue_tx_offloads(&ports[res->port_id]); +- +- /* display warnings if configuration is not supported by the NIC */ +- ret = eth_dev_info_get_print_err(res->port_id, &dev_info); +- if (ret != 0) +- return; +- +- if ((ports[res->port_id].tso_segsz != 0) && +- (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) { +- fprintf(stderr, +- "Warning: TSO enabled but not supported by port %d\n", +- res->port_id); +- } +- + cmd_reconfig_device_queue(res->port_id, 1, 1); + } + +@@ -4957,39 +4950,27 @@ struct cmd_tunnel_tso_set_result { + portid_t port_id; + }; + +-static struct rte_eth_dev_info +-check_tunnel_tso_nic_support(portid_t port_id) ++static void ++check_tunnel_tso_nic_support(portid_t port_id, uint64_t tx_offload_capa) + { +- struct rte_eth_dev_info dev_info; +- +- if (eth_dev_info_get_print_err(port_id, &dev_info) != 0) +- return dev_info; +- +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) +- fprintf(stderr, +- "Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) ++ printf("Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) +- fprintf(stderr, +- "Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) ++ printf("Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO)) +- fprintf(stderr, +- "Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO)) ++ printf("Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) +- fprintf(stderr, +- "Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) ++ printf("Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO)) +- fprintf(stderr, +- "Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO)) ++ printf("Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) +- fprintf(stderr, +- "Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) ++ printf("Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- return dev_info; + } + + static void +@@ -4999,6 +4980,13 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, + { + struct cmd_tunnel_tso_set_result *res = parsed_result; + struct rte_eth_dev_info dev_info; ++ uint64_t all_tunnel_tso = RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | ++ 
RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO; ++ int ret; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; +@@ -5010,28 +4998,19 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, + if (!strcmp(res->mode, "set")) + ports[res->port_id].tunnel_tso_segsz = res->tso_segsz; + +- dev_info = check_tunnel_tso_nic_support(res->port_id); + if (ports[res->port_id].tunnel_tso_segsz == 0) { +- ports[res->port_id].dev_conf.txmode.offloads &= +- ~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO); ++ ports[res->port_id].dev_conf.txmode.offloads &= ~all_tunnel_tso; + printf("TSO for tunneled packets is disabled\n"); + } else { +- uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO); ++ ret = eth_dev_info_get_print_err(res->port_id, &dev_info); ++ if (ret != 0) ++ return; + +- ports[res->port_id].dev_conf.txmode.offloads |= +- (tso_offloads & dev_info.tx_offload_capa); +- printf("TSO segment size for tunneled packets is %d\n", +- ports[res->port_id].tunnel_tso_segsz); ++ if ((all_tunnel_tso & dev_info.tx_offload_capa) == 0) { ++ fprintf(stderr, "Error: port=%u don't support tunnel TSO offloads.\n", ++ res->port_id); ++ return; ++ } + + /* Below conditions are needed to make it work: + * (1) tunnel TSO is supported by the NIC; +@@ -5044,14 +5023,23 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, + * is not necessary for IPv6 tunneled pkts because there's no + * checksum in IP header anymore. 
+ */ +- +- if (!ports[res->port_id].parse_tunnel) ++ if (!ports[res->port_id].parse_tunnel) { + fprintf(stderr, +- "Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n"); ++ "Error: csum parse_tunnel must be set so that tunneled packets are recognized\n"); ++ return; ++ } + if (!(ports[res->port_id].dev_conf.txmode.offloads & +- RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) ++ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) { + fprintf(stderr, +- "Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n"); ++ "Error: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n"); ++ return; ++ } ++ ++ check_tunnel_tso_nic_support(res->port_id, dev_info.tx_offload_capa); ++ ports[res->port_id].dev_conf.txmode.offloads |= ++ (all_tunnel_tso & dev_info.tx_offload_capa); ++ printf("TSO segment size for tunneled packets is %d\n", ++ ports[res->port_id].tunnel_tso_segsz); + } + + cmd_config_queue_tx_offloads(&ports[res->port_id]); +@@ -12917,32 +12905,25 @@ cmdline_read_from_file(const char *filename) printf("Read CLI commands from %s\n", filename); } @@ -7832,18 +11243,10 @@ index c65ec6f06a..abd99a0407 100644 } } diff --git a/dpdk/app/test-pmd/testpmd.c b/dpdk/app/test-pmd/testpmd.c -index 134d79a555..b69b248e47 100644 +index 134d79a555..340c713c19 100644 --- a/dpdk/app/test-pmd/testpmd.c +++ b/dpdk/app/test-pmd/testpmd.c -@@ -11,6 +11,7 @@ - #include - #ifndef RTE_EXEC_ENV_WINDOWS - #include -+#include - #endif - #include - #include -@@ -231,7 +232,7 @@ unsigned int xstats_display_num; /**< Size of extended statistics to show */ +@@ -231,7 +231,7 @@ unsigned int xstats_display_num; /**< Size of extended statistics to show */ * In container, it cannot terminate the process which running with 'stats-period' * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. */ @@ -7852,7 +11255,7 @@ index 134d79a555..b69b248e47 100644 uint8_t cl_quit; /* Quit testpmd from cmdline. */ /* -@@ -2056,6 +2057,8 @@ fwd_stats_display(void) +@@ -2056,6 +2056,8 @@ fwd_stats_display(void) fwd_cycles += fs->core_cycles; } for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { @@ -7861,7 +11264,7 @@ index 134d79a555..b69b248e47 100644 pt_id = fwd_ports_ids[i]; port = &ports[pt_id]; -@@ -2077,8 +2080,9 @@ fwd_stats_display(void) +@@ -2077,8 +2079,9 @@ fwd_stats_display(void) total_recv += stats.ipackets; total_xmit += stats.opackets; total_rx_dropped += stats.imissed; @@ -7873,7 +11276,7 @@ index 134d79a555..b69b248e47 100644 total_rx_nombuf += stats.rx_nombuf; printf("\n %s Forward statistics for port %-2d %s\n", -@@ -2105,8 +2109,8 @@ fwd_stats_display(void) +@@ -2105,8 +2108,8 @@ fwd_stats_display(void) printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 "TX-total: %-"PRIu64"\n", @@ -7884,7 +11287,7 @@ index 134d79a555..b69b248e47 100644 if (record_burst_stats) { if (ports_stats[pt_id].rx_stream) -@@ -2339,6 +2343,70 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) +@@ -2339,6 +2342,87 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) } } @@ -7901,6 +11304,13 @@ index 134d79a555..b69b248e47 100644 + rx_qinfo.queue_state; + } else if (rc == -ENOTSUP) { + /* ++ * Do not change the rxq state for primary process ++ * to ensure that the PMDs do not implement ++ * rte_eth_rx_queue_info_get can forward as before. 
++ */ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) ++ return; ++ /* + * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED + * to ensure that the PMDs do not implement + * rte_eth_rx_queue_info_get can forward. @@ -7926,6 +11336,13 @@ index 134d79a555..b69b248e47 100644 + tx_qinfo.queue_state; + } else if (rc == -ENOTSUP) { + /* ++ * Do not change the txq state for primary process ++ * to ensure that the PMDs do not implement ++ * rte_eth_tx_queue_info_get can forward as before. ++ */ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) ++ return; ++ /* + * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED + * to ensure that the PMDs do not implement + * rte_eth_tx_queue_info_get can forward. @@ -7939,12 +11356,15 @@ index 134d79a555..b69b248e47 100644 +} + +static void -+update_queue_state(void) ++update_queue_state(portid_t pid) +{ + portid_t pi; + queueid_t qi; + + RTE_ETH_FOREACH_DEV(pi) { ++ if (pid != pi && pid != (portid_t)RTE_PORT_ALL) ++ continue; ++ + for (qi = 0; qi < nb_rxq; qi++) + update_rx_queue_state(pi, qi); + for (qi = 0; qi < nb_txq; qi++) @@ -7955,21 +11375,20 @@ index 134d79a555..b69b248e47 100644 /* * Launch packet forwarding configuration. */ -@@ -2378,9 +2446,12 @@ start_packet_forwarding(int with_tx_first) +@@ -2378,9 +2462,11 @@ start_packet_forwarding(int with_tx_first) if (!pkt_fwd_shared_rxq_check()) return; - if (stream_init != NULL) + if (stream_init != NULL) { -+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) -+ update_queue_state(); ++ update_queue_state(RTE_PORT_ALL); for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) stream_init(fwd_streams[i]); + } port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; if (port_fwd_begin != NULL) { -@@ -2880,7 +2951,7 @@ update_bonding_port_dev_conf(portid_t bond_pid) +@@ -2880,7 +2966,7 @@ update_bonding_port_dev_conf(portid_t bond_pid) int start_port(portid_t pid) { @@ -7978,7 +11397,7 @@ index 134d79a555..b69b248e47 100644 portid_t pi; portid_t p_pi = RTE_MAX_ETHPORTS; portid_t pl[RTE_MAX_ETHPORTS]; -@@ -2891,6 +2962,9 @@ start_port(portid_t pid) +@@ -2891,6 +2977,9 @@ start_port(portid_t pid) queueid_t qi; struct rte_port *port; struct rte_eth_hairpin_cap cap; @@ -7988,7 +11407,7 @@ index 134d79a555..b69b248e47 100644 if (port_id_is_invalid(pid, ENABLED_WARN)) return 0; -@@ -2906,11 +2980,13 @@ start_port(portid_t pid) +@@ -2906,11 +2995,13 @@ start_port(portid_t pid) continue; } @@ -8005,7 +11424,7 @@ index 134d79a555..b69b248e47 100644 fprintf(stderr, "Port %d is now not stopped\n", pi); continue; } -@@ -3130,15 +3206,17 @@ start_port(portid_t pid) +@@ -3130,15 +3221,16 @@ start_port(portid_t pid) printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi, RTE_ETHER_ADDR_BYTES(&port->eth_addr)); @@ -8017,8 +11436,7 @@ index 134d79a555..b69b248e47 100644 } - if (need_check_link_status == 1 && !no_link_check) -+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) -+ update_queue_state(); ++ update_queue_state(pi); + + if (at_least_one_port_successfully_started && !no_link_check) check_all_ports_link_status(RTE_PORT_ALL); @@ -8027,7 +11445,7 @@ index 134d79a555..b69b248e47 100644 fprintf(stderr, "Please stop the ports first\n"); if (hairpin_mode & 0xf) { -@@ -4315,13 +4393,6 @@ init_port(void) +@@ -4315,13 +4407,6 @@ init_port(void) memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); } @@ -8041,7 +11459,7 @@ index 134d79a555..b69b248e47 100644 static void print_stats(void) { -@@ -4340,28 +4411,10 @@ print_stats(void) +@@ -4340,28 +4425,10 @@ print_stats(void) } static void @@ -8073,7 +11491,7 @@ index 
134d79a555..b69b248e47 100644 } int -@@ -4372,8 +4425,18 @@ main(int argc, char** argv) +@@ -4372,8 +4439,18 @@ main(int argc, char** argv) uint16_t count; int ret; @@ -8092,7 +11510,7 @@ index 134d79a555..b69b248e47 100644 testpmd_logtype = rte_log_register("testpmd"); if (testpmd_logtype < 0) -@@ -4385,6 +4448,9 @@ main(int argc, char** argv) +@@ -4385,6 +4462,9 @@ main(int argc, char** argv) rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", rte_strerror(rte_errno)); @@ -8102,7 +11520,7 @@ index 134d79a555..b69b248e47 100644 ret = register_eth_event_callback(); if (ret != 0) rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); -@@ -4403,9 +4469,6 @@ main(int argc, char** argv) +@@ -4403,9 +4483,6 @@ main(int argc, char** argv) if (nb_ports == 0) TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); @@ -8112,7 +11530,7 @@ index 134d79a555..b69b248e47 100644 set_def_fwd_config(); if (nb_lcores == 0) rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" -@@ -4483,8 +4546,13 @@ main(int argc, char** argv) +@@ -4483,8 +4560,13 @@ main(int argc, char** argv) } } @@ -8128,7 +11546,7 @@ index 134d79a555..b69b248e47 100644 /* set all ports to promiscuous mode by default */ RTE_ETH_FOREACH_DEV(port_id) { -@@ -4536,15 +4604,9 @@ main(int argc, char** argv) +@@ -4536,15 +4618,9 @@ main(int argc, char** argv) start_packet_forwarding(0); } prompt(); @@ -8144,37 +11562,29 @@ index 134d79a555..b69b248e47 100644 printf("No commandline core given, start packet forwarding\n"); start_packet_forwarding(tx_first); if (stats_period != 0) { -@@ -4567,15 +4629,41 @@ main(int argc, char** argv) +@@ -4567,15 +4643,33 @@ main(int argc, char** argv) prev_time = cur_time; rte_delay_us_sleep(US_PER_S); } - } + } else { + char c; -+ fd_set fds; -+ -+ printf("Press enter to exit\n"); -+ -+ FD_ZERO(&fds); -+ FD_SET(0, &fds); -+ -+ /* wait for signal or enter */ -+ ret = select(1, &fds, NULL, NULL, NULL); -+ if (ret < 0 && errno != EINTR) -+ rte_exit(EXIT_FAILURE, -+ "Select failed: %s\n", -+ strerror(errno)); - printf("Press enter to exit\n"); - rc = read(0, &c, 1); - pmd_test_exit(); - if (rc < 0) - return 1; -+ /* if got enter then consume it */ -+ if (ret == 1 && read(0, &c, 1) < 0) -+ rte_exit(EXIT_FAILURE, -+ "Read failed: %s\n", ++ printf("Press enter to exit\n"); ++ while (f_quit == 0) { ++ /* end-of-file or any character exits loop */ ++ if (read(0, &c, 1) >= 0) ++ break; ++ if (errno == EINTR) ++ continue; ++ rte_exit(EXIT_FAILURE, "Read failed: %s\n", + strerror(errno)); ++ } + } } @@ -8205,18 +11615,10 @@ index 7d24d25970..022210a7a9 100644 /* * It is used to allocate the memory for hash key. 
diff --git a/dpdk/app/test/meson.build b/dpdk/app/test/meson.build -index f34d19e3c3..96702c2078 100644 +index f34d19e3c3..4e39c9e7cf 100644 --- a/dpdk/app/test/meson.build +++ b/dpdk/app/test/meson.build -@@ -190,6 +190,7 @@ fast_tests = [ - ['fib_autotest', true, true], - ['fib6_autotest', true, true], - ['func_reentrancy_autotest', false, true], -+ ['graph_autotest', true, true], - ['hash_autotest', true, true], - ['interrupt_autotest', true, true], - ['ipfrag_autotest', false, true], -@@ -206,6 +207,7 @@ fast_tests = [ +@@ -206,6 +206,7 @@ fast_tests = [ ['memzone_autotest', false, true], ['meter_autotest', true, true], ['multiprocess_autotest', false, false], @@ -8224,14 +11626,17 @@ index f34d19e3c3..96702c2078 100644 ['per_lcore_autotest', true, true], ['pflock_autotest', true, true], ['prefetch_autotest', true, true], -@@ -295,6 +297,7 @@ perf_test_names = [ - 'trace_perf_autotest', - 'ipsec_perf_autotest', - 'thash_perf_autotest', -+ 'graph_perf_autotest', - ] +@@ -320,6 +321,10 @@ driver_test_names = [ + dump_test_names = [] - driver_test_names = [ + if not is_windows ++ fast_tests += [['graph_autotest', true, true]] ++ ++ perf_test_names += 'graph_perf_autotest' ++ + driver_test_names += [ + 'cryptodev_openssl_asym_autotest', + 'eventdev_selftest_octeontx', diff --git a/dpdk/app/test/packet_burst_generator.c b/dpdk/app/test/packet_burst_generator.c index 6b42b9b83b..867a88da00 100644 --- a/dpdk/app/test/packet_burst_generator.c @@ -8308,8 +11713,21 @@ index 6b42b9b83b..867a88da00 100644 } pkt_seg->next = NULL; /* Last segment of packet. */ +diff --git a/dpdk/app/test/test.h b/dpdk/app/test/test.h +index 85f57efbc6..6a4fa0b1d7 100644 +--- a/dpdk/app/test/test.h ++++ b/dpdk/app/test/test.h +@@ -127,7 +127,7 @@ struct unit_test_case { + { setup, teardown, NULL, testcase, #testcase, 1, data } + + #define TEST_CASE_NAMED_ST(name, setup, teardown, testcase) \ +- { setup, teardown, NULL, testcase, name, 1, NULL } ++ { setup, teardown, testcase, NULL, name, 1, NULL } + + #define TEST_CASE_NAMED_WITH_DATA(name, setup, teardown, testcase, data) \ + { setup, teardown, NULL, testcase, name, 1, data } diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c -index d6ae762df9..bdd3da7a7c 100644 +index d6ae762df9..b75edb2f2b 100644 --- a/dpdk/app/test/test_cryptodev.c +++ b/dpdk/app/test/test_cryptodev.c @@ -136,6 +136,17 @@ security_proto_supported(enum rte_security_session_action_type action, @@ -8372,7 +11790,17 @@ index d6ae762df9..bdd3da7a7c 100644 "ZUC Generated auth tag not as expected"); return 0; } -@@ -6415,7 +6420,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, +@@ -6253,6 +6258,9 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, + tdata->digest.len) < 0) + return TEST_SKIPPED; + ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + + uint64_t feat_flags = dev_info.feature_flags; +@@ -6415,7 +6423,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, TEST_ASSERT_BUFFERS_ARE_EQUAL( ut_params->digest, tdata->digest.data, @@ -8381,7 +11809,7 @@ index d6ae762df9..bdd3da7a7c 100644 "ZUC Generated auth tag not as expected"); } return 0; -@@ -6453,6 +6458,9 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -6453,6 +6461,9 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, tdata->digest.len) < 0) return TEST_SKIPPED; @@ -8391,7 +11819,7 @@ index d6ae762df9..bdd3da7a7c 100644 
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; -@@ -6622,7 +6630,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -6622,7 +6633,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, TEST_ASSERT_BUFFERS_ARE_EQUAL( digest, tdata->digest.data, @@ -8400,7 +11828,7 @@ index d6ae762df9..bdd3da7a7c 100644 "ZUC Generated auth tag not as expected"); } return 0; -@@ -6852,6 +6860,7 @@ snow3g_hash_test_vector_setup(const struct snow3g_test_data *pattern, +@@ -6852,6 +6863,7 @@ snow3g_hash_test_vector_setup(const struct snow3g_test_data *pattern, static int test_snow3g_decryption_with_digest_test_case_1(void) { @@ -8408,7 +11836,7 @@ index d6ae762df9..bdd3da7a7c 100644 struct snow3g_hash_test_data snow3g_hash_data; struct rte_cryptodev_info dev_info; struct crypto_testsuite_params *ts_params = &testsuite_params; -@@ -6870,8 +6879,9 @@ test_snow3g_decryption_with_digest_test_case_1(void) +@@ -6870,8 +6882,9 @@ test_snow3g_decryption_with_digest_test_case_1(void) */ snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data); @@ -8420,7 +11848,17 @@ index d6ae762df9..bdd3da7a7c 100644 return test_snow3g_authentication_verify(&snow3g_hash_data); } -@@ -7648,6 +7658,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, +@@ -7626,6 +7639,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return TEST_SKIPPED; + ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + + uint64_t feat_flags = dev_info.feature_flags; +@@ -7648,6 +7664,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, } } @@ -8430,7 +11868,7 @@ index d6ae762df9..bdd3da7a7c 100644 /* Create the session */ if (verify) retval = create_wireless_algo_cipher_auth_session( -@@ -8433,7 +8446,7 @@ test_authenticated_encryption(const struct aead_test_data *tdata) +@@ -8433,7 +8452,7 @@ test_authenticated_encryption(const struct aead_test_data *tdata) tdata->key.data, tdata->key.len, tdata->aad.len, tdata->auth_tag.len, tdata->iv.len); @@ -8439,7 +11877,331 @@ index d6ae762df9..bdd3da7a7c 100644 return retval; if (tdata->aad.len > MBUF_SIZE) { -@@ -11567,7 +11580,7 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata) +@@ -9809,7 +9828,7 @@ test_ipsec_ah_proto_all(const struct ipsec_test_flags *flags) + } + + static int +-test_ipsec_proto_display_list(const void *data __rte_unused) ++test_ipsec_proto_display_list(void) + { + struct ipsec_test_flags flags; + +@@ -9821,7 +9840,7 @@ test_ipsec_proto_display_list(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ah_tunnel_ipv4(const void *data __rte_unused) ++test_ipsec_proto_ah_tunnel_ipv4(void) + { + struct ipsec_test_flags flags; + +@@ -9834,7 +9853,7 @@ test_ipsec_proto_ah_tunnel_ipv4(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ah_transport_ipv4(const void *data __rte_unused) ++test_ipsec_proto_ah_transport_ipv4(void) + { + struct ipsec_test_flags flags; + +@@ -9847,7 +9866,7 @@ test_ipsec_proto_ah_transport_ipv4(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_iv_gen(const void *data __rte_unused) ++test_ipsec_proto_iv_gen(void) + { + struct ipsec_test_flags flags; + +@@ -9859,7 +9878,7 @@ test_ipsec_proto_iv_gen(const void *data __rte_unused) + } + + static int 
+-test_ipsec_proto_sa_exp_pkts_soft(const void *data __rte_unused) ++test_ipsec_proto_sa_exp_pkts_soft(void) + { + struct ipsec_test_flags flags; + +@@ -9871,7 +9890,7 @@ test_ipsec_proto_sa_exp_pkts_soft(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_sa_exp_pkts_hard(const void *data __rte_unused) ++test_ipsec_proto_sa_exp_pkts_hard(void) + { + struct ipsec_test_flags flags; + +@@ -9883,7 +9902,7 @@ test_ipsec_proto_sa_exp_pkts_hard(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_err_icv_corrupt(const void *data __rte_unused) ++test_ipsec_proto_err_icv_corrupt(void) + { + struct ipsec_test_flags flags; + +@@ -9895,7 +9914,7 @@ test_ipsec_proto_err_icv_corrupt(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_udp_encap_custom_ports(const void *data __rte_unused) ++test_ipsec_proto_udp_encap_custom_ports(void) + { + struct ipsec_test_flags flags; + +@@ -9912,7 +9931,7 @@ test_ipsec_proto_udp_encap_custom_ports(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_udp_encap(const void *data __rte_unused) ++test_ipsec_proto_udp_encap(void) + { + struct ipsec_test_flags flags; + +@@ -9924,7 +9943,7 @@ test_ipsec_proto_udp_encap(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused) ++test_ipsec_proto_tunnel_src_dst_addr_verify(void) + { + struct ipsec_test_flags flags; + +@@ -9936,7 +9955,7 @@ test_ipsec_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_tunnel_dst_addr_verify(const void *data __rte_unused) ++test_ipsec_proto_tunnel_dst_addr_verify(void) + { + struct ipsec_test_flags flags; + +@@ -9948,7 +9967,7 @@ test_ipsec_proto_tunnel_dst_addr_verify(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_udp_ports_verify(const void *data __rte_unused) ++test_ipsec_proto_udp_ports_verify(void) + { + struct ipsec_test_flags flags; + +@@ -9961,7 +9980,7 @@ test_ipsec_proto_udp_ports_verify(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_inner_ip_csum(const void *data __rte_unused) ++test_ipsec_proto_inner_ip_csum(void) + { + struct ipsec_test_flags flags; + +@@ -9973,7 +9992,7 @@ test_ipsec_proto_inner_ip_csum(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_inner_l4_csum(const void *data __rte_unused) ++test_ipsec_proto_inner_l4_csum(void) + { + struct ipsec_test_flags flags; + +@@ -9985,7 +10004,7 @@ test_ipsec_proto_inner_l4_csum(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_tunnel_v4_in_v4(const void *data __rte_unused) ++test_ipsec_proto_tunnel_v4_in_v4(void) + { + struct ipsec_test_flags flags; + +@@ -9998,7 +10017,7 @@ test_ipsec_proto_tunnel_v4_in_v4(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_tunnel_v6_in_v6(const void *data __rte_unused) ++test_ipsec_proto_tunnel_v6_in_v6(void) + { + struct ipsec_test_flags flags; + +@@ -10011,7 +10030,7 @@ test_ipsec_proto_tunnel_v6_in_v6(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_tunnel_v4_in_v6(const void *data __rte_unused) ++test_ipsec_proto_tunnel_v4_in_v6(void) + { + struct ipsec_test_flags flags; + +@@ -10024,7 +10043,7 @@ test_ipsec_proto_tunnel_v4_in_v6(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_tunnel_v6_in_v4(const void *data __rte_unused) ++test_ipsec_proto_tunnel_v6_in_v4(void) + { + struct ipsec_test_flags flags; + +@@ -10037,7 +10056,7 @@ test_ipsec_proto_tunnel_v6_in_v4(const void 
*data __rte_unused) + } + + static int +-test_ipsec_proto_transport_v4(const void *data __rte_unused) ++test_ipsec_proto_transport_v4(void) + { + struct ipsec_test_flags flags; + +@@ -10050,7 +10069,7 @@ test_ipsec_proto_transport_v4(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_transport_l4_csum(const void *data __rte_unused) ++test_ipsec_proto_transport_l4_csum(void) + { + struct ipsec_test_flags flags = { + .l4_csum = true, +@@ -10061,7 +10080,7 @@ test_ipsec_proto_transport_l4_csum(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_stats(const void *data __rte_unused) ++test_ipsec_proto_stats(void) + { + struct ipsec_test_flags flags; + +@@ -10073,7 +10092,7 @@ test_ipsec_proto_stats(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_pkt_fragment(const void *data __rte_unused) ++test_ipsec_proto_pkt_fragment(void) + { + struct ipsec_test_flags flags; + +@@ -10086,7 +10105,7 @@ test_ipsec_proto_pkt_fragment(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_copy_df_inner_0(const void *data __rte_unused) ++test_ipsec_proto_copy_df_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -10098,7 +10117,7 @@ test_ipsec_proto_copy_df_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_copy_df_inner_1(const void *data __rte_unused) ++test_ipsec_proto_copy_df_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -10110,7 +10129,7 @@ test_ipsec_proto_copy_df_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_set_df_0_inner_1(const void *data __rte_unused) ++test_ipsec_proto_set_df_0_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -10122,7 +10141,7 @@ test_ipsec_proto_set_df_0_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_set_df_1_inner_0(const void *data __rte_unused) ++test_ipsec_proto_set_df_1_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -10134,7 +10153,7 @@ test_ipsec_proto_set_df_1_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) ++test_ipsec_proto_ipv4_copy_dscp_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -10146,7 +10165,7 @@ test_ipsec_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) ++test_ipsec_proto_ipv4_copy_dscp_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -10158,7 +10177,7 @@ test_ipsec_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) ++test_ipsec_proto_ipv4_set_dscp_0_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -10174,7 +10193,7 @@ test_ipsec_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) ++test_ipsec_proto_ipv4_set_dscp_1_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -10190,7 +10209,7 @@ test_ipsec_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) ++test_ipsec_proto_ipv6_copy_dscp_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -10204,7 +10223,7 @@ test_ipsec_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) 
++test_ipsec_proto_ipv6_copy_dscp_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -10218,7 +10237,7 @@ test_ipsec_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) ++test_ipsec_proto_ipv6_set_dscp_0_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -10236,7 +10255,7 @@ test_ipsec_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused) ++test_ipsec_proto_ipv6_set_dscp_1_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -10442,7 +10461,7 @@ test_PDCP_PROTO_all(void) + } + + static int +-test_ipsec_proto_ipv4_ttl_decrement(const void *data __rte_unused) ++test_ipsec_proto_ipv4_ttl_decrement(void) + { + struct ipsec_test_flags flags = { + .dec_ttl_or_hop_limit = true +@@ -10452,7 +10471,7 @@ test_ipsec_proto_ipv4_ttl_decrement(const void *data __rte_unused) + } + + static int +-test_ipsec_proto_ipv6_hop_limit_decrement(const void *data __rte_unused) ++test_ipsec_proto_ipv6_hop_limit_decrement(void) + { + struct ipsec_test_flags flags = { + .ipv6 = true, +@@ -11567,7 +11586,7 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata) tdata->key.data, tdata->key.len, tdata->aad.len, tdata->auth_tag.len, tdata->iv.len); @@ -8448,7 +12210,7 @@ index d6ae762df9..bdd3da7a7c 100644 return retval; /* alloc mbuf and set payload */ -@@ -11981,11 +11994,11 @@ test_stats(void) +@@ -11981,11 +12000,11 @@ test_stats(void) TEST_ASSERT((stats.enqueued_count == 1), "rte_cryptodev_stats_get returned unexpected enqueued stat"); TEST_ASSERT((stats.dequeued_count == 1), @@ -8463,7 +12225,7 @@ index d6ae762df9..bdd3da7a7c 100644 /* invalid device but should ignore and not reset device stats*/ rte_cryptodev_stats_reset(ts_params->valid_devs[0] + 300); -@@ -11993,7 +12006,7 @@ test_stats(void) +@@ -11993,7 +12012,7 @@ test_stats(void) &stats), "rte_cryptodev_stats_get failed"); TEST_ASSERT((stats.enqueued_count == 1), @@ -8472,7 +12234,7 @@ index d6ae762df9..bdd3da7a7c 100644 /* check that a valid reset clears stats */ rte_cryptodev_stats_reset(ts_params->valid_devs[0]); -@@ -12001,9 +12014,9 @@ test_stats(void) +@@ -12001,9 +12020,9 @@ test_stats(void) &stats), "rte_cryptodev_stats_get failed"); TEST_ASSERT((stats.enqueued_count == 0), @@ -8484,7 +12246,62 @@ index d6ae762df9..bdd3da7a7c 100644 return TEST_SUCCESS; } -@@ -14450,8 +14463,13 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, +@@ -12990,7 +13009,7 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata) + retval = create_gmac_session(ts_params->valid_devs[0], + tdata, RTE_CRYPTO_AUTH_OP_GENERATE); + +- if (retval == -ENOTSUP) ++ if (retval == TEST_SKIPPED) + return TEST_SKIPPED; + if (retval < 0) + return retval; +@@ -13121,7 +13140,7 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata) + retval = create_gmac_session(ts_params->valid_devs[0], + tdata, RTE_CRYPTO_AUTH_OP_VERIFY); + +- if (retval == -ENOTSUP) ++ if (retval == TEST_SKIPPED) + return TEST_SKIPPED; + if (retval < 0) + return retval; +@@ -13250,7 +13269,7 @@ test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata, + retval = create_gmac_session(ts_params->valid_devs[0], + tdata, RTE_CRYPTO_AUTH_OP_GENERATE); + +- if (retval == -ENOTSUP) ++ if (retval == TEST_SKIPPED) + return TEST_SKIPPED; + if (retval < 0) + return retval; +@@ -13867,7 +13886,7 @@ 
test_authentication_verify_fail_when_data_corruption( + reference, + RTE_CRYPTO_AUTH_OP_VERIFY); + +- if (retval == -ENOTSUP) ++ if (retval == TEST_SKIPPED) + return TEST_SKIPPED; + if (retval < 0) + return retval; +@@ -13954,6 +13973,8 @@ test_authentication_verify_GMAC_fail_when_corruption( + reference, + RTE_CRYPTO_AUTH_OP_VERIFY, + RTE_CRYPTO_CIPHER_OP_DECRYPT); ++ if (retval == TEST_SKIPPED) ++ return TEST_SKIPPED; + if (retval < 0) + return retval; + +@@ -14044,8 +14065,7 @@ test_authenticated_decryption_fail_when_corruption( + reference, + RTE_CRYPTO_AUTH_OP_VERIFY, + RTE_CRYPTO_CIPHER_OP_DECRYPT); +- +- if (retval == -ENOTSUP) ++ if (retval == TEST_SKIPPED) + return TEST_SKIPPED; + if (retval < 0) + return retval; +@@ -14450,8 +14470,13 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, &cap_idx) == NULL) return TEST_SKIPPED; @@ -8576,6 +12393,56 @@ index ea7b21ce53..f3686beeb5 100644 .test_data = &aes_test_data_xts_wrapped_key_48_pt_4096_du_0, .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +diff --git a/dpdk/app/test/test_cryptodev_asym.c b/dpdk/app/test/test_cryptodev_asym.c +index c58c7f488b..67659cd1a6 100644 +--- a/dpdk/app/test/test_cryptodev_asym.c ++++ b/dpdk/app/test/test_cryptodev_asym.c +@@ -1602,7 +1602,7 @@ error_exit: + } + + static int +-test_dh_keygenration(void) ++test_dh_key_generation(void) + { + int status; + +@@ -2204,7 +2204,7 @@ static struct unit_test_suite cryptodev_openssl_asym_testsuite = { + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_capability), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_dsa), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, +- test_dh_keygenration), ++ test_dh_key_generation), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_rsa_enc_dec), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, + test_rsa_sign_verify), +diff --git a/dpdk/app/test/test_cryptodev_mixed_test_vectors.h b/dpdk/app/test/test_cryptodev_mixed_test_vectors.h +index 2816ecc6a4..f80903c87e 100644 +--- a/dpdk/app/test/test_cryptodev_mixed_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_mixed_test_vectors.h +@@ -478,8 +478,10 @@ struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_snow_test_case_1 = { + }, + .cipher_iv = { + .data = { ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }, +- .len = 0, ++ .len = 16, + }, + .cipher = { + .len_bits = 516 << 3, +@@ -917,8 +919,10 @@ struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_zuc_test_case_1 = { + }, + .cipher_iv = { + .data = { ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }, +- .len = 0, ++ .len = 16, + }, + .cipher = { + .len_bits = 516 << 3, diff --git a/dpdk/app/test/test_cryptodev_security_ipsec_test_vectors.h b/dpdk/app/test/test_cryptodev_security_ipsec_test_vectors.h index 2686bbeb62..6e60e32b9d 100644 --- a/dpdk/app/test/test_cryptodev_security_ipsec_test_vectors.h @@ -9642,6 +13509,25 @@ index 6fdc4cd9e3..56d4884529 100644 /* User Plane w/NULL enc. + SNOW f9 int. 
UL for 12-bit SN */ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +diff --git a/dpdk/app/test/test_event_crypto_adapter.c b/dpdk/app/test/test_event_crypto_adapter.c +index a38e389abd..3d720fe68b 100644 +--- a/dpdk/app/test/test_event_crypto_adapter.c ++++ b/dpdk/app/test/test_event_crypto_adapter.c +@@ -958,11 +958,10 @@ configure_cryptodev(void) + return TEST_FAILED; + } + +- /* Create a NULL crypto device */ +- nb_devs = rte_cryptodev_device_count_by_driver( +- rte_cryptodev_driver_id_get( +- RTE_STR(CRYPTODEV_NAME_NULL_PMD))); ++ ++ nb_devs = rte_cryptodev_count(); + if (!nb_devs) { ++ /* Create a NULL crypto device */ + ret = rte_vdev_init( + RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL); + diff --git a/dpdk/app/test/test_event_timer_adapter.c b/dpdk/app/test/test_event_timer_adapter.c index 1a440dfd10..12d5936c60 100644 --- a/dpdk/app/test/test_event_timer_adapter.c @@ -10043,8 +13929,21 @@ index 1a440dfd10..12d5936c60 100644 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); rte_mempool_put(eventdev_test_mempool, evtim); +diff --git a/dpdk/app/test/test_hash_readwrite.c b/dpdk/app/test/test_hash_readwrite.c +index 6373e62d33..9cc5f3487c 100644 +--- a/dpdk/app/test/test_hash_readwrite.c ++++ b/dpdk/app/test/test_hash_readwrite.c +@@ -162,7 +162,7 @@ init_params(int use_ext, int use_htm, int rw_lf, int use_jhash) + + handle = rte_hash_create(&hash_params); + if (handle == NULL) { +- printf("hash creation failed"); ++ printf("hash creation failed\n"); + return -1; + } + diff --git a/dpdk/app/test/test_link_bonding.c b/dpdk/app/test/test_link_bonding.c -index 5c496352c2..2f46e4c6ee 100644 +index 5c496352c2..53f5c13a24 100644 --- a/dpdk/app/test/test_link_bonding.c +++ b/dpdk/app/test/test_link_bonding.c @@ -2,7 +2,7 @@ @@ -10056,6 +13955,52 @@ index 5c496352c2..2f46e4c6ee 100644 #include #include #include +@@ -447,7 +447,8 @@ test_add_already_bonded_slave_to_bonded_device(void) + uint16_t slaves[RTE_MAX_ETHPORTS]; + char pmd_name[RTE_ETH_NAME_MAX_LEN]; + +- test_add_slave_to_bonded_device(); ++ TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(), ++ "Failed to add member to bonding device"); + + current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, + slaves, RTE_MAX_ETHPORTS); +@@ -4261,7 +4262,7 @@ test_tlb_tx_burst(void) + burst_size); + TEST_ASSERT_EQUAL(nb_tx, 0, " bad number of packet in burst"); + +- /* Clean ugit checkout masterp and remove slaves from bonded device */ ++ /* Clean up and remove members from bonding device */ + return remove_slaves_and_stop_bonded_device(); + } + +diff --git a/dpdk/app/test/test_link_bonding_mode4.c b/dpdk/app/test/test_link_bonding_mode4.c +index 21c512c94b..7410f99617 100644 +--- a/dpdk/app/test/test_link_bonding_mode4.c ++++ b/dpdk/app/test/test_link_bonding_mode4.c +@@ -641,8 +641,7 @@ bond_handshake(void) + /* If response didn't send - report failure */ + TEST_ASSERT_EQUAL(all_slaves_done, 1, "Bond handshake failed\n"); + +- /* If flags doesn't match - report failure */ +- return all_slaves_done == 1 ? 
TEST_SUCCESS : TEST_FAILED; ++ return TEST_SUCCESS; + } + + #define TEST_LACP_SLAVE_COUT RTE_DIM(test_params.slave_ports) +diff --git a/dpdk/app/test/test_link_bonding_rssconf.c b/dpdk/app/test/test_link_bonding_rssconf.c +index 464fb2dbd0..7aecee9117 100644 +--- a/dpdk/app/test/test_link_bonding_rssconf.c ++++ b/dpdk/app/test/test_link_bonding_rssconf.c +@@ -324,7 +324,7 @@ test_propagate(void) + uint8_t n; + struct slave_conf *port; + uint8_t bond_rss_key[40]; +- struct rte_eth_rss_conf bond_rss_conf; ++ struct rte_eth_rss_conf bond_rss_conf = {0}; + + int retval = 0; + uint64_t rss_hf = 0; diff --git a/dpdk/app/test/test_malloc.c b/dpdk/app/test/test_malloc.c index de40e50611..ff081dd931 100644 --- a/dpdk/app/test/test_malloc.c @@ -10246,7 +14191,7 @@ index f0714a5c18..7b5e590bac 100644 /* * RB[] = {NULL, NULL, NULL, NULL} diff --git a/dpdk/app/test/test_security_inline_proto.c b/dpdk/app/test/test_security_inline_proto.c -index 79858e559f..e411a3c21d 100644 +index 79858e559f..0a966d3f4b 100644 --- a/dpdk/app/test/test_security_inline_proto.c +++ b/dpdk/app/test/test_security_inline_proto.c @@ -678,6 +678,8 @@ free_mbuf(struct rte_mbuf *mbuf) @@ -10541,6 +14486,357 @@ index 79858e559f..e411a3c21d 100644 static int inline_ipsec_testsuite_setup(void) +@@ -1995,7 +2065,7 @@ test_ipsec_inline_proto_known_vec_inb(const void *test_data) + } + + static int +-test_ipsec_inline_proto_display_list(const void *data __rte_unused) ++test_ipsec_inline_proto_display_list(void) + { + struct ipsec_test_flags flags; + +@@ -2008,7 +2078,7 @@ test_ipsec_inline_proto_display_list(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_udp_encap(const void *data __rte_unused) ++test_ipsec_inline_proto_udp_encap(void) + { + struct ipsec_test_flags flags; + +@@ -2021,7 +2091,7 @@ test_ipsec_inline_proto_udp_encap(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused) ++test_ipsec_inline_proto_udp_ports_verify(void) + { + struct ipsec_test_flags flags; + +@@ -2035,7 +2105,7 @@ test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused) ++test_ipsec_inline_proto_err_icv_corrupt(void) + { + struct ipsec_test_flags flags; + +@@ -2048,7 +2118,7 @@ test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused) ++test_ipsec_inline_proto_tunnel_dst_addr_verify(void) + { + struct ipsec_test_flags flags; + +@@ -2061,7 +2131,7 @@ test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused) ++test_ipsec_inline_proto_tunnel_src_dst_addr_verify(void) + { + struct ipsec_test_flags flags; + +@@ -2074,7 +2144,7 @@ test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused + } + + static int +-test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused) ++test_ipsec_inline_proto_inner_ip_csum(void) + { + struct ipsec_test_flags flags; + +@@ -2087,7 +2157,7 @@ test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused) ++test_ipsec_inline_proto_inner_l4_csum(void) + { + struct ipsec_test_flags flags; + +@@ -2100,7 +2170,7 @@ test_ipsec_inline_proto_inner_l4_csum(const void *data 
__rte_unused) + } + + static int +-test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused) ++test_ipsec_inline_proto_tunnel_v4_in_v4(void) + { + struct ipsec_test_flags flags; + +@@ -2114,7 +2184,7 @@ test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused) ++test_ipsec_inline_proto_tunnel_v6_in_v6(void) + { + struct ipsec_test_flags flags; + +@@ -2128,7 +2198,7 @@ test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused) ++test_ipsec_inline_proto_tunnel_v4_in_v6(void) + { + struct ipsec_test_flags flags; + +@@ -2142,7 +2212,7 @@ test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused) ++test_ipsec_inline_proto_tunnel_v6_in_v4(void) + { + struct ipsec_test_flags flags; + +@@ -2156,7 +2226,7 @@ test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_transport_v4(const void *data __rte_unused) ++test_ipsec_inline_proto_transport_v4(void) + { + struct ipsec_test_flags flags; + +@@ -2170,7 +2240,7 @@ test_ipsec_inline_proto_transport_v4(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused) ++test_ipsec_inline_proto_transport_l4_csum(void) + { + struct ipsec_test_flags flags = { + .l4_csum = true, +@@ -2182,7 +2252,7 @@ test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_stats(const void *data __rte_unused) ++test_ipsec_inline_proto_stats(void) + { + struct ipsec_test_flags flags; + +@@ -2195,7 +2265,7 @@ test_ipsec_inline_proto_stats(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused) ++test_ipsec_inline_proto_pkt_fragment(void) + { + struct ipsec_test_flags flags; + +@@ -2209,7 +2279,7 @@ test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused) ++test_ipsec_inline_proto_copy_df_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -2222,7 +2292,7 @@ test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused) ++test_ipsec_inline_proto_copy_df_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -2235,7 +2305,7 @@ test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused) ++test_ipsec_inline_proto_set_df_0_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -2248,7 +2318,7 @@ test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused) ++test_ipsec_inline_proto_set_df_1_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -2261,7 +2331,7 @@ test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -2274,7 +2344,7 @@ test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data 
__rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -2287,7 +2357,7 @@ test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -2300,7 +2370,7 @@ test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -2313,7 +2383,7 @@ test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -2328,7 +2398,7 @@ test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -2343,7 +2413,7 @@ test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -2358,7 +2428,7 @@ test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -2373,7 +2443,7 @@ test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -2387,7 +2457,7 @@ test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -2401,7 +2471,7 @@ test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(void) + { + struct ipsec_test_flags flags; + +@@ -2415,7 +2485,7 @@ test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(void) + { + struct ipsec_test_flags flags; + +@@ -2429,7 +2499,7 @@ test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv4_ttl_decrement(void) + { + struct ipsec_test_flags flags = { + .dec_ttl_or_hop_limit = true, +@@ -2440,7 +2510,7 @@ 
test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused) ++test_ipsec_inline_proto_ipv6_hop_limit_decrement(void) + { + struct ipsec_test_flags flags = { + .ipv6 = true, +@@ -2452,7 +2522,7 @@ test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_iv_gen(const void *data __rte_unused) ++test_ipsec_inline_proto_iv_gen(void) + { + struct ipsec_test_flags flags; + +@@ -2465,7 +2535,7 @@ test_ipsec_inline_proto_iv_gen(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_sa_pkt_soft_expiry(const void *data __rte_unused) ++test_ipsec_inline_proto_sa_pkt_soft_expiry(void) + { + struct ipsec_test_flags flags = { + .sa_expiry_pkts_soft = true, +@@ -2474,7 +2544,7 @@ test_ipsec_inline_proto_sa_pkt_soft_expiry(const void *data __rte_unused) + return test_ipsec_inline_proto_all(&flags); + } + static int +-test_ipsec_inline_proto_sa_byte_soft_expiry(const void *data __rte_unused) ++test_ipsec_inline_proto_sa_byte_soft_expiry(void) + { + struct ipsec_test_flags flags = { + .sa_expiry_bytes_soft = true, +@@ -2484,7 +2554,7 @@ test_ipsec_inline_proto_sa_byte_soft_expiry(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_sa_pkt_hard_expiry(const void *data __rte_unused) ++test_ipsec_inline_proto_sa_pkt_hard_expiry(void) + { + struct ipsec_test_flags flags = { + .sa_expiry_pkts_hard = true +@@ -2494,7 +2564,7 @@ test_ipsec_inline_proto_sa_pkt_hard_expiry(const void *data __rte_unused) + } + + static int +-test_ipsec_inline_proto_sa_byte_hard_expiry(const void *data __rte_unused) ++test_ipsec_inline_proto_sa_byte_hard_expiry(void) + { + struct ipsec_test_flags flags = { + .sa_expiry_bytes_hard = true @@ -3048,43 +3118,43 @@ static struct unit_test_suite inline_ipsec_testsuite = { TEST_CASE_NAMED_WITH_DATA( @@ -10595,10 +14891,106 @@ index 79858e559f..e411a3c21d 100644 test_inline_ip_reassembly, &ipv4_4frag_burst_vector), TEST_CASES_END() /**< NULL terminate unit test array */ +diff --git a/dpdk/app/test/test_security_inline_proto_vectors.h b/dpdk/app/test/test_security_inline_proto_vectors.h +index 003537e200..d0a4b948e4 100644 +--- a/dpdk/app/test/test_security_inline_proto_vectors.h ++++ b/dpdk/app/test/test_security_inline_proto_vectors.h +@@ -88,7 +88,7 @@ struct ip_reassembly_test_packet pkt_ipv6_udp_p1 = { + .l4_offset = 40, + .data = { + /* IP */ +- 0x60, 0x00, 0x00, 0x00, 0x05, 0xb4, 0x2C, 0x40, ++ 0x60, 0x00, 0x00, 0x00, 0x05, 0xb4, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +@@ -135,7 +135,7 @@ struct ip_reassembly_test_packet pkt_ipv6_udp_p2 = { + .l4_offset = 40, + .data = { + /* IP */ +- 0x60, 0x00, 0x00, 0x00, 0x11, 0x5a, 0x2c, 0x40, ++ 0x60, 0x00, 0x00, 0x00, 0x11, 0x5a, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +diff --git a/dpdk/config/arm/meson.build b/dpdk/config/arm/meson.build +index 6442ec9596..5028c74613 100644 +--- a/dpdk/config/arm/meson.build ++++ b/dpdk/config/arm/meson.build +@@ -43,7 +43,9 @@ implementer_generic = { + }, + 'generic_aarch32': { + 'march': 'armv8-a', +- 'compiler_options': ['-mfpu=neon'], ++ 'force_march': true, ++ 'march_features': ['simd'], ++ 'compiler_options': ['-mfpu=auto'], + 'flags': [ + 
['RTE_ARCH_ARM_NEON_MEMCPY', false], + ['RTE_ARCH_STRICT_ALIGN', true], +@@ -613,21 +615,25 @@ if update_flags + # probe supported archs and their features + candidate_march = '' + if part_number_config.has_key('march') +- supported_marchs = ['armv8.6-a', 'armv8.5-a', 'armv8.4-a', 'armv8.3-a', +- 'armv8.2-a', 'armv8.1-a', 'armv8-a'] +- check_compiler_support = false +- foreach supported_march: supported_marchs +- if supported_march == part_number_config['march'] +- # start checking from this version downwards +- check_compiler_support = true +- endif +- if (check_compiler_support and +- cc.has_argument('-march=' + supported_march)) +- candidate_march = supported_march +- # highest supported march version found +- break +- endif +- endforeach ++ if part_number_config.get('force_march', false) ++ candidate_march = part_number_config['march'] ++ else ++ supported_marchs = ['armv8.6-a', 'armv8.5-a', 'armv8.4-a', 'armv8.3-a', ++ 'armv8.2-a', 'armv8.1-a', 'armv8-a'] ++ check_compiler_support = false ++ foreach supported_march: supported_marchs ++ if supported_march == part_number_config['march'] ++ # start checking from this version downwards ++ check_compiler_support = true ++ endif ++ if (check_compiler_support and ++ cc.has_argument('-march=' + supported_march)) ++ candidate_march = supported_march ++ # highest supported march version found ++ break ++ endif ++ endforeach ++ endif + if candidate_march == '' + error('No suitable armv8 march version found.') + endif +@@ -659,7 +665,7 @@ if update_flags + # apply supported compiler options + if part_number_config.has_key('compiler_options') + foreach flag: part_number_config['compiler_options'] +- if cc.has_argument(flag) ++ if cc.has_multi_arguments(machine_args + [flag]) + machine_args += flag + else + warning('Configuration compiler option ' + diff --git a/dpdk/config/meson.build b/dpdk/config/meson.build -index 6d9ffd4f4b..7cd375e991 100644 +index 6d9ffd4f4b..265aaa995c 100644 --- a/dpdk/config/meson.build +++ b/dpdk/config/meson.build +@@ -122,7 +122,7 @@ if cpu_instruction_set == 'generic' + elif host_machine.cpu_family().startswith('ppc') + cpu_instruction_set = 'power8' + elif host_machine.cpu_family().startswith('riscv') +- cpu_instruction_set = 'riscv' ++ cpu_instruction_set = 'rv64gc' + endif + endif + @@ -139,7 +139,7 @@ endif toolchain = cc.get_id() @@ -10774,6 +15166,22 @@ index a55ce38800..0f7ff5282d 100644 print('.. 
table:: ' + table_name + '\n', file=outfile) print_table_header(outfile, num_cols, header_names, title) print_table_body(outfile, num_cols, ini_files, ini_data, default_features) +diff --git a/dpdk/doc/guides/cryptodevs/qat.rst b/dpdk/doc/guides/cryptodevs/qat.rst +index d1e64475c4..b1b893a251 100644 +--- a/dpdk/doc/guides/cryptodevs/qat.rst ++++ b/dpdk/doc/guides/cryptodevs/qat.rst +@@ -399,9 +399,9 @@ to see the full table) + +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+ + | Yes | No | No | 4 | 4xxx | IDZ/ N/A | qat_4xxx | 4xxx | 4940 | 4 | 4941 | 16 | + +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+ +- | Yes | Yes | Yes | 4 | 401xxx | linux/5.19+ | qat_401xxx | 4xxx | 4942 | 2 | 4943 | 16 | ++ | Yes | Yes | Yes | 4 | 401xxx | linux/5.19+ | qat_4xxx | 4xxx | 4942 | 2 | 4943 | 16 | + +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+ +- | Yes | No | No | 4 | 401xxx | IDZ/ N/A | qat_401xxx | 4xxx | 4942 | 2 | 4943 | 16 | ++ | Yes | No | No | 4 | 401xxx | IDZ/ N/A | qat_4xxx | 4xxx | 4942 | 2 | 4943 | 16 | + +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+ + + * Note: Symmetric mixed crypto algorithms feature on Gen 2 works only with IDZ driver version 4.9.0+ diff --git a/dpdk/doc/guides/gpus/cuda.rst b/dpdk/doc/guides/gpus/cuda.rst index 114e3bc8cb..6520c17c3e 100644 --- a/dpdk/doc/guides/gpus/cuda.rst @@ -10925,10 +15333,44 @@ index 9db2865b71..5cdf0ddee6 100644 L4 checksum offload = P Timestamp offload = P diff --git a/dpdk/doc/guides/nics/hns3.rst b/dpdk/doc/guides/nics/hns3.rst -index 791c9cc2ed..6fbd4320ef 100644 +index 791c9cc2ed..380024600b 100644 --- a/dpdk/doc/guides/nics/hns3.rst +++ b/dpdk/doc/guides/nics/hns3.rst -@@ -81,7 +81,8 @@ Runtime Config Options +@@ -30,7 +30,6 @@ Features of the HNS3 PMD are: + - DCB + - Scattered and gather for TX and RX + - Vector Poll mode driver +-- Dump register + - SR-IOV VF + - Multi-process + - MAC/VLAN filter +@@ -38,6 +37,15 @@ Features of the HNS3 PMD are: + - NUMA support + - Generic flow API + - IEEE1588/802.1AS timestamping ++- Basic stats ++- Extended stats ++- Traffic Management API ++- Speed capabilities ++- Link Auto-negotiation ++- Link flow control ++- Dump register ++- Dump private info from device ++- FW version + + Prerequisites + ------------- +@@ -58,7 +66,8 @@ The following options can be modified in the ``config/rte_config.h`` file. + + - ``RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF`` (default ``256``) + +- Number of MAX queues reserved for PF. ++ Number of MAX queues reserved for PF on HIP09 and HIP10. ++ The MAX queue number is also determined by the value the firmware report. + + Runtime Config Options + ~~~~~~~~~~~~~~~~~~~~~~ +@@ -81,7 +90,8 @@ Runtime Config Options ``common``. For example:: @@ -10938,7 +15380,7 @@ index 791c9cc2ed..6fbd4320ef 100644 - ``tx_func_hint`` (default ``none``) -@@ -101,7 +102,8 @@ Runtime Config Options +@@ -101,7 +111,8 @@ Runtime Config Options ``common``. For example:: @@ -10948,7 +15390,7 @@ index 791c9cc2ed..6fbd4320ef 100644 - ``dev_caps_mask`` (default ``0``) -@@ -113,22 +115,25 @@ Runtime Config Options +@@ -113,22 +124,25 @@ Runtime Config Options Its main purpose is to debug and avoid problems. 
For example:: @@ -10988,7 +15430,7 @@ index 791c9cc2ed..6fbd4320ef 100644 Link status event Pre-conditions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -@@ -137,7 +142,8 @@ Firmware 1.8.0.0 and later versions support reporting link changes to the PF. +@@ -137,7 +151,8 @@ Firmware 1.8.0.0 and later versions support reporting link changes to the PF. Therefore, to use the LSC for the PF driver, ensure that the firmware version also supports reporting link changes. If the VF driver needs to support LSC, special patch must be added: @@ -10998,11 +15440,125 @@ index 791c9cc2ed..6fbd4320ef 100644 Note: The patch has been uploaded to 5.13 of the Linux kernel mainline. +@@ -197,36 +212,50 @@ Generic flow API + + - ``RSS Flow`` + +- RSS Flow supports to set hash input set, hash function, enable hash +- and configure queues. +- For example: +- Configure queues as queue 0, 1, 2, 3. ++ RSS Flow supports for creating rule base on input tuple, hash key, queues ++ and hash algorithm. But hash key, queues and hash algorithm are the global ++ configuration for hardware which will affect other rules. ++ The rule just setting input tuple is completely independent. ++ ++ Run ``testpmd``: + + .. code-block:: console + +- testpmd> flow create 0 ingress pattern end actions rss types end \ +- queues 0 1 2 3 end / end ++ dpdk-testpmd -a 0000:7d:00.0 -l 10-18 -- -i --rxq=8 --txq=8 ++ ++ All IP packets can be distributed to 8 queues. + +- Enable hash and set input set for IPv4-TCP. ++ Set IPv4-TCP packet is distributed to 8 queues based on L3/L4 SRC only. + + .. code-block:: console + +- testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \ +- actions rss types ipv4-tcp l3-src-only end queues end / end ++ testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end actions \ ++ rss types ipv4-tcp l4-src-only l3-src-only end queues end / end + +- Set symmetric hash enable for flow type IPv4-TCP. ++ Disable IPv4 packet RSS hash. + + .. code-block:: console + +- testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \ +- actions rss types ipv4-tcp end queues end func symmetric_toeplitz / end ++ testpmd> flow create 0 ingress pattern eth / ipv4 / end actions rss \ ++ types none end queues end / end + +- Set hash function as simple xor. ++ Set hash function as symmetric Toeplitz. + + .. code-block:: console + + testpmd> flow create 0 ingress pattern end actions rss types end \ +- queues end func simple_xor / end ++ queues end func symmetric_toeplitz / end ++ ++ In this case, all packets that enabled RSS are hashed using symmetric ++ Toeplitz algorithm. ++ ++ Flush all RSS rules ++ ++ .. code-block:: console ++ ++ testpmd> flow flush 0 ++ ++ The RSS configurations of hardwre is back to the one ethdev ops set. + + Statistics + ---------- +diff --git a/dpdk/doc/guides/nics/i40e.rst b/dpdk/doc/guides/nics/i40e.rst +index a6c7dbd080..b37859a721 100644 +--- a/dpdk/doc/guides/nics/i40e.rst ++++ b/dpdk/doc/guides/nics/i40e.rst +@@ -88,13 +88,16 @@ Windows Prerequisites + - To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository + `_. + +-Recommended Matching List +-------------------------- +- +-It is highly recommended to upgrade the i40e kernel driver and firmware to +-avoid the compatibility issues with i40e PMD. Here is the suggested matching +-list which has been tested and verified. The detailed information can refer +-to chapter Tested Platforms/Tested NICs in release notes. 
++Kernel driver and Firmware Matching List ++---------------------------------------- ++ ++It is highly recommended to upgrade the i40e kernel driver and firmware ++to avoid the compatibility issues with i40e PMD. ++The table below shows a summary of the DPDK versions ++with corresponding out-of-tree Linux kernel drivers and firmware. ++The full list of in-tree and out-of-tree Linux kernel drivers from kernel.org ++and Linux distributions that were tested and verified ++are listed in the Tested Platforms section of the Release Notes for each release. + + For X710/XL710/XXV710, + diff --git a/dpdk/doc/guides/nics/ice.rst b/dpdk/doc/guides/nics/ice.rst -index ce075e067c..b3dc72d421 100644 +index ce075e067c..ccf9d68203 100644 --- a/dpdk/doc/guides/nics/ice.rst +++ b/dpdk/doc/guides/nics/ice.rst -@@ -331,18 +331,18 @@ Additional Options +@@ -41,13 +41,16 @@ Windows Prerequisites + - Loading of private Dynamic Device Personalization (DDP) package is not supported on Windows. + + +-Recommended Matching List +-------------------------- ++Kernel driver, DDP and Firmware Matching List ++--------------------------------------------- + + It is highly recommended to upgrade the ice kernel driver, firmware and DDP package + to avoid the compatibility issues with ice PMD. +-Here is the suggested matching list which has been tested and verified. +-The detailed information can refer to chapter Tested Platforms/Tested NICs in release notes. ++The table below shows a summary of the DPDK versions ++with corresponding out-of-tree Linux kernel drivers, DDP package and firmware. ++The full list of in-tree and out-of-tree Linux kernel drivers from kernel.org ++and Linux distributions that were tested and verified ++are listed in the Tested Platforms section of the Release Notes for each release. + + +-----------+---------------+-----------------+-----------+--------------+-----------+ + | DPDK | Kernel Driver | OS Default DDP | COMMS DDP | Wireless DDP | Firmware | +@@ -331,18 +334,18 @@ Additional Options ip link set dev enp24s0f0 vf 0 trust on @@ -11025,6 +15581,19 @@ index ce075e067c..b3dc72d421 100644 #. Send the packet, and it should be displayed on tcpdump:: +diff --git a/dpdk/doc/guides/nics/ixgbe.rst b/dpdk/doc/guides/nics/ixgbe.rst +index 5db7376992..a3a19a0f49 100644 +--- a/dpdk/doc/guides/nics/ixgbe.rst ++++ b/dpdk/doc/guides/nics/ixgbe.rst +@@ -16,8 +16,6 @@ The wider register gives space to hold multiple packet buffers so as to save ins + There is no change to PMD API. The RX/TX handler are the only two entries for vPMD packet I/O. + They are transparently registered at runtime RX/TX execution if all condition checks pass. + +-1. To date, only an SSE version of IX GBE vPMD is available. +- + Some constraints apply as pre-conditions for specific optimizations on bulk packet transfers. + The following sections explain RX and TX constraints in the vPMD. + diff --git a/dpdk/doc/guides/nics/mana.rst b/dpdk/doc/guides/nics/mana.rst index 005c0b2ca7..341146c4e7 100644 --- a/dpdk/doc/guides/nics/mana.rst @@ -11065,10 +15634,25 @@ index 005c0b2ca7..341146c4e7 100644 ------------------------------ diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst -index 51f51259e3..937fa5c6e0 100644 +index 51f51259e3..239e297d2a 100644 --- a/dpdk/doc/guides/nics/mlx5.rst +++ b/dpdk/doc/guides/nics/mlx5.rst -@@ -1121,6 +1121,9 @@ for an additional list of options shared with other mlx5 drivers. +@@ -455,8 +455,12 @@ Limitations + encapsulation actions. 
+ - For NIC Rx flow, supports ``MARK``, ``COUNT``, ``QUEUE``, ``RSS`` in the + sample actions list. +- - For E-Switch mirroring flow, supports ``RAW ENCAP``, ``Port ID``, +- ``VXLAN ENCAP``, ``NVGRE ENCAP`` in the sample actions list. ++ - For E-Switch mirroring flow, supports ``RAW_ENCAP``, ``PORT_ID``, ++ ``VXLAN_ENCAP``, ``NVGRE_ENCAP`` in the sample actions list. ++ - For E-Switch mirroring flow with sample ratio = 1, the ``ENCAP`` action ++ supports uplink port only. ++ - For E-Switch mirroring flow with sample ratio = 1, the ``PORT`` and ``JUMP`` actions ++ are not supported without presented ``ENCAP`` action in the sample actions list. + - For ConnectX-5 trusted device, the application metadata with SET_TAG index 0 + is not supported before ``RTE_FLOW_ACTION_TYPE_SAMPLE`` action. + +@@ -1121,6 +1125,9 @@ for an additional list of options shared with other mlx5 drivers. - 0. If representor matching is disabled, then there will be no implicit item added. As a result, ingress flow rules will match traffic coming to any port, not only the port on which flow rule is created. @@ -11078,7 +15662,7 @@ index 51f51259e3..937fa5c6e0 100644 - 1. If representor matching is enabled (default setting), then each ingress pattern template has an implicit REPRESENTED_PORT -@@ -1547,6 +1550,14 @@ shortened below as "OFED". +@@ -1547,6 +1554,14 @@ shortened below as "OFED". | | | ConnectX-5 | | ConnectX-5 | +-----------------------+-----------------+-----------------+ @@ -11116,11 +15700,88 @@ index 2f7417bddd..07df0d35a2 100644 It is possible to specify a remote netdevice to capture packets from by adding ``remote=foo1``, for example:: +diff --git a/dpdk/doc/guides/nics/virtio.rst b/dpdk/doc/guides/nics/virtio.rst +index c422e7347a..7eba49cc83 100644 +--- a/dpdk/doc/guides/nics/virtio.rst ++++ b/dpdk/doc/guides/nics/virtio.rst +@@ -307,6 +307,7 @@ Prerequisites for Rx interrupts + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + To support Rx interrupts, ++ + #. Check if guest kernel supports VFIO-NOIOMMU: + + Linux started to support VFIO-NOIOMMU since 4.8.0. Make sure the guest +@@ -469,12 +470,16 @@ according to below configuration: + + #. Split virtqueue mergeable path: If Rx mergeable is negotiated, in-order feature is + not negotiated, this path will be selected. ++ + #. Split virtqueue non-mergeable path: If Rx mergeable and in-order feature are not + negotiated, also Rx offload(s) are requested, this path will be selected. ++ + #. Split virtqueue in-order mergeable path: If Rx mergeable and in-order feature are + both negotiated, this path will be selected. ++ + #. Split virtqueue in-order non-mergeable path: If in-order feature is negotiated and + Rx mergeable is not negotiated, this path will be selected. ++ + #. Split virtqueue vectorized Rx path: If Rx mergeable is disabled and no Rx offload + requested, this path will be selected. + +@@ -483,16 +488,21 @@ according to below configuration: + + #. Packed virtqueue mergeable path: If Rx mergeable is negotiated, in-order feature + is not negotiated, this path will be selected. ++ + #. Packed virtqueue non-mergeable path: If Rx mergeable and in-order feature are not + negotiated, this path will be selected. ++ + #. Packed virtqueue in-order mergeable path: If in-order and Rx mergeable feature are + both negotiated, this path will be selected. ++ + #. Packed virtqueue in-order non-mergeable path: If in-order feature is negotiated and + Rx mergeable is not negotiated, this path will be selected. ++ + #. 
Packed virtqueue vectorized Rx path: If building and running environment support + (AVX512 || NEON) && in-order feature is negotiated && Rx mergeable + is not negotiated && TCP_LRO Rx offloading is disabled && vectorized option enabled, + this path will be selected. ++ + #. Packed virtqueue vectorized Tx path: If building and running environment support + (AVX512 || NEON) && in-order feature is negotiated && vectorized option enabled, + this path will be selected. +@@ -570,5 +580,7 @@ or configuration, below steps can help you identify which path you selected and + root cause faster. + + #. Run vhost/virtio test case; ++ + #. Run "perf top" and check virtio Rx/Tx callback names; ++ + #. Identify which virtio path is selected refer to above table. diff --git a/dpdk/doc/guides/platform/cnxk.rst b/dpdk/doc/guides/platform/cnxk.rst -index aadd60b5d4..0eafde71d6 100644 +index aadd60b5d4..dca4e789b3 100644 --- a/dpdk/doc/guides/platform/cnxk.rst +++ b/dpdk/doc/guides/platform/cnxk.rst -@@ -253,7 +253,7 @@ context or stats using debugfs. +@@ -111,7 +111,9 @@ where even VF bound to the first domain and odd VF bound to the second domain. + Typical application usage models are, + + #. Communication between the Linux kernel and DPDK application. ++ + #. Exception path to Linux kernel from DPDK application as SW ``KNI`` replacement. ++ + #. Communication between two different DPDK applications. + + SDP interface +@@ -130,6 +132,7 @@ can bind PF or VF to use SDP interface and it will be enumerated as ethdev ports + The primary use case for SDP is to enable the smart NIC use case. Typical usage models are, + + #. Communication channel between remote host and cnxk SoC over PCIe. ++ + #. Transfer packets received from network interface to remote host over PCIe and + vice-versa. + +@@ -253,7 +256,7 @@ context or stats using debugfs. Enable ``debugfs`` by: @@ -11369,6 +16030,19 @@ index d7307a29bb..7733424aac 100644 Now we can arm the event timer with ``rte_event_timer_arm_burst()``: +diff --git a/dpdk/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/dpdk/doc/guides/prog_guide/generic_segmentation_offload_lib.rst +index e605b86376..30d13bcc61 100644 +--- a/dpdk/doc/guides/prog_guide/generic_segmentation_offload_lib.rst ++++ b/dpdk/doc/guides/prog_guide/generic_segmentation_offload_lib.rst +@@ -204,7 +204,7 @@ To segment an outgoing packet, an application must: + - a flag, that indicates whether the IPv4 headers of output segments should + contain fixed or incremental ID values. + +-2. Set the appropriate ol_flags in the mbuf. ++#. Set the appropriate ol_flags in the mbuf. + + - The GSO library use the value of an mbuf's ``ol_flags`` attribute to + determine how a packet should be segmented. It is the application's diff --git a/dpdk/doc/guides/prog_guide/graph_lib.rst b/dpdk/doc/guides/prog_guide/graph_lib.rst index 1cfdc86433..4ab0623f44 100644 --- a/dpdk/doc/guides/prog_guide/graph_lib.rst @@ -11959,6 +16633,91 @@ index 3e6242803d..d0b7833a2f 100644 - Queue operations are asynchronous and not thread-safe. - Operations can thus be invoked by the app's datapath, +diff --git a/dpdk/doc/guides/prog_guide/rte_security.rst b/dpdk/doc/guides/prog_guide/rte_security.rst +index 7418e35c1b..ae8b0aaef3 100644 +--- a/dpdk/doc/guides/prog_guide/rte_security.rst ++++ b/dpdk/doc/guides/prog_guide/rte_security.rst +@@ -671,68 +671,27 @@ Security session configuration + + Security Session configuration structure is defined as ``rte_security_session_conf`` + +-.. 
code-block:: c +- +- struct rte_security_session_conf { +- enum rte_security_session_action_type action_type; +- /**< Type of action to be performed on the session */ +- enum rte_security_session_protocol protocol; +- /**< Security protocol to be configured */ +- union { +- struct rte_security_ipsec_xform ipsec; +- struct rte_security_macsec_xform macsec; +- struct rte_security_pdcp_xform pdcp; +- struct rte_security_docsis_xform docsis; +- }; +- /**< Configuration parameters for security session */ +- struct rte_crypto_sym_xform *crypto_xform; +- /**< Security Session Crypto Transformations */ +- void *userdata; +- /**< Application specific userdata to be saved with session */ +- }; ++.. literalinclude:: ../../../lib/security/rte_security.h ++ :language: c ++ :start-after: Structure rte_security_session_conf 8< ++ :end-before: >8 End of structure rte_security_session_conf. + + The configuration structure reuses the ``rte_crypto_sym_xform`` struct for crypto related + configuration. The ``rte_security_session_action_type`` struct is used to specify whether the + session is configured for Lookaside Protocol offload or Inline Crypto or Inline Protocol + Offload. + +-.. code-block:: c +- +- enum rte_security_session_action_type { +- RTE_SECURITY_ACTION_TYPE_NONE, +- /**< No security actions */ +- RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, +- /**< Crypto processing for security protocol is processed inline +- * during transmission +- */ +- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, +- /**< All security protocol processing is performed inline during +- * transmission +- */ +- RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, +- /**< All security protocol processing including crypto is performed +- * on a lookaside accelerator +- */ +- RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO +- /**< Similar to ACTION_TYPE_NONE but crypto processing for security +- * protocol is processed synchronously by a CPU. +- */ +- }; ++.. literalinclude:: ../../../lib/security/rte_security.h ++ :language: c ++ :start-after: Enumeration of rte_security_session_action_type 8< ++ :end-before: >8 End enumeration of rte_security_session_action_type. + + The ``rte_security_session_protocol`` is defined as + +-.. code-block:: c +- +- enum rte_security_session_protocol { +- RTE_SECURITY_PROTOCOL_IPSEC = 1, +- /**< IPsec Protocol */ +- RTE_SECURITY_PROTOCOL_MACSEC, +- /**< MACSec Protocol */ +- RTE_SECURITY_PROTOCOL_PDCP, +- /**< PDCP Protocol */ +- RTE_SECURITY_PROTOCOL_DOCSIS, +- /**< DOCSIS Protocol */ +- }; ++.. literalinclude:: ../../../lib/security/rte_security.h ++ :language: c ++ :start-after: Enumeration of rte_security_session_protocol 8< ++ :end-before: >8 End enumeration of rte_security_session_protocol. + + Currently the library defines configuration parameters for IPsec and PDCP only. + For other protocols like MACSec, structures and enums are defined as place holders diff --git a/dpdk/doc/guides/rawdevs/ntb.rst b/dpdk/doc/guides/rawdevs/ntb.rst index 2bb115d13f..f8befc6594 100644 --- a/dpdk/doc/guides/rawdevs/ntb.rst @@ -12020,10 +16779,10 @@ index 2bb115d13f..f8befc6594 100644 +- 3rd Generation Intel® Xeon® Scalable Processors. +- 2nd Generation Intel® Xeon® Scalable Processors. 
diff --git a/dpdk/doc/guides/rel_notes/release_22_11.rst b/dpdk/doc/guides/rel_notes/release_22_11.rst -index 26e0560725..764017b5e5 100644 +index 26e0560725..7ce5c436c0 100644 --- a/dpdk/doc/guides/rel_notes/release_22_11.rst +++ b/dpdk/doc/guides/rel_notes/release_22_11.rst -@@ -805,3 +805,722 @@ Tested Platforms +@@ -805,3 +805,1094 @@ Tested Platforms ~~~~~~~~~~~~~ * drivers: fix symbol exports when map is omitted @@ -12746,6 +17505,378 @@ index 26e0560725..764017b5e5 100644 +* Intel(R) Testing + + * Cryptodev: Performance drop for 1c1t scenario ++ ++22.11.4 Release Notes ++--------------------- ++ ++ ++22.11.4 Fixes ++~~~~~~~~~~~~~ ++ ++* app/bbdev: fix link with NXP LA12XX ++* app/dumpcap: allow multiple invocations ++* app/dumpcap: fix mbuf pool ring type ++* app/pipeline: add sigint handler ++* app/procinfo: adjust format of RSS info ++* app/procinfo: fix RSS info ++* app/procinfo: remove unnecessary rte_malloc ++* app/test: disable graph auto test for windows ++* app/test: fix reference to master in bonding test ++* app/testpmd: add explicit check for tunnel TSO ++* app/testpmd: fix early exit from signal ++* app/testpmd: fix help string ++* app/testpmd: fix primary process not polling all queues ++* app/testpmd: fix tunnel TSO capability check ++* app/testpmd: fix tunnel TSO configuration ++* app/testpmd: remove useless check in TSO command ++* baseband/acc: fix ACC100 HARQ input alignment ++* baseband/acc: fix TB mode on VRB1 ++* build: add libarchive to optional external dependencies ++* bus/dpaa: fix build with asserts for GCC 13 ++* bus/ifpga: fix driver header dependency ++* bus/pci: add PASID control ++* bus/pci: fix device ID log ++* ci: fix race on container image name ++* common/cnxk: fix aura disable handling ++* common/cnxk: fix default flow action setting ++* common/cnxk: fix different size bit operations ++* common/cnxk: fix DPI memzone name ++* common/cnxk: fix incorrect aura ID ++* common/cnxk: fix pool buffer size in opaque mode ++* common/cnxk: fix RSS key configuration ++* common/cnxk: fix SDP channel mask ++* common/cnxk: fix xstats for different packet sizes ++* common/cnxk: remove dead Meson code ++* common/cnxk: replace direct API usage in REE ++* common/mlx5: fix controller index parsing ++* common/mlx5: replace use of PMD log type ++* config/arm: fix aarch32 build with GCC 13 ++* config: fix RISC-V native build ++* crypto/cnxk: fix IPsec CCM and GCM capabilities ++* cryptodev: add missing doc for security context ++* crypto/dpaa2_sec: fix debug prints ++* crypto/dpaa_sec: fix debug prints ++* crypto/ipsec_mb: add dependency check for cross build ++* crypto/nitrox: fix panic with high number of segments ++* crypto/openssl: fix memory leaks in asym session ++* crypto/qat: fix raw API null algorithm digest ++* dma/cnxk: fix chunk buffer failure return code ++* dma/cnxk: fix device reconfigure ++* dma/cnxk: fix device state ++* doc: fix hns3 build option about max queue number ++* doc: fix RSS flow description in hns3 guide ++* doc: fix some ordered lists ++* doc: remove number of commands in vDPA guide ++* doc: remove restriction on ixgbe vector support ++* doc: replace code blocks with includes in security guide ++* doc: update features in hns3 guide ++* doc: update kernel module entry in QAT guide ++* doc: update versions recommendations for i40e and ice ++* eal/riscv: fix vector type alignment ++* eal/unix: fix firmware reading with external xz helper ++* eal/windows: fix build with recent MinGW ++* ethdev: account for smaller MTU when setting default ++* 
ethdev: add check in async flow action query ++* ethdev: fix 32-bit build with GCC 13 ++* ethdev: fix ESP packet type description ++* ethdev: fix function name in comment ++* event/cnxk: fix CASP usage for clang ++* event/cnxk: fix context flush in port cleanup ++* event/cnxk: fix getwork mode devargs parsing ++* event/cnxk: fix return values for capability API ++* eventdev/crypto: fix circular buffer full case ++* eventdev/eth_rx: fix timestamp field register in mbuf ++* eventdev: fix alignment padding ++* eventdev: fix device pointer for vdev-based devices ++* eventdev: fix missing driver names in info struct ++* eventdev: fix symbol export for port maintenance ++* event/dlb2: disable PASID ++* event/dlb2: fix disable PASID ++* event/dlb2: fix missing queue ordering capability flag ++* event/dlb2: fix name check in self-test ++* event/sw: fix ordering corruption with op release ++* event/sw: remove obsolete comment ++* examples/ethtool: fix pause configuration ++* examples/ipsec-secgw: fix partial overflow ++* fib6: fix adding default route as first route ++* fib: fix adding default route overwriting entire table ++* gpu/cuda: fix build with external GDRCopy ++* hash: align SSE lookup to scalar implementation ++* malloc: remove return from void functions ++* mem: fix deadlock with multiprocess ++* mempool: clarify enqueue/dequeue ops documentation ++* mempool/cnxk: fix alloc from non-EAL threads ++* mempool/cnxk: fix free from non-EAL threads ++* mempool: fix default ops for an empty mempool ++* mempool: fix get function documentation ++* meter: fix RFC4115 trTCM API Doxygen ++* net/af_packet: fix Rx and Tx queue state ++* net/af_xdp: fix Rx and Tx queue state ++* net/ark: support single function with multiple port ++* net/avp: fix Rx and Tx queue state ++* net/axgbe: identify CPU with cpuid ++* net/bnx2x: fix Rx and Tx queue state ++* net/bnxt: fix Rx and Tx queue state ++* net/bonding: fix header for C++ ++* net/bonding: fix link status callback stop ++* net/bonding: fix possible overrun ++* net/bonding: fix Rx and Tx queue state ++* net/cnxk: fix data offset in vector Tx ++* net/cnxk: fix uninitialized variable ++* net/cxgbe: fix Rx and Tx queue state ++* net/dpaa2: fix Rx and Tx queue state ++* net/dpaa: fix Rx and Tx queue state ++* net/e1000: fix Rx and Tx queue state ++* net/ena: fix Rx and Tx queue state ++* net/enetc: fix Rx and Tx queue state ++* net/enic: avoid extra unlock in MTU set ++* net/enic: fix Rx and Tx queue state ++* net/gve: fix max MTU limit ++* net/gve: fix RX buffer size alignment ++* net/gve: update max Rx packet length to be based on MTU ++* net/hinic: fix Rx and Tx queue state ++* net/hns3: fix double stats for IMP and global reset ++* net/hns3: fix error code for multicast resource ++* net/hns3: fix flushing multicast MAC address ++* net/hns3: fix ignored reset event ++* net/hns3: fix IMP or global reset ++* net/hns3: fix LRO offload to report ++* net/hns3: fix mailbox sync ++* net/hns3: fix multiple reset detected log ++* net/hns3: fix order in NEON Rx ++* net/hns3: fix reset event status ++* net/hns3: fix setting DCB capability ++* net/hns3: fix some error logs ++* net/hns3: fix some return values ++* net/hns3: fix traffic management dump text alignment ++* net/hns3: fix traffic management thread safety ++* net/hns3: fix typo in function name ++* net/hns3: fix unchecked Rx free threshold ++* net/hns3: fix uninitialized hash algo value ++* net/hns3: fix VF default MAC modified when set failed ++* net/hns3: fix VF reset handler interruption ++* net/hns3: 
keep set/get algo key functions local ++* net/hns3: refactor interrupt state query ++* net/hns3: remove reset log in secondary ++* net/i40e: fix buffer leak on Rx reconfiguration ++* net/i40e: fix FDIR queue receives broadcast packets ++* net/iavf: fix checksum offloading ++* net/iavf: fix ESN session update ++* net/iavf: fix indent in Tx path ++* net/iavf: fix port stats clearing ++* net/iavf: fix TSO with big segments ++* net/iavf: fix Tx debug ++* net/iavf: fix Tx offload flags check ++* net/iavf: fix Tx offload mask ++* net/iavf: fix Tx preparation ++* net/iavf: fix VLAN offload strip flag ++* net/iavf: remove log from Tx prepare function ++* net/iavf: unregister interrupt handler before FD close ++* net/ice: fix crash on closing representor ports ++* net/ice: fix DCF port statistics ++* net/ice: fix initial link status ++* net/ice: fix L1 check interval ++* net/ice: fix TM configuration clearing ++* net/ice: fix TSO with big segments ++* net/ice: fix Tx preparation ++* net/ice: remove log from Tx prepare function ++* net/ice: write timestamp to first segment in scattered Rx ++* net/ipn3ke: fix Rx and Tx queue state ++* net/mana: add 32-bit short doorbell ++* net/mana: add missing new line to data path logs ++* net/mana: enable 32-bit build ++* net/memif: fix Rx and Tx queue state ++* net/mlx4: fix Rx and Tx queue state ++* net/mlx5: fix counter query during port close ++* net/mlx5: fix decap action checking in sample flow ++* net/mlx5: fix destroying external representor flow ++* net/mlx5: fix E-Switch mirror flow rule validation ++* net/mlx5: fix flow thread safety flag for HWS ++* net/mlx5: fix flow workspace double free in Windows ++* net/mlx5: fix hairpin queue states ++* net/mlx5: fix hairpin queue unbind ++* net/mlx5: fix jump ipool entry size ++* net/mlx5: fix LACP redirection in Rx domain ++* net/mlx5: fix leak in sysfs port name translation ++* net/mlx5: fix missing flow rules for external SQ ++* net/mlx5: fix MPRQ stride size check ++* net/mlx5: fix multi-segment Tx inline data length ++* net/mlx5: fix NIC flow capability query ++* net/mlx5: fix offset size in conntrack flow action ++* net/mlx5: fix shared Rx queue list management ++* net/mlx5: fix unlock mismatch ++* net/mlx5: fix use after free on Rx queue start ++* net/mlx5: fix validation of sample encap flow action ++* net/mlx5/hws: fix field copy bind ++* net/mlx5/hws: fix integrity bits level ++* net/mlx5: zero UDP checksum over IPv4 in encapsulation ++* net/mvneta: fix Rx and Tx queue state ++* net/mvpp2: fix Rx and Tx queue state ++* net/netvsc: increase VSP response timeout to 60 seconds ++* net/nfp: fix control message packets ++* net/nfp: fix crash on close ++* net/nfp: fix DMA error after abnormal exit ++* net/nfp: fix initialization of physical representors ++* net/nfp: fix link status interrupt ++* net/nfp: fix reconfigure logic in PF initialization ++* net/nfp: fix reconfigure logic in VF initialization ++* net/nfp: fix reconfigure logic of set MAC address ++* net/nfp: fix Rx and Tx queue state ++* net/ngbe: add proper memory barriers in Rx ++* net/ngbe: check process type in close operation ++* net/ngbe: fix flow control ++* net/ngbe: fix Rx and Tx queue state ++* net/ngbe: keep link down after device close ++* net/ngbe: prevent NIC from slowing down link speed ++* net/ngbe: reconfigure MAC Rx when link update ++* net/null: fix Rx and Tx queue state ++* net/octeon_ep: fix Rx and Tx queue state ++* net/octeontx: fix Rx and Tx queue state ++* net/pfe: fix Rx and Tx queue state ++* net/ring: fix Rx and Tx queue 
state ++* net/sfc: account for data offset on Tx ++* net/sfc: add missing error code indication to MAE init path ++* net/sfc: fix Rx and Tx queue state ++* net/sfc: remove null dereference in log ++* net/sfc: set max Rx packet length for representors ++* net/softnic: fix Rx and Tx queue state ++* net/tap: fix IPv4 checksum offloading ++* net/tap: fix L4 checksum offloading ++* net/tap: fix RSS for fragmented packets ++* net/tap: use MAC address parse API instead of local parser ++* net/txgbe: add proper memory barriers in Rx ++* net/txgbe: add Tx queue maximum limit ++* net/txgbe: check process type in close operation ++* net/txgbe: fix GRE tunnel packet checksum ++* net/txgbe: fix out of bound access ++* net/txgbe: fix Rx and Tx queue state ++* net/txgbe: keep link down after device close ++* net/txgbe: reconfigure MAC Rx when link update ++* net/vhost: fix Rx and Tx queue state ++* net/virtio: fix link state interrupt vector setting ++* net/virtio: fix missing next flag in Tx packed ring ++* net/virtio: fix Rx and Tx queue state ++* net/vmxnet3: fix Rx and Tx queue state ++* pdump: fix error number on IPC response ++* random: initialize state for unregistered non-EAL threads ++* rawdev: fix device class in log message ++* Revert "eventdev: fix alignment padding" ++* Revert "net/iavf: fix abnormal disable HW interrupt" ++* test/bbdev: assert failed test for queue configure ++* test/bbdev: fix Python script subprocess ++* test/bonding: add missing check ++* test/bonding: fix uninitialized RSS configuration ++* test/bonding: remove unreachable statement ++* test/crypto: fix IV in some vectors ++* test/crypto: fix return value for GMAC case ++* test/crypto: fix typo in asym tests ++* test/crypto: skip some synchronous tests with CPU crypto ++* test/event: fix crypto null device creation ++* test: fix named test macro ++* test/hash: fix creation error log ++* test/security: fix IPv6 next header field ++* usertools/pmdinfo: fix usage typos ++* vdpa/mlx5: fix unregister kick handler order ++* vhost: fix checking virtqueue access in stats API ++* vhost: fix check on virtqueue access in async registration ++* vhost: fix check on virtqueue access in in-flight getter ++* vhost: fix missing check on virtqueue access ++* vhost: fix missing lock protection in power monitor API ++* vhost: fix missing spinlock unlock ++* vhost: fix missing vring call check on virtqueue access ++ ++22.11.4 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* Redhat Testing ++ ++ * Test scenarios ++ ++ * Guest with device assignment(PF) throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 4Q throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q - cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server ovs reconnect ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++ * Test Versions and device ++ ++ * 
qemu-kvm-7.2.0 ++ * kernel 5.14 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++* Nvidia(R) Testing ++ ++ * Basic functionality: ++ ++ * Send and receive multiple types of traffic. ++ * testpmd xstats counter test. ++ * testpmd timestamp test. ++ * Changing/checking link status through testpmd. ++ * rte_flow tests (https://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads) ++ * RSS tests. ++ * VLAN filtering, stripping, and insertion tests. ++ * Checksum and TSO tests. ++ * ptype tests. ++ * link_status_interrupt example application tests. ++ * l3fwd-power example application tests. ++ * Multi-process example applications tests. ++ * Hardware LRO tests. ++ * Regex application tests. ++ * Buffer Split tests. ++ * Tx scheduling tests. ++ ++ * Test platform ++ ++ * NIC: ConnectX-6 Dx / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.10-1.1.9.0 / Firmware: 22.39.2048 ++ * NIC: ConnectX-7 / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.10-1.1.9.0 / Firmware: 28.39.2048 ++ * DPU: BlueField-2 / DOCA SW version: 2.5.0 / Firmware: 24.39.2048 ++ ++ * OS/driver: ++ ++ * Ubuntu 20.04.6 with MLNX_OFED_LINUX-23.10-1.1.9.0. ++ * Ubuntu 20.04.6 with rdma-core master (9016f34). ++ * Ubuntu 20.04.6 with rdma-core v28.0. ++ * Fedora 38 with rdma-core v44.0. ++ * Fedora 40 (Rawhide) with rdma-core v48.0. ++ * OpenSUSE Leap 15.5 with rdma-core v42.0. ++ * Windows Server 2019 with Clang 16.0.6. ++ ++* Intel(R) Testing ++ ++ * Basic Intel(R) NIC testing ++ ++ * Build & CFLAG compile: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu20.04, Ubuntu22.04, Fedora38, RHEL8.7, RHEL9.2, FreeBSD13.2, SUSE15, CentOS7.9, openEuler22.03-SP1,OpenAnolis8.8 etc. ++ * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. ++ * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. ++ * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc. ++ * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc. ++ * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc. ++ ++ * Basic cryptodev and virtio testing ++ ++ * Virtio: both function and performance test are covered. Such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMAWARE ESXI 8.0, etc. ++ * Cryptodev: ++ ++ * Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc. ++ * Performance test: test scenarios including Thoughput Performance/Cryptodev Latency, etc. ++ ++22.11.4 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++ diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst index 3ada3575ba..51621b692f 100644 --- a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst @@ -12796,6 +17927,20 @@ index 49d50136bc..7c86bf484a 100644 The application should start successfully and display as follows: +diff --git a/dpdk/doc/guides/sample_app_ug/vdpa.rst b/dpdk/doc/guides/sample_app_ug/vdpa.rst +index cb9c4f2169..51e69fc20d 100644 +--- a/dpdk/doc/guides/sample_app_ug/vdpa.rst ++++ b/dpdk/doc/guides/sample_app_ug/vdpa.rst +@@ -38,8 +38,7 @@ where + * --iface specifies the path prefix of the UNIX domain socket file, e.g. + /tmp/vhost-user-, then the socket files will be named as /tmp/vhost-user- + (n starts from 0). 
+-* --interactive means run the vdpa sample in interactive mode, currently 4 +- internal cmds are supported: ++* --interactive means run the vDPA sample in interactive mode: + + 1. help: show help message + 2. list: list all available vdpa devices diff --git a/dpdk/doc/guides/tools/cryptoperf.rst b/dpdk/doc/guides/tools/cryptoperf.rst index c77e253417..f30784674d 100644 --- a/dpdk/doc/guides/tools/cryptoperf.rst @@ -12808,8 +17953,19 @@ index c77e253417..f30784674d 100644 aes-cbc-mac aes-cmac aes-gmac +diff --git a/dpdk/doc/guides/tools/pmdinfo.rst b/dpdk/doc/guides/tools/pmdinfo.rst +index a9217de4ee..fdb9030171 100644 +--- a/dpdk/doc/guides/tools/pmdinfo.rst ++++ b/dpdk/doc/guides/tools/pmdinfo.rst +@@ -82,5 +82,5 @@ Get only the required kernel modules for a given device: + .. code-block:: console + + $ dpdk-pmdinfo.py /usr/bin/dpdk-testpmd | \ +- jq '.[] | select(.pci_ids[] | .vendor == "15b3" and .device == "1013").kmod' ++ jq '.[] | select(.pci_ids[]? | .vendor == "15b3" and .device == "1013").kmod' + "* ib_uverbs & mlx5_core & mlx5_ib" diff --git a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c -index ba8247d47e..7757db81fe 100644 +index ba8247d47e..56a11e98b8 100644 --- a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c +++ b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c @@ -622,6 +622,7 @@ acc100_dev_close(struct rte_bbdev *dev) @@ -12829,6 +17985,15 @@ index ba8247d47e..7757db81fe 100644 /* Report the AQ Index */ return (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx; } +@@ -1220,7 +1221,7 @@ acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, + - op->ldpc_dec.n_filler); + + /* Alignment on next 64B - Already enforced from HC output */ +- harq_in_length = RTE_ALIGN_FLOOR(harq_in_length, ACC_HARQ_ALIGN_64B); ++ harq_in_length = RTE_ALIGN_CEIL(harq_in_length, ACC_HARQ_ALIGN_64B); + + /* Stronger alignment requirement when in decompression mode */ + if (fcw->hcin_decomp_mode > 0) @@ -3422,9 +3423,9 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data, } avail--; @@ -12867,7 +18032,7 @@ index ba8247d47e..7757db81fe 100644 if (unlikely(ops == 0)) return 0; diff --git a/dpdk/drivers/baseband/acc/rte_acc200_pmd.c b/dpdk/drivers/baseband/acc/rte_acc200_pmd.c -index c5123cfef0..b25a83a588 100644 +index c5123cfef0..4fc078fe26 100644 --- a/dpdk/drivers/baseband/acc/rte_acc200_pmd.c +++ b/dpdk/drivers/baseband/acc/rte_acc200_pmd.c @@ -1848,6 +1848,9 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, @@ -12891,7 +18056,17 @@ index c5123cfef0..b25a83a588 100644 /* Set SDone on last CB descriptor for TB mode. */ desc->req.sdone_enable = 1; -@@ -2079,6 +2086,10 @@ enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -1903,7 +1910,8 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, + uint16_t init_enq_descs = enq_descs; + uint32_t in_offset = 0, out_offset = 0; + +- input_len_B = ((op->ldpc_enc.basegraph == 1 ? 22 : 10) * op->ldpc_enc.z_c) >> 3; ++ input_len_B = ((op->ldpc_enc.basegraph == 1 ? 
22 : 10) * op->ldpc_enc.z_c ++ - op->ldpc_enc.n_filler) >> 3; + + if (check_bit(op->ldpc_enc.op_flags, RTE_BBDEV_LDPC_CRC_24B_ATTACH)) + input_len_B -= 3; +@@ -2079,6 +2087,10 @@ enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, } } @@ -12902,7 +18077,7 @@ index c5123cfef0..b25a83a588 100644 #ifdef RTE_LIBRTE_BBDEV_DEBUG rte_memdump(stderr, "FCW", &desc->req.fcw_ld, sizeof(desc->req.fcw_ld) - 8); -@@ -2128,6 +2139,9 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -2128,6 +2140,9 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, } while (mbuf_total_left > 0 && r < c) { @@ -12912,7 +18087,7 @@ index c5123cfef0..b25a83a588 100644 if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_DEC_SCATTER_GATHER)) seg_total_left = rte_pktmbuf_data_len(input) - in_offset; else -@@ -2173,6 +2187,10 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -2173,6 +2188,10 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, r++; } @@ -12923,7 +18098,7 @@ index c5123cfef0..b25a83a588 100644 #ifdef RTE_LIBRTE_BBDEV_DEBUG if (check_mbuf_total_left(mbuf_total_left) != 0) return -EINVAL; -@@ -2215,6 +2233,8 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -2215,6 +2234,8 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, r = op->turbo_dec.tb_params.r; while (mbuf_total_left > 0 && r < c) { @@ -12932,7 +18107,7 @@ index c5123cfef0..b25a83a588 100644 seg_total_left = rte_pktmbuf_data_len(input) - in_offset; -@@ -2265,6 +2285,10 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -2265,6 +2286,10 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, r++; } @@ -12943,7 +18118,7 @@ index c5123cfef0..b25a83a588 100644 /* Set SDone on last CB descriptor for TB mode */ desc->req.sdone_enable = 1; -@@ -2636,7 +2660,8 @@ acc200_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data, +@@ -2636,7 +2661,8 @@ acc200_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data, /* Dequeue one encode operations from ACC200 device in CB mode. */ static inline int dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, @@ -12953,7 +18128,7 @@ index c5123cfef0..b25a83a588 100644 { union acc_dma_desc *desc, atom_desc; union acc_dma_rsp_desc rsp; -@@ -2649,6 +2674,9 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, +@@ -2649,6 +2675,9 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, desc = q->ring_addr + desc_idx; atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); @@ -12963,7 +18138,7 @@ index c5123cfef0..b25a83a588 100644 /* Check fdone bit. 
*/ if (!(atom_desc.rsp.val & ACC_FDONE)) return -1; -@@ -2690,7 +2718,7 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, +@@ -2690,7 +2719,7 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, static inline int dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, uint16_t *dequeued_ops, uint32_t *aq_dequeued, @@ -12972,7 +18147,7 @@ index c5123cfef0..b25a83a588 100644 { union acc_dma_desc *desc, *last_desc, atom_desc; union acc_dma_rsp_desc rsp; -@@ -2701,6 +2729,9 @@ dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, +@@ -2701,6 +2730,9 @@ dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, desc = acc_desc_tail(q, *dequeued_descs); atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); @@ -12982,7 +18157,7 @@ index c5123cfef0..b25a83a588 100644 /* Check fdone bit. */ if (!(atom_desc.rsp.val & ACC_FDONE)) return -1; -@@ -2864,7 +2895,7 @@ dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data, +@@ -2864,7 +2896,7 @@ dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data, return 1; } @@ -12991,7 +18166,7 @@ index c5123cfef0..b25a83a588 100644 static inline int dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, uint16_t dequeued_cbs, uint32_t *aq_dequeued) -@@ -2918,8 +2949,12 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, +@@ -2918,8 +2950,12 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, /* CRC invalid if error exists. */ if (!op->status) op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR; @@ -13006,7 +18181,7 @@ index c5123cfef0..b25a83a588 100644 /* Check if this is the last desc in batch (Atomic Queue). */ if (desc->req.last_desc_in_batch) { -@@ -2961,25 +2996,23 @@ acc200_dequeue_enc(struct rte_bbdev_queue_data *q_data, +@@ -2961,25 +2997,23 @@ acc200_dequeue_enc(struct rte_bbdev_queue_data *q_data, cbm = op->turbo_enc.code_block_mode; @@ -13036,7 +18211,7 @@ index c5123cfef0..b25a83a588 100644 q_data->queue_stats.dequeued_count += dequeued_ops; return dequeued_ops; -@@ -3005,15 +3038,13 @@ acc200_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, +@@ -3005,15 +3039,13 @@ acc200_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, if (cbm == RTE_BBDEV_TRANSPORT_BLOCK) ret = dequeue_enc_one_op_tb(q, &ops[dequeued_ops], &dequeued_ops, &aq_dequeued, @@ -13119,6 +18294,36 @@ index 417ec63394..aeb9a76f9e 100644 ext_deps += dep_turbo ext_deps += dependency('flexran_sdk_crc', required: true) ext_deps += dependency('flexran_sdk_rate_matching', required: true) +diff --git a/dpdk/drivers/bus/dpaa/base/qbman/qman.c b/dpdk/drivers/bus/dpaa/base/qbman/qman.c +index 3949bf8712..83db0a534e 100644 +--- a/dpdk/drivers/bus/dpaa/base/qbman/qman.c ++++ b/dpdk/drivers/bus/dpaa/base/qbman/qman.c +@@ -1,7 +1,7 @@ + /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2008-2016 Freescale Semiconductor Inc. 
+- * Copyright 2017,2019 NXP ++ * Copyright 2017,2019-2023 NXP + * + */ + +@@ -897,7 +897,7 @@ mr_loop: + /* Lookup in the retirement table */ + fq = table_find_fq(p, + be32_to_cpu(msg->fq.fqid)); +- DPAA_BUG_ON(!fq); ++ DPAA_BUG_ON(fq != NULL); + fq_state_change(p, fq, &swapped_msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, &swapped_msg); +@@ -909,6 +909,7 @@ mr_loop: + #else + fq = (void *)(uintptr_t)msg->fq.contextB; + #endif ++ DPAA_BUG_ON(fq != NULL); + fq_state_change(p, fq, msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, &swapped_msg); diff --git a/dpdk/drivers/bus/fslmc/mc/mc_sys.c b/dpdk/drivers/bus/fslmc/mc/mc_sys.c index ab9a074835..76fdcd5c8a 100644 --- a/dpdk/drivers/bus/fslmc/mc/mc_sys.c @@ -13136,6 +18341,18 @@ index ab9a074835..76fdcd5c8a 100644 /* Read the response back into the command buffer */ mc_read_response(mc_io->regs, cmd); +diff --git a/dpdk/drivers/bus/ifpga/bus_ifpga_driver.h b/dpdk/drivers/bus/ifpga/bus_ifpga_driver.h +index 7b75c2ddbc..5bbe36d6e0 100644 +--- a/dpdk/drivers/bus/ifpga/bus_ifpga_driver.h ++++ b/dpdk/drivers/bus/ifpga/bus_ifpga_driver.h +@@ -16,6 +16,7 @@ extern "C" { + #endif /* __cplusplus */ + + #include ++#include + #include + #include + #include diff --git a/dpdk/drivers/bus/ifpga/ifpga_bus.c b/dpdk/drivers/bus/ifpga/ifpga_bus.c index bb943b58b5..07e316b38e 100644 --- a/dpdk/drivers/bus/ifpga/ifpga_bus.c @@ -13170,9 +18387,18 @@ index fab3483d9f..fe83e1a04e 100644 #include #include diff --git a/dpdk/drivers/bus/pci/pci_common.c b/dpdk/drivers/bus/pci/pci_common.c -index bc3a7f39fe..e32a9d517a 100644 +index bc3a7f39fe..756e308fdf 100644 --- a/dpdk/drivers/bus/pci/pci_common.c +++ b/dpdk/drivers/bus/pci/pci_common.c +@@ -304,7 +304,7 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr, + } + } + +- RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n", ++ RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%04x) device: "PCI_PRI_FMT" (socket %i)\n", + dr->driver.name, dev->id.vendor_id, dev->id.device_id, + loc->domain, loc->bus, loc->devid, loc->function, + dev->device.numa_node); @@ -448,7 +448,7 @@ pci_cleanup(void) int ret = 0; @@ -13190,6 +18416,60 @@ index bc3a7f39fe..e32a9d517a 100644 /* free interrupt handles */ rte_intr_instance_free(dev->intr_handle); dev->intr_handle = NULL; +@@ -883,6 +884,16 @@ rte_pci_set_bus_master(struct rte_pci_device *dev, bool enable) + return 0; + } + ++int ++rte_pci_pasid_set_state(const struct rte_pci_device *dev, ++ off_t offset, bool enable) ++{ ++ uint16_t pasid = enable; ++ return rte_pci_write_config(dev, &pasid, sizeof(pasid), offset) < 0 ++ ? -1 ++ : 0; ++} ++ + struct rte_pci_bus rte_pci_bus = { + .bus = { + .scan = rte_pci_scan, +diff --git a/dpdk/drivers/bus/pci/rte_bus_pci.h b/dpdk/drivers/bus/pci/rte_bus_pci.h +index b193114fe5..76cbf49ab8 100644 +--- a/dpdk/drivers/bus/pci/rte_bus_pci.h ++++ b/dpdk/drivers/bus/pci/rte_bus_pci.h +@@ -101,6 +101,20 @@ off_t rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap); + __rte_experimental + int rte_pci_set_bus_master(struct rte_pci_device *dev, bool enable); + ++/** ++ * Enable/Disable PASID (Process Address Space ID). ++ * ++ * @param dev ++ * A pointer to a rte_pci_device structure. ++ * @param offset ++ * Offset of the PASID external capability. ++ * @param enable ++ * Flag to enable or disable PASID. ++ */ ++__rte_internal ++int rte_pci_pasid_set_state(const struct rte_pci_device *dev, ++ off_t offset, bool enable); ++ + /** + * Read PCI config space. 
+ * +diff --git a/dpdk/drivers/bus/pci/version.map b/dpdk/drivers/bus/pci/version.map +index 161ab86d3b..f262af3316 100644 +--- a/dpdk/drivers/bus/pci/version.map ++++ b/dpdk/drivers/bus/pci/version.map +@@ -27,6 +27,7 @@ INTERNAL { + global: + + rte_pci_get_sysfs_path; ++ rte_pci_pasid_set_state; + rte_pci_register; + rte_pci_unregister; + }; diff --git a/dpdk/drivers/bus/vdev/vdev.c b/dpdk/drivers/bus/vdev/vdev.c index 41bc07dde7..7974b27295 100644 --- a/dpdk/drivers/bus/vdev/vdev.c @@ -13257,6 +18537,43 @@ index 85105472a1..bdb5433d13 100644 /* Outer header flow label source */ if (!ipsec_xfrm->options.copy_flabel) { sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = +diff --git a/dpdk/drivers/common/cnxk/cnxk_security_ar.h b/dpdk/drivers/common/cnxk/cnxk_security_ar.h +index deb38db0d0..d0151a752c 100644 +--- a/dpdk/drivers/common/cnxk/cnxk_security_ar.h ++++ b/dpdk/drivers/common/cnxk/cnxk_security_ar.h +@@ -17,7 +17,7 @@ + BITS_PER_LONG_LONG) + + #define WORD_SHIFT 6 +-#define WORD_SIZE (1 << WORD_SHIFT) ++#define WORD_SIZE (1ULL << WORD_SHIFT) + #define WORD_MASK (WORD_SIZE - 1) + + #define IPSEC_ANTI_REPLAY_FAILED (-1) +diff --git a/dpdk/drivers/common/cnxk/hw/nix.h b/dpdk/drivers/common/cnxk/hw/nix.h +index 425c335bf3..3246ea6573 100644 +--- a/dpdk/drivers/common/cnxk/hw/nix.h ++++ b/dpdk/drivers/common/cnxk/hw/nix.h +@@ -617,6 +617,7 @@ + #define NIX_RX_ACTIONOP_RSS (0x4ull) + #define NIX_RX_ACTIONOP_PF_FUNC_DROP (0x5ull) + #define NIX_RX_ACTIONOP_MIRROR (0x6ull) ++#define NIX_RX_ACTIONOP_DEFAULT (0xfull) + + #define NIX_RX_VTAGACTION_VTAG0_RELPTR (0x0ull) + #define NIX_RX_VTAGACTION_VTAG1_RELPTR (0x4ull) +diff --git a/dpdk/drivers/common/cnxk/meson.build b/dpdk/drivers/common/cnxk/meson.build +index 849735921c..ca8170624d 100644 +--- a/dpdk/drivers/common/cnxk/meson.build ++++ b/dpdk/drivers/common/cnxk/meson.build +@@ -8,7 +8,6 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64') + subdir_done() + endif + +-config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON' + deps = ['eal', 'pci', 'bus_pci', 'mbuf', 'security'] + sources = files( + 'roc_ae.c', diff --git a/dpdk/drivers/common/cnxk/roc_dev.c b/dpdk/drivers/common/cnxk/roc_dev.c index 59128a3552..33865f43fa 100644 --- a/dpdk/drivers/common/cnxk/roc_dev.c @@ -13269,6 +18586,24 @@ index 59128a3552..33865f43fa 100644 dev->hwcap |= DEV_HWCAP_F_VF; break; } +diff --git a/dpdk/drivers/common/cnxk/roc_dpi.c b/dpdk/drivers/common/cnxk/roc_dpi.c +index 93c8318a3d..0e2f803077 100644 +--- a/dpdk/drivers/common/cnxk/roc_dpi.c ++++ b/dpdk/drivers/common/cnxk/roc_dpi.c +@@ -81,10 +81,10 @@ roc_dpi_configure(struct roc_dpi *roc_dpi) + return rc; + } + +- snprintf(name, sizeof(name), "dpimem%d", roc_dpi->vfid); ++ snprintf(name, sizeof(name), "dpimem%d:%d:%d:%d", pci_dev->addr.domain, pci_dev->addr.bus, ++ pci_dev->addr.devid, pci_dev->addr.function); + buflen = DPI_CMD_QUEUE_SIZE * DPI_CMD_QUEUE_BUFS; +- dpi_mz = plt_memzone_reserve_aligned(name, buflen, 0, +- DPI_CMD_QUEUE_SIZE); ++ dpi_mz = plt_memzone_reserve_aligned(name, buflen, 0, DPI_CMD_QUEUE_SIZE); + if (dpi_mz == NULL) { + plt_err("dpi memzone reserve failed"); + rc = -ENOMEM; diff --git a/dpdk/drivers/common/cnxk/roc_io.h b/dpdk/drivers/common/cnxk/roc_io.h index 13f98ed549..45cbb4e587 100644 --- a/dpdk/drivers/common/cnxk/roc_io.h @@ -13314,9 +18649,18 @@ index 13f98ed549..45cbb4e587 100644 static __plt_always_inline void diff --git a/dpdk/drivers/common/cnxk/roc_mbox.h b/dpdk/drivers/common/cnxk/roc_mbox.h -index 8b0384c737..fd9d3e73cd 100644 +index 8b0384c737..18aa97b84a 100644 --- 
a/dpdk/drivers/common/cnxk/roc_mbox.h +++ b/dpdk/drivers/common/cnxk/roc_mbox.h +@@ -471,7 +471,7 @@ struct lmtst_tbl_setup_req { + + struct cgx_stats_rsp { + struct mbox_msghdr hdr; +-#define CGX_RX_STATS_COUNT 13 ++#define CGX_RX_STATS_COUNT 9 + #define CGX_TX_STATS_COUNT 18 + uint64_t __io rx_stats[CGX_RX_STATS_COUNT]; + uint64_t __io tx_stats[CGX_TX_STATS_COUNT]; @@ -1169,7 +1169,7 @@ struct nix_bp_cfg_req { * so maximum 256 channels are possible. */ @@ -13362,7 +18706,7 @@ index 782536db4c..92ff44888d 100644 plt_err("Failed to get rq mask rc=%d", rc); return rc; diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c b/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c -index c3d94dd0da..4ab4209dba 100644 +index c3d94dd0da..746b3d0a03 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c +++ b/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c @@ -265,7 +265,7 @@ nix_inl_sso_setup(struct nix_inl_dev *inl_dev) @@ -13374,6 +18718,42 @@ index c3d94dd0da..4ab4209dba 100644 if (rc) { plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc); goto destroy_pool; +@@ -643,7 +643,8 @@ no_pool: + } + + /* Setup xaq for hwgrps */ +- rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, inl_dev->xaq.aura_handle, 1); ++ rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, ++ roc_npa_aura_handle_to_aura(inl_dev->xaq.aura_handle), 1); + if (rc) { + plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc); + return rc; +diff --git a/dpdk/drivers/common/cnxk/roc_npa.c b/dpdk/drivers/common/cnxk/roc_npa.c +index ee42434c38..ea58030477 100644 +--- a/dpdk/drivers/common/cnxk/roc_npa.c ++++ b/dpdk/drivers/common/cnxk/roc_npa.c +@@ -115,6 +115,8 @@ npa_aura_pool_fini(struct mbox *mbox, uint32_t aura_id, uint64_t aura_handle) + aura_req->op = NPA_AQ_INSTOP_WRITE; + aura_req->aura.ena = 0; + aura_req->aura_mask.ena = ~aura_req->aura_mask.ena; ++ aura_req->aura.bp_ena = 0; ++ aura_req->aura_mask.bp_ena = ~aura_req->aura_mask.bp_ena; + + rc = mbox_process(mbox); + if (rc < 0) +@@ -370,7 +372,11 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size, + /* Update pool fields */ + pool->stack_base = mz->iova; + pool->ena = 1; +- pool->buf_size = block_size / ROC_ALIGN; ++ /* In opaque mode buffer size must be 0 */ ++ if (!pool->nat_align) ++ pool->buf_size = 0; ++ else ++ pool->buf_size = block_size / ROC_ALIGN; + pool->stack_max_pages = stack_size; + pool->shift = plt_log2_u32(block_count); + pool->shift = pool->shift < 8 ? 
0 : pool->shift - 8; diff --git a/dpdk/drivers/common/cnxk/roc_npa.h b/dpdk/drivers/common/cnxk/roc_npa.h index fed1942404..46b668a310 100644 --- a/dpdk/drivers/common/cnxk/roc_npa.h @@ -13425,14 +18805,95 @@ index fed1942404..46b668a310 100644 line_count = status->count; diff --git a/dpdk/drivers/common/cnxk/roc_npc.c b/dpdk/drivers/common/cnxk/roc_npc.c -index b38389b18a..5e1ca6bc03 100644 +index b38389b18a..a5b5f95dec 100644 --- a/dpdk/drivers/common/cnxk/roc_npc.c +++ b/dpdk/drivers/common/cnxk/roc_npc.c -@@ -1242,12 +1242,39 @@ npc_vtag_action_program(struct roc_npc *roc_npc, +@@ -634,11 +634,15 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, + if (req_act == ROC_NPC_ACTION_TYPE_VLAN_STRIP) { + /* Only VLAN action is provided */ + flow->npc_action = NIX_RX_ACTIONOP_UCAST; +- } else if (req_act & +- (ROC_NPC_ACTION_TYPE_PF | ROC_NPC_ACTION_TYPE_VF)) { +- flow->npc_action = NIX_RX_ACTIONOP_UCAST; +- if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) +- flow->npc_action |= (uint64_t)rq << 20; ++ } else if (req_act & (ROC_NPC_ACTION_TYPE_PF | ROC_NPC_ACTION_TYPE_VF)) { ++ /* Check if any other action is set */ ++ if ((req_act == ROC_NPC_ACTION_TYPE_PF) || (req_act == ROC_NPC_ACTION_TYPE_VF)) { ++ flow->npc_action = NIX_RX_ACTIONOP_DEFAULT; ++ } else { ++ flow->npc_action = NIX_RX_ACTIONOP_UCAST; ++ if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) ++ flow->npc_action |= (uint64_t)rq << 20; ++ } + } else if (req_act & ROC_NPC_ACTION_TYPE_DROP) { + flow->npc_action = NIX_RX_ACTIONOP_DROP; + } else if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) { +@@ -649,8 +653,7 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, + } else if (req_act & ROC_NPC_ACTION_TYPE_SEC) { + flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC; + flow->npc_action |= (uint64_t)rq << 20; +- } else if (req_act & +- (ROC_NPC_ACTION_TYPE_FLAG | ROC_NPC_ACTION_TYPE_MARK)) { ++ } else if (req_act & (ROC_NPC_ACTION_TYPE_FLAG | ROC_NPC_ACTION_TYPE_MARK)) { + flow->npc_action = NIX_RX_ACTIONOP_UCAST; + } else if (req_act & ROC_NPC_ACTION_TYPE_COUNT) { + /* Keep ROC_NPC_ACTION_TYPE_COUNT_ACT always at the end +@@ -832,9 +835,35 @@ npc_rss_action_configure(struct roc_npc *roc_npc, + uint8_t key[ROC_NIX_RSS_KEY_LEN]; + const uint8_t *key_ptr; + uint8_t flowkey_algx; ++ uint32_t key_len; + uint16_t *reta; + int rc; + ++ roc_nix_rss_key_get(roc_nix, key); ++ if (rss->key == NULL) { ++ key_ptr = key; ++ } else { ++ key_len = rss->key_len; ++ if (key_len > ROC_NIX_RSS_KEY_LEN) ++ key_len = ROC_NIX_RSS_KEY_LEN; ++ ++ for (i = 0; i < key_len; i++) { ++ if (key[i] != rss->key[i]) { ++ plt_err("RSS key config not supported"); ++ plt_err("New Key:"); ++ for (i = 0; i < key_len; i++) ++ plt_dump_no_nl("0x%.2x ", rss->key[i]); ++ plt_dump_no_nl("\n"); ++ plt_err("Configured Key:"); ++ for (i = 0; i < ROC_NIX_RSS_KEY_LEN; i++) ++ plt_dump_no_nl("0x%.2x ", key[i]); ++ plt_dump_no_nl("\n"); ++ return -ENOTSUP; ++ } ++ } ++ key_ptr = rss->key; ++ } ++ + rc = npc_rss_free_grp_get(npc, &rss_grp_idx); + /* RSS group :0 is not usable for flow rss action */ + if (rc < 0 || rss_grp_idx == 0) +@@ -849,13 +878,6 @@ npc_rss_action_configure(struct roc_npc *roc_npc, + + *rss_grp = rss_grp_idx; + +- if (rss->key == NULL) { +- roc_nix_rss_key_default_fill(roc_nix, key); +- key_ptr = key; +- } else { +- key_ptr = rss->key; +- } +- + roc_nix_rss_key_set(roc_nix, key_ptr); + + /* If queue count passed in the rss action is less than +@@ -1242,12 +1264,40 @@ npc_vtag_action_program(struct roc_npc *roc_npc, return 0; } -+static void 
++void +roc_npc_sdp_channel_get(struct roc_npc *roc_npc, uint16_t *chan_base, uint16_t *chan_mask) +{ + struct roc_nix *roc_nix = roc_npc->roc_nix; @@ -13447,8 +18908,9 @@ index b38389b18a..5e1ca6bc03 100644 + num_bits = (sizeof(uint32_t) * 8) - __builtin_clz(range) - 1; + /* Set mask for (15 - numbits) MSB bits */ + *chan_mask = (uint16_t)~GENMASK(num_bits, 0); ++ *chan_mask &= 0xFFF; + } else { -+ *chan_mask = (uint16_t)GENMASK(15, 0); ++ *chan_mask = (uint16_t)GENMASK(11, 0); + } + + mask = (uint16_t)GENMASK(num_bits, 0); @@ -13468,7 +18930,7 @@ index b38389b18a..5e1ca6bc03 100644 struct roc_npc_flow *flow, *flow_iter; struct npc_parse_state parse_state; struct npc_flow_list *list; -@@ -1260,11 +1287,9 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, +@@ -1260,11 +1310,9 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, npc->sdp_channel = roc_npc->sdp_channel; npc->sdp_channel_mask = roc_npc->sdp_channel_mask; } else { @@ -13484,7 +18946,7 @@ index b38389b18a..5e1ca6bc03 100644 } diff --git a/dpdk/drivers/common/cnxk/roc_npc.h b/dpdk/drivers/common/cnxk/roc_npc.h -index 1b4e5521cb..60f9c5d634 100644 +index 1b4e5521cb..25e916ce08 100644 --- a/dpdk/drivers/common/cnxk/roc_npc.h +++ b/dpdk/drivers/common/cnxk/roc_npc.h @@ -123,6 +123,17 @@ struct roc_ipv6_hdr { @@ -13505,6 +18967,13 @@ index 1b4e5521cb..60f9c5d634 100644 struct roc_npc_flow_item_ipv6 { struct roc_ipv6_hdr hdr; /**< IPv6 header definition. */ uint32_t has_hop_ext : 1; +@@ -374,4 +385,6 @@ int __roc_api roc_npc_mcam_init(struct roc_npc *roc_npc, + struct roc_npc_flow *flow, int mcam_id); + int __roc_api roc_npc_mcam_move(struct roc_npc *roc_npc, uint16_t old_ent, + uint16_t new_ent); ++void __roc_api roc_npc_sdp_channel_get(struct roc_npc *roc_npc, uint16_t *chan_base, ++ uint16_t *chan_mask); + #endif /* _ROC_NPC_H_ */ diff --git a/dpdk/drivers/common/cnxk/roc_npc_mcam.c b/dpdk/drivers/common/cnxk/roc_npc_mcam.c index a725cabc57..3bf35cdf48 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_mcam.c @@ -13594,7 +19063,7 @@ index a725cabc57..3bf35cdf48 100644 int diff --git a/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c b/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c -index fe57811a84..cc1599ef33 100644 +index fe57811a84..52f7d96b41 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c +++ b/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c @@ -69,8 +69,10 @@ static const char *const ltype_str[NPC_MAX_LID][NPC_MAX_LT] = { @@ -13610,6 +19079,17 @@ index fe57811a84..cc1599ef33 100644 [NPC_LID_LB][0] = "NONE", [NPC_LID_LB][NPC_LT_LB_CTAG] = "LB_CTAG", [NPC_LID_LB][NPC_LT_LB_STAG_QINQ] = "LB_STAG_QINQ", +@@ -444,6 +446,10 @@ npc_flow_dump_rx_action(FILE *file, uint64_t npc_action) + plt_strlcpy(index_name, "Multicast/mirror table index", + NPC_MAX_FIELD_NAME_SIZE); + break; ++ case NIX_RX_ACTIONOP_DEFAULT: ++ fprintf(file, "NIX_RX_ACTIONOP_DEFAULT (%" PRIu64 ")\n", ++ (uint64_t)NIX_RX_ACTIONOP_DEFAULT); ++ break; + default: + plt_err("Unknown NIX_RX_ACTIONOP found"); + return; diff --git a/dpdk/drivers/common/cnxk/roc_npc_parse.c b/dpdk/drivers/common/cnxk/roc_npc_parse.c index ff00c746d6..e695b755d7 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_parse.c @@ -14119,6 +19599,41 @@ index 8bdabc116d..fda3073cba 100644 npc_mask_is_supported(const char *mask, const char *hw_mask, int len) { /* +diff --git a/dpdk/drivers/common/cnxk/roc_ree.c b/dpdk/drivers/common/cnxk/roc_ree.c +index 1eb2ae7272..b6392658c3 100644 +--- a/dpdk/drivers/common/cnxk/roc_ree.c ++++ 
b/dpdk/drivers/common/cnxk/roc_ree.c +@@ -441,7 +441,7 @@ static void + roc_ree_lf_err_intr_unregister(struct roc_ree_vf *vf, uint16_t msix_off, + uintptr_t base) + { +- struct rte_pci_device *pci_dev = vf->pci_dev; ++ struct plt_pci_device *pci_dev = vf->pci_dev; + + /* Disable error interrupts */ + plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C); +@@ -468,7 +468,7 @@ static int + roc_ree_lf_err_intr_register(struct roc_ree_vf *vf, uint16_t msix_off, + uintptr_t base) + { +- struct rte_pci_device *pci_dev = vf->pci_dev; ++ struct plt_pci_device *pci_dev = vf->pci_dev; + int ret; + + /* Disable error interrupts */ +diff --git a/dpdk/drivers/common/cnxk/roc_ree.h b/dpdk/drivers/common/cnxk/roc_ree.h +index e138e4de66..bf994e7124 100644 +--- a/dpdk/drivers/common/cnxk/roc_ree.h ++++ b/dpdk/drivers/common/cnxk/roc_ree.h +@@ -68,7 +68,7 @@ struct roc_ree_qp { + /**< Base address where BAR is mapped */ + struct roc_ree_pending_queue pend_q; + /**< Pending queue */ +- rte_iova_t iq_dma_addr; ++ plt_iova_t iq_dma_addr; + /**< Instruction queue address */ + uint32_t roc_regexdev_jobid; + /**< Job ID */ diff --git a/dpdk/drivers/common/cnxk/roc_se.h b/dpdk/drivers/common/cnxk/roc_se.h index c357c19c0b..5b0ddac42d 100644 --- a/dpdk/drivers/common/cnxk/roc_se.h @@ -14143,6 +19658,18 @@ index c357c19c0b..5b0ddac42d 100644 union cpt_inst_w4 template_w4; /* Below fields are accessed by hardware */ union { +diff --git a/dpdk/drivers/common/cnxk/version.map b/dpdk/drivers/common/cnxk/version.map +index 17f0ec6b48..ae9eaf360c 100644 +--- a/dpdk/drivers/common/cnxk/version.map ++++ b/dpdk/drivers/common/cnxk/version.map +@@ -353,6 +353,7 @@ INTERNAL { + roc_npc_mcam_write_entry; + roc_npc_mcam_read_counter; + roc_npc_profile_name_get; ++ roc_npc_sdp_channel_get; + roc_npc_validate_portid_action; + roc_ot_ipsec_inb_sa_init; + roc_ot_ipsec_outb_sa_init; diff --git a/dpdk/drivers/common/iavf/iavf_common.c b/dpdk/drivers/common/iavf/iavf_common.c index 855a0ab2f5..dc7662bc1b 100644 --- a/dpdk/drivers/common/iavf/iavf_common.c @@ -14331,10 +19858,33 @@ index 7e1575efc8..b13ae29844 100644 if libmtcr_ul_found has_sym_args += [ diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c -index aafff60eeb..2ebb8ac8b6 100644 +index aafff60eeb..41345e1597 100644 --- a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c +++ b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c -@@ -555,7 +555,7 @@ mlx5_os_pd_prepare(struct mlx5_common_device *cdev) +@@ -96,10 +96,11 @@ mlx5_translate_port_name(const char *port_name_in, + char ctrl = 0, pf_c1, pf_c2, vf_c1, vf_c2, eol; + char *end; + int sc_items; ++ int32_t ctrl_num = -1; + +- sc_items = sscanf(port_name_in, "%c%d", +- &ctrl, &port_info_out->ctrl_num); ++ sc_items = sscanf(port_name_in, "%c%d", &ctrl, &ctrl_num); + if (sc_items == 2 && ctrl == 'c') { ++ port_info_out->ctrl_num = ctrl_num; + port_name_in++; /* 'c' */ + port_name_in += snprintf(NULL, 0, "%d", + port_info_out->ctrl_num); +@@ -266,7 +267,7 @@ mlx5_glue_path(char *buf, size_t size) + goto error; + return buf; + error: +- RTE_LOG(ERR, PMD, "unable to append \"-glue\" to last component of" ++ DRV_LOG(ERR, "unable to append \"-glue\" to last component of" + " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"), please" + " re-configure DPDK"); + return NULL; +@@ -555,7 +556,7 @@ mlx5_os_pd_prepare(struct mlx5_common_device *cdev) } static struct ibv_device * @@ -14343,7 +19893,7 @@ index aafff60eeb..2ebb8ac8b6 100644 { int n; struct ibv_device **ibv_list = 
mlx5_glue->get_device_list(&n); -@@ -564,6 +564,8 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr) +@@ -564,6 +565,8 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr) uint8_t guid2[32] = {0}; int ret1, ret2 = -1; struct rte_pci_addr paddr; @@ -14352,7 +19902,7 @@ index aafff60eeb..2ebb8ac8b6 100644 if (ibv_list == NULL || !n) { rte_errno = ENOSYS; -@@ -579,11 +581,11 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr) +@@ -579,11 +582,11 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr) if (ret1 > 0) ret2 = mlx5_get_device_guid(&paddr, guid2, sizeof(guid2)); /* Bond device can bond secondary PCIe */ @@ -14369,7 +19919,7 @@ index aafff60eeb..2ebb8ac8b6 100644 ibv_match = ibv_list[n]; break; } -@@ -697,7 +699,7 @@ mlx5_os_get_ibv_dev(const struct rte_device *dev) +@@ -697,7 +700,7 @@ mlx5_os_get_ibv_dev(const struct rte_device *dev) struct ibv_device *ibv; if (mlx5_dev_is_pci(dev)) @@ -14463,18 +20013,28 @@ index 73178ce0f3..fdf03f2a53 100644 switch (pci_dev->id.device_id) { case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c -index 59cebb530f..5742f9e831 100644 +index 59cebb530f..9fdca2fecc 100644 --- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c +++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c -@@ -1002,6 +1002,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, +@@ -543,7 +543,7 @@ mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx, + MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION | + MLX5_HCA_CAP_OPMOD_GET_CUR); + if (!hcattr) { +- RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities"); ++ DRV_LOG(DEBUG, "Failed to query devx VDPA capabilities"); + vdpa_attr->valid = 0; + } else { + vdpa_attr->valid = 1; +@@ -1002,6 +1002,8 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD); attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop); + attr->nic_flow_table = MLX5_GET(cmd_hca_cap, hcattr, nic_flow_table); ++ attr->nic_flow_table = MLX5_GET(cmd_hca_cap, hcattr, nic_flow_table); attr->max_flow_counter_15_0 = MLX5_GET(cmd_hca_cap, hcattr, max_flow_counter_15_0); attr->max_flow_counter_31_16 = MLX5_GET(cmd_hca_cap, hcattr, -@@ -1013,7 +1014,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, +@@ -1013,7 +1015,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, attr->flow_access_aso_opc_mod = MLX5_GET(cmd_hca_cap, hcattr, flow_access_aso_opc_mod); if (attr->crypto) { @@ -16315,10 +21875,10 @@ index 221a0a5235..a5271d7227 100644 RTE_LOG(INFO, PMD, "CCP PMD already initialized\n"); return -EFAULT; diff --git a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c -index 6c28f8942e..eb4e6ff966 100644 +index 6c28f8942e..b4d1925d23 100644 --- a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c +++ b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c -@@ -936,8 +936,8 @@ static const struct rte_cryptodev_capabilities sec_caps_aes[] = { +@@ -851,8 +851,8 @@ static const struct rte_cryptodev_capabilities sec_caps_aes[] = { .increment = 4 }, .iv_size = { @@ -16421,6 +21981,59 @@ index b07fc22858..32e2b2cd64 100644 } cpt_inst_w5.s.gather_sz = ((i + 2) / 3); +diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +index c25e40030b..cf1339f266 100644 +--- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c ++++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +@@ -1676,7 +1676,7 @@ dpaa2_sec_dump(struct rte_crypto_op *op) + + 
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) + sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); +-#ifdef RTE_LIBRTE_SECURITY ++#ifdef RTE_LIB_SECURITY + else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) + sess = SECURITY_GET_SESS_PRIV(op->sym->session); + #endif +@@ -1703,7 +1703,7 @@ dpaa2_sec_dump(struct rte_crypto_op *op) + sess->digest_length, sess->status, + sess->ext_params.aead_ctxt.auth_only_len, + sess->ext_params.aead_ctxt.auth_cipher_text); +-#ifdef RTE_LIBRTE_SECURITY ++#ifdef RTE_LIB_SECURITY + printf("PDCP session params:\n" + "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" + "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n" +diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c +index db52683847..7807c83e54 100644 +--- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c ++++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c +@@ -671,7 +671,7 @@ dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp) + + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) + sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); +-#ifdef RTE_LIBRTE_SECURITY ++#ifdef RTE_LIB_SECURITY + else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) + sess = SECURITY_GET_SESS_PRIV(op->sym->session); + #endif +@@ -682,7 +682,7 @@ dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp) + + cdb = &sess->cdb; + rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb)); +-#ifdef RTE_LIBRTE_SECURITY ++#ifdef RTE_LIB_SECURITY + printf("\nsession protocol type = %d\n", sess->proto_alg); + #endif + printf("\n****************************************\n" +@@ -707,7 +707,7 @@ dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp) + sess->iv.length, sess->iv.offset, + sess->digest_length, sess->auth_only_len, + sess->auth_cipher_text); +-#ifdef RTE_LIBRTE_SECURITY ++#ifdef RTE_LIB_SECURITY + printf("PDCP session params:\n" + "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" + "\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:" diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c index 71e02cd051..30f919cd40 100644 --- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c @@ -16496,6 +22109,22 @@ index 50b789a29b..64f2b4b604 100644 /* Safe to proceed, return 0 */ return 0; +diff --git a/dpdk/drivers/crypto/ipsec_mb/meson.build b/dpdk/drivers/crypto/ipsec_mb/meson.build +index ec147d2110..4100d921ff 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/meson.build ++++ b/dpdk/drivers/crypto/ipsec_mb/meson.build +@@ -16,6 +16,11 @@ lib = cc.find_library('IPSec_MB', required: false) + if not lib.found() + build = false + reason = 'missing dependency, "libIPSec_MB"' ++# if the lib is found, check it's the right format ++elif meson.version().version_compare('>=0.60') and not cc.links( ++ 'int main(void) {return 0;}', dependencies: lib) ++ build = false ++ reason = 'incompatible dependency, "libIPSec_MB"' + else + ext_deps += lib + diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index 147a38932d..ac20d01937 100644 --- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -16571,8 +22200,92 @@ index 8ed069f428..e64df1a462 100644 if (processed_ops != 1) break; +diff --git a/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c b/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c +index 9edb0cc00f..d7e8ff7db4 100644 +--- a/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c ++++ b/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c +@@ -10,8 +10,11 @@ + 
#include "nitrox_sym_reqmgr.h" + #include "nitrox_logs.h" + +-#define MAX_SGBUF_CNT 16 +-#define MAX_SGCOMP_CNT 5 ++#define MAX_SUPPORTED_MBUF_SEGS 16 ++/* IV + AAD + ORH + CC + DIGEST */ ++#define ADDITIONAL_SGBUF_CNT 5 ++#define MAX_SGBUF_CNT (MAX_SUPPORTED_MBUF_SEGS + ADDITIONAL_SGBUF_CNT) ++#define MAX_SGCOMP_CNT (RTE_ALIGN_MUL_CEIL(MAX_SGBUF_CNT, 4) / 4) + /* SLC_STORE_INFO */ + #define MIN_UDD_LEN 16 + /* PKT_IN_HDR + SLC_STORE_INFO */ +@@ -303,7 +306,7 @@ create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf, + datalen -= mlen; + } + +- RTE_VERIFY(cnt <= MAX_SGBUF_CNT); ++ RTE_ASSERT(cnt <= MAX_SGBUF_CNT); + sgtbl->map_bufs_cnt = cnt; + return 0; + } +@@ -375,7 +378,7 @@ create_cipher_outbuf(struct nitrox_softreq *sr) + sr->out.sglist[cnt].virt = &sr->resp.completion; + cnt++; + +- RTE_VERIFY(cnt <= MAX_SGBUF_CNT); ++ RTE_ASSERT(cnt <= MAX_SGBUF_CNT); + sr->out.map_bufs_cnt = cnt; + + create_sgcomp(&sr->out); +@@ -600,7 +603,7 @@ create_aead_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest) + resp.completion); + sr->out.sglist[cnt].virt = &sr->resp.completion; + cnt++; +- RTE_VERIFY(cnt <= MAX_SGBUF_CNT); ++ RTE_ASSERT(cnt <= MAX_SGBUF_CNT); + sr->out.map_bufs_cnt = cnt; + + create_sgcomp(&sr->out); +@@ -774,6 +777,14 @@ nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op, + { + int err; + ++ if (unlikely(op->sym->m_src->nb_segs > MAX_SUPPORTED_MBUF_SEGS || ++ (op->sym->m_dst && ++ op->sym->m_dst->nb_segs > MAX_SUPPORTED_MBUF_SEGS))) { ++ NITROX_LOG(ERR, "Mbuf segments not supported. " ++ "Max supported %d\n", MAX_SUPPORTED_MBUF_SEGS); ++ return -ENOTSUP; ++ } ++ + softreq_init(sr, sr->iova); + sr->ctx = ctx; + sr->op = op; +diff --git a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h +index ed6841e460..4e224b040b 100644 +--- a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h ++++ b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h +@@ -189,6 +189,8 @@ struct openssl_asym_session { + struct dh { + DH *dh_key; + uint32_t key_op; ++ BIGNUM *p; ++ BIGNUM *g; + #if (OPENSSL_VERSION_NUMBER >= 0x30000000L) + OSSL_PARAM_BLD * param_bld; + OSSL_PARAM_BLD *param_bld_peer; +@@ -198,6 +200,10 @@ struct openssl_asym_session { + DSA *dsa; + #if (OPENSSL_VERSION_NUMBER >= 0x30000000L) + OSSL_PARAM_BLD * param_bld; ++ BIGNUM *p; ++ BIGNUM *g; ++ BIGNUM *q; ++ BIGNUM *priv_key; + #endif + } s; + } u; diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -index 05449b6e98..6825b0469e 100644 +index 05449b6e98..6ae31cb5cd 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c @@ -696,7 +696,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, @@ -16691,7 +22404,15 @@ index 05449b6e98..6825b0469e 100644 return 0; err_dsa_sign: -@@ -2633,7 +2636,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, +@@ -1957,6 +1960,7 @@ err_dsa_sign: + EVP_PKEY_CTX_free(key_ctx); + if (dsa_ctx) + EVP_PKEY_CTX_free(dsa_ctx); ++ EVP_PKEY_free(pkey); + return -1; + } + +@@ -2633,7 +2637,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, if (EVP_PKEY_verify_recover(rsa_ctx, tmp, &outlen, op->rsa.sign.data, op->rsa.sign.length) <= 0) { @@ -16700,7 +22421,7 @@ index 05449b6e98..6825b0469e 100644 goto err_rsa; } -@@ -2645,7 +2648,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, +@@ -2645,7 +2649,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, op->rsa.message.length)) { 
OPENSSL_LOG(ERR, "RSA sign Verification failed"); } @@ -16709,8 +22430,210 @@ index 05449b6e98..6825b0469e 100644 break; default: +diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c +index defed4429e..24d6d48262 100644 +--- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c ++++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c +@@ -1087,22 +1087,21 @@ err_rsa: + } + case RTE_CRYPTO_ASYM_XFORM_DH: + { +- BIGNUM *p = NULL; +- BIGNUM *g = NULL; +- +- p = BN_bin2bn((const unsigned char *) ++ DH *dh = NULL; ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L) ++ BIGNUM **p = &asym_session->u.dh.p; ++ BIGNUM **g = &asym_session->u.dh.g; ++ *p = BN_bin2bn((const unsigned char *) + xform->dh.p.data, + xform->dh.p.length, +- p); +- g = BN_bin2bn((const unsigned char *) ++ *p); ++ *g = BN_bin2bn((const unsigned char *) + xform->dh.g.data, + xform->dh.g.length, +- g); +- if (!p || !g) ++ *g); ++ if (!*p || !*g) + goto err_dh; + +- DH *dh = NULL; +-#if (OPENSSL_VERSION_NUMBER >= 0x30000000L) + OSSL_PARAM_BLD *param_bld = NULL; + param_bld = OSSL_PARAM_BLD_new(); + if (!param_bld) { +@@ -1112,9 +1111,9 @@ err_rsa: + if ((!OSSL_PARAM_BLD_push_utf8_string(param_bld, + "group", "ffdhe2048", 0)) + || (!OSSL_PARAM_BLD_push_BN(param_bld, +- OSSL_PKEY_PARAM_FFC_P, p)) ++ OSSL_PKEY_PARAM_FFC_P, *p)) + || (!OSSL_PARAM_BLD_push_BN(param_bld, +- OSSL_PKEY_PARAM_FFC_G, g))) { ++ OSSL_PKEY_PARAM_FFC_G, *g))) { + OSSL_PARAM_BLD_free(param_bld); + goto err_dh; + } +@@ -1129,9 +1128,9 @@ err_rsa: + if ((!OSSL_PARAM_BLD_push_utf8_string(param_bld_peer, + "group", "ffdhe2048", 0)) + || (!OSSL_PARAM_BLD_push_BN(param_bld_peer, +- OSSL_PKEY_PARAM_FFC_P, p)) ++ OSSL_PKEY_PARAM_FFC_P, *p)) + || (!OSSL_PARAM_BLD_push_BN(param_bld_peer, +- OSSL_PKEY_PARAM_FFC_G, g))) { ++ OSSL_PKEY_PARAM_FFC_G, *g))) { + OSSL_PARAM_BLD_free(param_bld); + OSSL_PARAM_BLD_free(param_bld_peer); + goto err_dh; +@@ -1140,6 +1139,20 @@ err_rsa: + asym_session->u.dh.param_bld = param_bld; + asym_session->u.dh.param_bld_peer = param_bld_peer; + #else ++ BIGNUM *p = NULL; ++ BIGNUM *g = NULL; ++ ++ p = BN_bin2bn((const unsigned char *) ++ xform->dh.p.data, ++ xform->dh.p.length, ++ p); ++ g = BN_bin2bn((const unsigned char *) ++ xform->dh.g.data, ++ xform->dh.g.length, ++ g); ++ if (!p || !g) ++ goto err_dh; ++ + dh = DH_new(); + if (dh == NULL) { + OPENSSL_LOG(ERR, +@@ -1158,41 +1171,48 @@ err_rsa: + + err_dh: + OPENSSL_LOG(ERR, " failed to set dh params\n"); ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L) ++ BN_free(*p); ++ BN_free(*g); ++#else + BN_free(p); + BN_free(g); ++#endif + return -1; + } + case RTE_CRYPTO_ASYM_XFORM_DSA: + { + #if (OPENSSL_VERSION_NUMBER >= 0x30000000L) +- BIGNUM *p = NULL, *g = NULL; +- BIGNUM *q = NULL, *priv_key = NULL; ++ BIGNUM **p = &asym_session->u.s.p; ++ BIGNUM **g = &asym_session->u.s.g; ++ BIGNUM **q = &asym_session->u.s.q; ++ BIGNUM **priv_key = &asym_session->u.s.priv_key; + BIGNUM *pub_key = BN_new(); + BN_zero(pub_key); + OSSL_PARAM_BLD *param_bld = NULL; + +- p = BN_bin2bn((const unsigned char *) ++ *p = BN_bin2bn((const unsigned char *) + xform->dsa.p.data, + xform->dsa.p.length, +- p); ++ *p); + +- g = BN_bin2bn((const unsigned char *) ++ *g = BN_bin2bn((const unsigned char *) + xform->dsa.g.data, + xform->dsa.g.length, +- g); ++ *g); + +- q = BN_bin2bn((const unsigned char *) ++ *q = BN_bin2bn((const unsigned char *) + xform->dsa.q.data, + xform->dsa.q.length, +- q); +- if (!p || !q || !g) ++ *q); ++ if (!*p || !*q || !*g) + goto err_dsa; + +- 
priv_key = BN_bin2bn((const unsigned char *) ++ *priv_key = BN_bin2bn((const unsigned char *) + xform->dsa.x.data, + xform->dsa.x.length, +- priv_key); +- if (priv_key == NULL) ++ *priv_key); ++ if (*priv_key == NULL) + goto err_dsa; + + param_bld = OSSL_PARAM_BLD_new(); +@@ -1201,10 +1221,11 @@ err_dh: + goto err_dsa; + } + +- if (!OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_FFC_P, p) +- || !OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_FFC_G, g) +- || !OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_FFC_Q, q) +- || !OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_PRIV_KEY, priv_key)) { ++ if (!OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_FFC_P, *p) ++ || !OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_FFC_G, *g) ++ || !OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_FFC_Q, *q) ++ || !OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_PRIV_KEY, ++ *priv_key)) { + OSSL_PARAM_BLD_free(param_bld); + OPENSSL_LOG(ERR, "failed to allocate resources\n"); + goto err_dsa; +@@ -1268,18 +1289,25 @@ err_dh: + if (ret) { + DSA_free(dsa); + OPENSSL_LOG(ERR, "Failed to set keys\n"); +- return -1; ++ goto err_dsa; + } + asym_session->u.s.dsa = dsa; + asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DSA; + break; + #endif + err_dsa: ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L) ++ BN_free(*p); ++ BN_free(*q); ++ BN_free(*g); ++ BN_free(*priv_key); ++#else + BN_free(p); + BN_free(q); + BN_free(g); + BN_free(priv_key); + BN_free(pub_key); ++#endif + return -1; + } + default: +@@ -1357,10 +1385,16 @@ static void openssl_reset_asym_session(struct openssl_asym_session *sess) + if (sess->u.dh.dh_key) + DH_free(sess->u.dh.dh_key); + #endif ++ BN_clear_free(sess->u.dh.p); ++ BN_clear_free(sess->u.dh.g); + break; + case RTE_CRYPTO_ASYM_XFORM_DSA: + #if (OPENSSL_VERSION_NUMBER >= 0x30000000L) + sess->u.s.param_bld = NULL; ++ BN_clear_free(sess->u.s.p); ++ BN_clear_free(sess->u.s.q); ++ BN_clear_free(sess->u.s.g); ++ BN_clear_free(sess->u.s.priv_key); + #else + if (sess->u.s.dsa) + DSA_free(sess->u.s.dsa); diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c -index 7f00f6097d..1f6f63c831 100644 +index 7f00f6097d..84d58accc7 100644 --- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c +++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c @@ -140,6 +140,9 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = { @@ -16732,6 +22655,53 @@ index 7f00f6097d..1f6f63c831 100644 if (unlikely(ofs.raw == UINT64_MAX)) { op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; return -EINVAL; +@@ -602,6 +605,8 @@ qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx, + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest = digest; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); +@@ -615,7 +620,12 @@ qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx, + if (unlikely(data_len < 0)) + return -1; + +- enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs, ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } ++ ++ enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest, auth_iv, ofs, + (uint32_t)data_len); + + dp_ctx->tail = tail; +@@ -637,6 +647,8 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx, + uint32_t tail; + struct 
icp_qat_fw_la_bulk_req *req; + int32_t data_len; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest = NULL; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { +@@ -669,7 +681,13 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx, + + if (unlikely(data_len < 0)) + break; +- enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i], ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } else ++ job_digest = &vec->digest[i]; ++ ++ enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest, + &vec->auth_iv[i], ofs, (uint32_t)data_len); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + } diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c index b1e5fa9a82..b219a418ba 100644 --- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c @@ -16807,7 +22777,7 @@ index 524c291340..7972c7cfeb 100644 auth_iova_end = cvec->iova + remaining_off; diff --git a/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c -index 91d5cfa71d..2709b0ab04 100644 +index 91d5cfa71d..888dea4ad9 100644 --- a/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +++ b/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c @@ -274,7 +274,7 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx, @@ -16828,6 +22798,107 @@ index 91d5cfa71d..2709b0ab04 100644 if (unlikely(ofs.raw == UINT64_MAX)) { op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; return -EINVAL; +@@ -607,6 +607,8 @@ qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx, + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest = digest; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); +@@ -620,8 +622,13 @@ qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx, + if (unlikely(data_len < 0)) + return -1; + +- enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs, +- (uint32_t)data_len); ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } ++ ++ enqueue_one_auth_job_gen1(ctx, req, job_digest, auth_iv, ofs, ++ (uint32_t)data_len); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; +@@ -646,6 +653,8 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest = NULL; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { +@@ -678,7 +687,14 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + + if (unlikely(data_len < 0)) + break; +- enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i], ++ ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } else ++ job_digest = &vec->digest[i]; ++ ++ enqueue_one_auth_job_gen1(ctx, req, job_digest, + &vec->auth_iv[i], ofs, (uint32_t)data_len); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + +@@ -715,6 +731,8 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx, + struct icp_qat_fw_la_bulk_req *req; + 
int32_t data_len; + uint32_t tail = dp_ctx->tail; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest = digest; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); +@@ -727,8 +745,13 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx, + if (unlikely(data_len < 0)) + return -1; + ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } ++ + if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs, +- NULL, 0, cipher_iv, digest, auth_iv, ofs, ++ NULL, 0, cipher_iv, job_digest, auth_iv, ofs, + (uint32_t)data_len))) + return -1; + +@@ -756,6 +779,8 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { +@@ -789,10 +814,16 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + if (unlikely(data_len < 0)) + break; + ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } else ++ job_digest = &vec->digest[i]; ++ + if (unlikely(enqueue_one_chain_job_gen1(ctx, req, + vec->src_sgl[i].vec, vec->src_sgl[i].num, + NULL, 0, +- &vec->iv[i], &vec->digest[i], ++ &vec->iv[i], job_digest, + &vec->auth_iv[i], ofs, (uint32_t)data_len))) + break; + diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c index 08e92191a3..18f99089e8 100644 --- a/dpdk/drivers/crypto/qat/qat_sym.c @@ -16923,6 +22994,212 @@ index 9d1ce46622..4e8bbf0e09 100644 }; struct scheduler_parse_map { +diff --git a/dpdk/drivers/dma/cnxk/cnxk_dmadev.c b/dpdk/drivers/dma/cnxk/cnxk_dmadev.c +index a6f4a31e0e..72dec60a9c 100644 +--- a/dpdk/drivers/dma/cnxk/cnxk_dmadev.c ++++ b/dpdk/drivers/dma/cnxk/cnxk_dmadev.c +@@ -45,14 +45,22 @@ cnxk_dmadev_configure(struct rte_dma_dev *dev, + int rc = 0; + + RTE_SET_USED(conf); +- RTE_SET_USED(conf); +- RTE_SET_USED(conf_sz); + RTE_SET_USED(conf_sz); ++ + dpivf = dev->fp_obj->dev_private; ++ ++ if (dpivf->flag & CNXK_DPI_DEV_CONFIG) ++ return rc; ++ + rc = roc_dpi_configure(&dpivf->rdpi); +- if (rc < 0) ++ if (rc < 0) { + plt_err("DMA configure failed err = %d", rc); ++ goto done; ++ } ++ ++ dpivf->flag |= CNXK_DPI_DEV_CONFIG; + ++done: + return rc; + } + +@@ -69,6 +77,9 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, + RTE_SET_USED(vchan); + RTE_SET_USED(conf_sz); + ++ if (dpivf->flag & CNXK_DPI_VCHAN_CONFIG) ++ return 0; ++ + header->cn9k.pt = DPI_HDR_PT_ZBW_CA; + + switch (conf->direction) { +@@ -108,6 +119,7 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, + dpivf->conf.c_desc.max_cnt = DPI_MAX_DESC; + dpivf->conf.c_desc.head = 0; + dpivf->conf.c_desc.tail = 0; ++ dpivf->flag |= CNXK_DPI_VCHAN_CONFIG; + + return 0; + } +@@ -125,6 +137,10 @@ cn10k_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, + RTE_SET_USED(vchan); + RTE_SET_USED(conf_sz); + ++ ++ if (dpivf->flag & CNXK_DPI_VCHAN_CONFIG) ++ return 0; ++ + header->cn10k.pt = DPI_HDR_PT_ZBW_CA; + + switch (conf->direction) { +@@ -164,6 +180,7 @@ cn10k_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, + dpivf->conf.c_desc.max_cnt = DPI_MAX_DESC; + dpivf->conf.c_desc.head = 0; + dpivf->conf.c_desc.tail 
= 0; ++ dpivf->flag |= CNXK_DPI_VCHAN_CONFIG; + + return 0; + } +@@ -173,10 +190,15 @@ cnxk_dmadev_start(struct rte_dma_dev *dev) + { + struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; + ++ if (dpivf->flag & CNXK_DPI_DEV_START) ++ return 0; ++ + dpivf->desc_idx = 0; + dpivf->num_words = 0; + roc_dpi_enable(&dpivf->rdpi); + ++ dpivf->flag |= CNXK_DPI_DEV_START; ++ + return 0; + } + +@@ -187,6 +209,8 @@ cnxk_dmadev_stop(struct rte_dma_dev *dev) + + roc_dpi_disable(&dpivf->rdpi); + ++ dpivf->flag &= ~CNXK_DPI_DEV_START; ++ + return 0; + } + +@@ -198,6 +222,8 @@ cnxk_dmadev_close(struct rte_dma_dev *dev) + roc_dpi_disable(&dpivf->rdpi); + roc_dpi_dev_fini(&dpivf->rdpi); + ++ dpivf->flag = 0; ++ + return 0; + } + +@@ -206,8 +232,7 @@ __dpi_queue_write(struct roc_dpi *dpi, uint64_t *cmds, int cmd_count) + { + uint64_t *ptr = dpi->chunk_base; + +- if ((cmd_count < DPI_MIN_CMD_SIZE) || (cmd_count > DPI_MAX_CMD_SIZE) || +- cmds == NULL) ++ if ((cmd_count < DPI_MIN_CMD_SIZE) || (cmd_count > DPI_MAX_CMD_SIZE) || cmds == NULL) + return -EINVAL; + + /* +@@ -223,11 +248,15 @@ __dpi_queue_write(struct roc_dpi *dpi, uint64_t *cmds, int cmd_count) + int count; + uint64_t *new_buff = dpi->chunk_next; + +- dpi->chunk_next = +- (void *)roc_npa_aura_op_alloc(dpi->aura_handle, 0); ++ dpi->chunk_next = (void *)roc_npa_aura_op_alloc(dpi->aura_handle, 0); + if (!dpi->chunk_next) { +- plt_err("Failed to alloc next buffer from NPA"); +- return -ENOMEM; ++ plt_dp_dbg("Failed to alloc next buffer from NPA"); ++ ++ /* NPA failed to allocate a buffer. Restoring chunk_next ++ * to its original address. ++ */ ++ dpi->chunk_next = new_buff; ++ return -ENOSPC; + } + + /* +@@ -261,13 +290,17 @@ __dpi_queue_write(struct roc_dpi *dpi, uint64_t *cmds, int cmd_count) + /* queue index may be greater than pool size */ + if (dpi->chunk_head >= dpi->pool_size_m1) { + new_buff = dpi->chunk_next; +- dpi->chunk_next = +- (void *)roc_npa_aura_op_alloc(dpi->aura_handle, +- 0); ++ dpi->chunk_next = (void *)roc_npa_aura_op_alloc(dpi->aura_handle, 0); + if (!dpi->chunk_next) { +- plt_err("Failed to alloc next buffer from NPA"); +- return -ENOMEM; ++ plt_dp_dbg("Failed to alloc next buffer from NPA"); ++ ++ /* NPA failed to allocate a buffer. Restoring chunk_next ++ * to its original address. 
++ */ ++ dpi->chunk_next = new_buff; ++ return -ENOSPC; + } ++ + /* Write next buffer address */ + *ptr = (uint64_t)new_buff; + dpi->chunk_base = new_buff; +@@ -628,8 +661,7 @@ static const struct rte_dma_dev_ops cnxk_dmadev_ops = { + }; + + static int +-cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, +- struct rte_pci_device *pci_dev) ++cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) + { + struct cnxk_dpi_vf_s *dpivf = NULL; + char name[RTE_DEV_NAME_MAX_LEN]; +@@ -648,8 +680,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, + memset(name, 0, sizeof(name)); + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + +- dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, +- sizeof(*dpivf)); ++ dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, sizeof(*dpivf)); + if (dmadev == NULL) { + plt_err("dma device allocation failed for %s", name); + return -ENOMEM; +@@ -682,6 +713,8 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, + if (rc < 0) + goto err_out_free; + ++ dmadev->state = RTE_DMA_DEV_READY; ++ + return 0; + + err_out_free: +diff --git a/dpdk/drivers/dma/cnxk/cnxk_dmadev.h b/dpdk/drivers/dma/cnxk/cnxk_dmadev.h +index e1f5694f50..d58554787f 100644 +--- a/dpdk/drivers/dma/cnxk/cnxk_dmadev.h ++++ b/dpdk/drivers/dma/cnxk/cnxk_dmadev.h +@@ -15,6 +15,10 @@ + */ + #define DPI_REQ_CDATA 0xFF + ++#define CNXK_DPI_DEV_CONFIG (1ULL << 0) ++#define CNXK_DPI_VCHAN_CONFIG (1ULL << 1) ++#define CNXK_DPI_DEV_START (1ULL << 2) ++ + struct cnxk_dpi_compl_s { + uint64_t cdata; + void *cb_data; +@@ -39,6 +43,7 @@ struct cnxk_dpi_vf_s { + uint64_t cmd[DPI_MAX_CMD_SIZE]; + uint32_t num_words; + uint16_t desc_idx; ++ uint16_t flag; + }; + + #endif diff --git a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c index d5a5f08ecc..8968bb853b 100644 --- a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c @@ -17046,8 +23323,104 @@ index 9b6da655fd..daf35eccce 100644 return 0; } +diff --git a/dpdk/drivers/event/cnxk/cn10k_eventdev.c b/dpdk/drivers/event/cnxk/cn10k_eventdev.c +index 30c922b5fc..d8e7c83462 100644 +--- a/dpdk/drivers/event/cnxk/cn10k_eventdev.c ++++ b/dpdk/drivers/event/cnxk/cn10k_eventdev.c +@@ -197,12 +197,14 @@ cn10k_sso_hws_reset(void *arg, void *hws) + cnxk_sso_hws_swtag_untag(base + + SSOW_LF_GWS_OP_SWTAG_UNTAG); + plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED); ++ } else if (pend_tt != SSO_TT_EMPTY) { ++ plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH); + } + + /* Wait for desched to complete. */ + do { + pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE); +- } while (pend_state & BIT_ULL(58)); ++ } while (pend_state & (BIT_ULL(58) | BIT_ULL(56))); + + switch (dev->gw_mode) { + case CN10K_GW_MODE_PREF: +@@ -711,11 +713,16 @@ cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port, + cn10k_sso_hws_get_work_empty(ws, &ev, + (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F | + NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F); +- if (is_pend && ev.u64) { ++ if (is_pend && ev.u64) + if (flush_cb) + flush_cb(event_dev->data->dev_id, ev, args); ++ ptag = (plt_read64(ws->base + SSOW_LF_GWS_TAG) >> 32) & SSO_TT_EMPTY; ++ if (ptag != SSO_TT_EMPTY) + cnxk_sso_hws_swtag_flush(ws->base); +- } ++ ++ do { ++ ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); ++ } while (ptag & BIT_ULL(56)); + + /* Check if we have work in PRF_WQE0, if so extract it. 
*/ + switch (dev->gw_mode) { +@@ -741,8 +748,11 @@ cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port, + if (ev.u64) { + if (flush_cb) + flush_cb(event_dev->data->dev_id, ev, args); +- cnxk_sso_hws_swtag_flush(ws->base); + } ++ cnxk_sso_hws_swtag_flush(ws->base); ++ do { ++ ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); ++ } while (ptag & BIT_ULL(56)); + } + ws->swtag_req = 0; + plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); +@@ -1024,8 +1034,8 @@ static int + cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, + const struct rte_cryptodev *cdev, uint32_t *caps) + { +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", ENOTSUP); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", ENOTSUP); + + *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | + RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA | +@@ -1043,8 +1053,8 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + int ret; + +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", EINVAL); + + dev->is_ca_internal_port = 1; + cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); +@@ -1059,8 +1069,8 @@ static int + cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, + int32_t queue_pair_id) + { +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", EINVAL); + + return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id); + } +@@ -1078,8 +1088,8 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev, + const struct rte_cryptodev *cdev, + struct rte_event_crypto_adapter_vector_limits *limits) + { +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", EINVAL); + + limits->log2_sz = false; + limits->min_sz = 0; diff --git a/dpdk/drivers/event/cnxk/cn10k_worker.h b/dpdk/drivers/event/cnxk/cn10k_worker.h -index 75a2ff244a..a93d40ed40 100644 +index 75a2ff244a..1bce0631e5 100644 --- a/dpdk/drivers/event/cnxk/cn10k_worker.h +++ b/dpdk/drivers/event/cnxk/cn10k_worker.h @@ -100,9 +100,6 @@ cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id, @@ -17071,7 +23444,38 @@ index 75a2ff244a..a93d40ed40 100644 if (flags & NIX_RX_OFFLOAD_SECURITY_F) { const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM | -@@ -316,6 +317,9 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev, +@@ -299,23 +300,39 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev, + } gw; + + gw.get_work = ws->gw_wdata; +-#if defined(RTE_ARCH_ARM64) && !defined(__clang__) ++#if defined(RTE_ARCH_ARM64) ++#if !defined(__clang__) + asm volatile( + PLT_CPU_FEATURE_PREAMBLE + "caspal %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n" + : [wdata] "+r"(gw.get_work) + : [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0) + : 
"memory"); ++#else ++ register uint64_t x0 __asm("x0") = (uint64_t)gw.u64[0]; ++ register uint64_t x1 __asm("x1") = (uint64_t)gw.u64[1]; ++ asm volatile(".arch armv8-a+lse\n" ++ "caspal %[x0], %[x1], %[x0], %[x1], [%[dst]]\n" ++ : [x0] "+r"(x0), [x1] "+r"(x1) ++ : [dst] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0) ++ : "memory"); ++ gw.u64[0] = x0; ++ gw.u64[1] = x1; ++#endif + #else + plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0); + do { + roc_load_pair(gw.u64[0], gw.u64[1], + ws->base + SSOW_LF_GWS_WQE0); + } while (gw.u64[0] & BIT_ULL(63)); ++ rte_atomic_thread_fence(__ATOMIC_SEQ_CST); + #endif ws->gw_rdata = gw.u64[0]; if (gw.u64[1]) cn10k_sso_hws_post_process(ws, gw.u64, flags); @@ -17081,6 +23485,97 @@ index 75a2ff244a..a93d40ed40 100644 ev->event = gw.u64[0]; ev->u64 = gw.u64[1]; +diff --git a/dpdk/drivers/event/cnxk/cn9k_eventdev.c b/dpdk/drivers/event/cnxk/cn9k_eventdev.c +index f5a42a86f8..803e7ddd07 100644 +--- a/dpdk/drivers/event/cnxk/cn9k_eventdev.c ++++ b/dpdk/drivers/event/cnxk/cn9k_eventdev.c +@@ -223,16 +223,16 @@ cn9k_sso_hws_reset(void *arg, void *hws) + cnxk_sso_hws_swtag_untag( + base + SSOW_LF_GWS_OP_SWTAG_UNTAG); + plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED); ++ } else if (pend_tt != SSO_TT_EMPTY) { ++ plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH); + } + + /* Wait for desched to complete. */ + do { + pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE); +- } while (pend_state & BIT_ULL(58)); +- ++ } while (pend_state & (BIT_ULL(58) | BIT_ULL(56))); + plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL); + } +- + if (dev->dual_ws) + dws->swtag_req = 0; + else +@@ -846,12 +846,25 @@ cn9k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port, + base, &ev, dev->rx_offloads, + dev->dual_ws ? dws->lookup_mem : ws->lookup_mem, + dev->dual_ws ? 
dws->tstamp : ws->tstamp); +- if (is_pend && ev.u64) { ++ if (is_pend && ev.u64) + if (flush_cb) + flush_cb(event_dev->data->dev_id, ev, args); +- cnxk_sso_hws_swtag_flush(ws->base); +- } ++ ++ ptag = (plt_read64(base + SSOW_LF_GWS_TAG) >> 32) & SSO_TT_EMPTY; ++ if (ptag != SSO_TT_EMPTY) ++ cnxk_sso_hws_swtag_flush(base); ++ ++ do { ++ ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE); ++ } while (ptag & BIT_ULL(56)); ++ ++ plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL); + } ++ ++ if (dev->dual_ws) ++ dws->swtag_req = 0; ++ else ++ ws->swtag_req = 0; + } + + static int +@@ -1110,11 +1123,11 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev, + } + + static int +-cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, +- const struct rte_cryptodev *cdev, uint32_t *caps) ++cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, ++ uint32_t *caps) + { +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", ENOTSUP); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k", ENOTSUP); + + *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | + RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA; +@@ -1131,8 +1144,8 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + int ret; + +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", EINVAL); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k", EINVAL); + + dev->is_ca_internal_port = 1; + cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); +@@ -1147,8 +1160,8 @@ static int + cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, + int32_t queue_pair_id) + { +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", EINVAL); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k", EINVAL); + + return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id); + } diff --git a/dpdk/drivers/event/cnxk/cn9k_worker.h b/dpdk/drivers/event/cnxk/cn9k_worker.h index 4c3932da47..0ccdb7baf3 100644 --- a/dpdk/drivers/event/cnxk/cn9k_worker.h @@ -17107,7 +23602,7 @@ index 4c3932da47..0ccdb7baf3 100644 /* Write CPT instruction to lmt line */ vst1q_u64(lmt_addr, cmd01); diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c -index db62d32a81..93e46e1b9b 100644 +index db62d32a81..d2f1708297 100644 --- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c +++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c @@ -319,9 +319,9 @@ int @@ -17122,6 +23617,15 @@ index db62d32a81..93e46e1b9b 100644 return 0; } +@@ -551,7 +551,7 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs) + &dev->force_ena_bp); + rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag, + &single_ws); +- rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_flag, ++ rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_value, + &dev->gw_mode); + dev->dual_ws = !single_ws; + rte_kvargs_free(kvlist); @@ -613,9 +613,8 @@ cnxk_sso_fini(struct rte_eventdev *event_dev) cnxk_tim_fini(); @@ -17133,6 +23637,32 @@ index db62d32a81..93e46e1b9b 100644 } int +diff 
--git a/dpdk/drivers/event/cnxk/cnxk_eventdev.h b/dpdk/drivers/event/cnxk/cnxk_eventdev.h +index 738e335ea4..44a39648e3 100644 +--- a/dpdk/drivers/event/cnxk/cnxk_eventdev.h ++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.h +@@ -65,10 +65,10 @@ + (min + val / ((max + cnt - 1) / cnt)) + #define CNXK_SSO_FLUSH_RETRY_MAX 0xfff + +-#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name) \ ++#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name, err_val) \ + do { \ + if (strncmp(dev->driver->name, drv_name, strlen(drv_name))) \ +- return -EINVAL; \ ++ return -err_val; \ + } while (0) + + typedef void *(*cnxk_sso_init_hws_mem_t)(void *dev, uint8_t port_id); +@@ -128,7 +128,7 @@ struct cnxk_sso_evdev { + /* CN9K */ + uint8_t dual_ws; + /* CN10K */ +- uint8_t gw_mode; ++ uint32_t gw_mode; + /* Crypto adapter */ + uint8_t is_ca_internal_port; + } __rte_cache_aligned; diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c index 5ec436382c..e78d215630 100644 --- a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c @@ -17316,8 +23846,186 @@ index eda84c6f31..6be31f6f9d 100644 } else { chunk = (struct cnxk_tim_ent *)mirr_bkt->current_chunk; chunk += (tim_ring->nb_chunk_slots - chunk_remainder); +diff --git a/dpdk/drivers/event/dlb2/dlb2.c b/dpdk/drivers/event/dlb2/dlb2.c +index 60c5cd4804..fa1ccb25ba 100644 +--- a/dpdk/drivers/event/dlb2/dlb2.c ++++ b/dpdk/drivers/event/dlb2/dlb2.c +@@ -72,6 +72,7 @@ static struct rte_event_dev_info evdev_dlb2_default_info = { + .max_single_link_event_port_queue_pairs = + DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2), + .event_dev_cap = (RTE_EVENT_DEV_CAP_EVENT_QOS | ++ RTE_EVENT_DEV_CAP_NONSEQ_MODE | + RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED | + RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES | + RTE_EVENT_DEV_CAP_BURST_MODE | +diff --git a/dpdk/drivers/event/dlb2/dlb2_selftest.c b/dpdk/drivers/event/dlb2/dlb2_selftest.c +index 1863ffe049..62aa11d981 100644 +--- a/dpdk/drivers/event/dlb2/dlb2_selftest.c ++++ b/dpdk/drivers/event/dlb2/dlb2_selftest.c +@@ -1475,7 +1475,7 @@ test_fail: + int + test_dlb2_eventdev(void) + { +- const char *dlb2_eventdev_name = "dlb2_event"; ++ const char *dlb2_eventdev_name = "event_dlb2"; + uint8_t num_evdevs = rte_event_dev_count(); + int i, ret = 0; + int found = 0, skipped = 0, passed = 0, failed = 0; +@@ -1489,7 +1489,7 @@ test_dlb2_eventdev(void) + + /* skip non-dlb2 event devices */ + if (strncmp(info.driver_name, dlb2_eventdev_name, +- sizeof(*info.driver_name)) != 0) { ++ strlen(dlb2_eventdev_name)) != 0) { + skipped++; + continue; + } +diff --git a/dpdk/drivers/event/dlb2/pf/dlb2_main.c b/dpdk/drivers/event/dlb2/pf/dlb2_main.c +index 717aa4fc08..12b65c3b98 100644 +--- a/dpdk/drivers/event/dlb2/pf/dlb2_main.c ++++ b/dpdk/drivers/event/dlb2/pf/dlb2_main.c +@@ -26,6 +26,7 @@ + #define PF_ID_ZERO 0 /* PF ONLY! */ + #define NO_OWNER_VF 0 /* PF ONLY! */ + #define NOT_VF_REQ false /* PF ONLY! 
*/ ++#define DLB2_PCI_PASID_CAP_OFFSET 0x148 /* PASID capability offset */ + + #define DLB2_PCI_CAP_POINTER 0x34 + #define DLB2_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC) +@@ -46,6 +47,7 @@ + #define DLB2_PCI_CAP_ID_MSIX 0x11 + #define DLB2_PCI_EXT_CAP_ID_PRI 0x13 + #define DLB2_PCI_EXT_CAP_ID_ACS 0xD ++#define DLB2_PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */ + + #define DLB2_PCI_PRI_CTRL_ENABLE 0x1 + #define DLB2_PCI_PRI_ALLOC_REQ 0xC +@@ -64,6 +66,8 @@ + #define DLB2_PCI_ACS_CR 0x8 + #define DLB2_PCI_ACS_UF 0x10 + #define DLB2_PCI_ACS_EC 0x20 ++#define DLB2_PCI_PASID_CTRL 0x06 /* PASID control register */ ++#define DLB2_PCI_PASID_CAP_OFFSET 0x148 /* PASID capability offset */ + + static int dlb2_pci_find_capability(struct rte_pci_device *pdev, uint32_t id) + { +@@ -257,12 +261,14 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + uint16_t rt_ctl_word; + uint32_t pri_reqs_dword; + uint16_t pri_ctrl_word; ++ uint16_t pasid_ctrl; + + int pcie_cap_offset; + int pri_cap_offset; + int msix_cap_offset; + int err_cap_offset; + int acs_cap_offset; ++ int pasid_cap_offset; + int wait_count; + + uint16_t devsta_busy_word; +@@ -582,6 +588,38 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + } + } + ++ /* The current Linux kernel vfio driver does not expose PASID capability to ++ * users. It also enables PASID by default, which breaks DLB PF PMD. We have ++ * to use the hardcoded offset for now to disable PASID. ++ */ ++ pasid_cap_offset = DLB2_PCI_PASID_CAP_OFFSET; ++ ++ off = pasid_cap_offset + DLB2_PCI_PASID_CTRL; ++ if (rte_pci_read_config(pdev, &pasid_ctrl, 2, off) != 2) ++ pasid_ctrl = 0; ++ ++ if (pasid_ctrl) { ++ DLB2_INFO(dlb2_dev, "DLB2 disabling pasid...\n"); ++ ++ pasid_ctrl = 0; ++ ret = rte_pci_write_config(pdev, &pasid_ctrl, 2, off); ++ if (ret != 2) { ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ __func__, (int)off); ++ return ret; ++ } ++ } ++ ++ /* Disable PASID if it is enabled by default, which ++ * breaks the DLB if enabled. 
++ */ ++ off = DLB2_PCI_PASID_CAP_OFFSET + RTE_PCI_PASID_CTRL; ++ if (rte_pci_pasid_set_state(pdev, off, false)) { ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ __func__, (int)off); ++ return -1; ++ } ++ + return 0; + } + +diff --git a/dpdk/drivers/event/dpaa/dpaa_eventdev.c b/dpdk/drivers/event/dpaa/dpaa_eventdev.c +index 4b3d16735b..2532abbe78 100644 +--- a/dpdk/drivers/event/dpaa/dpaa_eventdev.c ++++ b/dpdk/drivers/event/dpaa/dpaa_eventdev.c +@@ -993,14 +993,14 @@ dpaa_event_check_flags(const char *params) + } + + static int +-dpaa_event_dev_create(const char *name, const char *params) ++dpaa_event_dev_create(const char *name, const char *params, struct rte_vdev_device *vdev) + { + struct rte_eventdev *eventdev; + struct dpaa_eventdev *priv; + + eventdev = rte_event_pmd_vdev_init(name, + sizeof(struct dpaa_eventdev), +- rte_socket_id()); ++ rte_socket_id(), vdev); + if (eventdev == NULL) { + DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name); + goto fail; +@@ -1050,7 +1050,7 @@ dpaa_event_dev_probe(struct rte_vdev_device *vdev) + + params = rte_vdev_device_args(vdev); + +- return dpaa_event_dev_create(name, params); ++ return dpaa_event_dev_create(name, params, vdev); + } + + static int +diff --git a/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c b/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c +index fa1a1ade80..1e64806849 100644 +--- a/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c ++++ b/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c +@@ -1086,7 +1086,7 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev, + } + + static int +-dpaa2_eventdev_create(const char *name) ++dpaa2_eventdev_create(const char *name, struct rte_vdev_device *vdev) + { + struct rte_eventdev *eventdev; + struct dpaa2_eventdev *priv; +@@ -1096,7 +1096,7 @@ dpaa2_eventdev_create(const char *name) + + eventdev = rte_event_pmd_vdev_init(name, + sizeof(struct dpaa2_eventdev), +- rte_socket_id()); ++ rte_socket_id(), vdev); + if (eventdev == NULL) { + DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name); + goto fail; +@@ -1190,7 +1190,7 @@ dpaa2_eventdev_probe(struct rte_vdev_device *vdev) + + name = rte_vdev_device_name(vdev); + DPAA2_EVENTDEV_INFO("Initializing %s", name); +- return dpaa2_eventdev_create(name); ++ return dpaa2_eventdev_create(name, vdev); + } + + static int diff --git a/dpdk/drivers/event/dsw/dsw_evdev.c b/dpdk/drivers/event/dsw/dsw_evdev.c -index ffabf0d23d..6c5cde2468 100644 +index ffabf0d23d..abe8e68525 100644 --- a/dpdk/drivers/event/dsw/dsw_evdev.c +++ b/dpdk/drivers/event/dsw/dsw_evdev.c @@ -363,6 +363,10 @@ static int @@ -17331,8 +24039,148 @@ index ffabf0d23d..6c5cde2468 100644 dsw->num_ports = 0; dsw->num_queues = 0; +@@ -430,7 +434,7 @@ dsw_probe(struct rte_vdev_device *vdev) + name = rte_vdev_device_name(vdev); + + dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev), +- rte_socket_id()); ++ rte_socket_id(), vdev); + if (dev == NULL) + return -EFAULT; + +diff --git a/dpdk/drivers/event/octeontx/ssovf_evdev.c b/dpdk/drivers/event/octeontx/ssovf_evdev.c +index 650266b996..d5e223077d 100644 +--- a/dpdk/drivers/event/octeontx/ssovf_evdev.c ++++ b/dpdk/drivers/event/octeontx/ssovf_evdev.c +@@ -880,7 +880,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) + } + + eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev), +- rte_socket_id()); ++ rte_socket_id(), vdev); + if (eventdev == NULL) { + ssovf_log_err("Failed to create eventdev vdev %s", name); + return -ENOMEM; +diff --git a/dpdk/drivers/event/opdl/opdl_evdev.c 
b/dpdk/drivers/event/opdl/opdl_evdev.c +index 9ce8b39b60..08ace84bbe 100644 +--- a/dpdk/drivers/event/opdl/opdl_evdev.c ++++ b/dpdk/drivers/event/opdl/opdl_evdev.c +@@ -696,7 +696,7 @@ opdl_probe(struct rte_vdev_device *vdev) + } + } + dev = rte_event_pmd_vdev_init(name, +- sizeof(struct opdl_evdev), socket_id); ++ sizeof(struct opdl_evdev), socket_id, vdev); + + if (dev == NULL) { + PMD_DRV_LOG(ERR, "eventdev vdev init() failed"); +diff --git a/dpdk/drivers/event/skeleton/skeleton_eventdev.c b/dpdk/drivers/event/skeleton/skeleton_eventdev.c +index 8513b9a013..45c13c62c7 100644 +--- a/dpdk/drivers/event/skeleton/skeleton_eventdev.c ++++ b/dpdk/drivers/event/skeleton/skeleton_eventdev.c +@@ -427,12 +427,12 @@ RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map); + /* VDEV based event device */ + + static int +-skeleton_eventdev_create(const char *name, int socket_id) ++skeleton_eventdev_create(const char *name, int socket_id, struct rte_vdev_device *vdev) + { + struct rte_eventdev *eventdev; + + eventdev = rte_event_pmd_vdev_init(name, +- sizeof(struct skeleton_eventdev), socket_id); ++ sizeof(struct skeleton_eventdev), socket_id, vdev); + if (eventdev == NULL) { + PMD_DRV_ERR("Failed to create eventdev vdev %s", name); + goto fail; +@@ -458,7 +458,7 @@ skeleton_eventdev_probe(struct rte_vdev_device *vdev) + name = rte_vdev_device_name(vdev); + RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name, + rte_socket_id()); +- return skeleton_eventdev_create(name, rte_socket_id()); ++ return skeleton_eventdev_create(name, rte_socket_id(), vdev); + } + + static int +diff --git a/dpdk/drivers/event/sw/sw_evdev.c b/dpdk/drivers/event/sw/sw_evdev.c +index 3531821dd4..2a2763ee9b 100644 +--- a/dpdk/drivers/event/sw/sw_evdev.c ++++ b/dpdk/drivers/event/sw/sw_evdev.c +@@ -1074,7 +1074,7 @@ sw_probe(struct rte_vdev_device *vdev) + min_burst_size, deq_burst_size, refill_once); + + dev = rte_event_pmd_vdev_init(name, +- sizeof(struct sw_evdev), socket_id); ++ sizeof(struct sw_evdev), socket_id, vdev); + if (dev == NULL) { + SW_LOG_ERR("eventdev vdev init() failed"); + return -EFAULT; +diff --git a/dpdk/drivers/event/sw/sw_evdev_scheduler.c b/dpdk/drivers/event/sw/sw_evdev_scheduler.c +index 8bc21944f5..25f97c4ffb 100644 +--- a/dpdk/drivers/event/sw/sw_evdev_scheduler.c ++++ b/dpdk/drivers/event/sw/sw_evdev_scheduler.c +@@ -90,8 +90,10 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, + sw->cq_ring_space[cq]--; + + int head = (p->hist_head++ & (SW_PORT_HIST_LIST-1)); +- p->hist_list[head].fid = flow_id; +- p->hist_list[head].qid = qid_id; ++ p->hist_list[head] = (struct sw_hist_list_entry) { ++ .qid = qid_id, ++ .fid = flow_id, ++ }; + + p->stats.tx_pkts++; + qid->stats.tx_pkts++; +@@ -162,8 +164,10 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, + qid->stats.tx_pkts++; + + const int head = (p->hist_head & (SW_PORT_HIST_LIST-1)); +- p->hist_list[head].fid = SW_HASH_FLOWID(qe->flow_id); +- p->hist_list[head].qid = qid_id; ++ p->hist_list[head] = (struct sw_hist_list_entry) { ++ .qid = qid_id, ++ .fid = SW_HASH_FLOWID(qe->flow_id), ++ }; + + if (keep_order) + rob_ring_dequeue(qid->reorder_buffer_freelist, +@@ -368,12 +372,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder) + if (!allow_reorder && !eop) + flags = QE_FLAG_VALID; + +- /* +- * if we don't have space for this packet in an IQ, +- * then move on to next queue. 
Technically, for a +- * packet that needs reordering, we don't need to check +- * here, but it simplifies things not to special-case +- */ + uint32_t iq_num = PRIO_TO_IQ(qe->priority); + struct sw_qid *qid = &sw->qids[qe->queue_id]; + +@@ -419,7 +417,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder) + struct reorder_buffer_entry *rob_entry = + hist_entry->rob_entry; + +- hist_entry->rob_entry = NULL; + /* Although fragmentation not currently + * supported by eventdev API, we support it + * here. Open: How do we alert the user that +diff --git a/dpdk/drivers/gpu/cuda/gdrcopy.c b/dpdk/drivers/gpu/cuda/gdrcopy.c +index 322a5dbeb2..bd56b73ce4 100644 +--- a/dpdk/drivers/gpu/cuda/gdrcopy.c ++++ b/dpdk/drivers/gpu/cuda/gdrcopy.c +@@ -6,6 +6,8 @@ + + #ifdef DRIVERS_GPU_CUDA_GDRCOPY_H + ++#include ++ + static void *gdrclib; + static gdr_t (*sym_gdr_open)(void); + static int (*sym_gdr_pin_buffer)(gdr_t g, unsigned long addr, size_t size, diff --git a/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c b/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c -index ba826f0f01..ff0015d8de 100644 +index ba826f0f01..9d6982fdab 100644 --- a/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c +++ b/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c @@ -9,6 +9,7 @@ @@ -17343,7 +24191,20 @@ index ba826f0f01..ff0015d8de 100644 enum batch_op_status { BATCH_ALLOC_OP_NOT_ISSUED = 0, -@@ -178,7 +179,7 @@ cn10k_mempool_get_count(const struct rte_mempool *mp) +@@ -150,6 +151,12 @@ cn10k_mempool_enq(struct rte_mempool *mp, void *const *obj_table, + */ + rte_io_wmb(); + ++ /* For non-EAL threads, rte_lcore_id() will not be valid. Hence ++ * fallback to bulk alloc ++ */ ++ if (unlikely(rte_lcore_id() == LCORE_ID_ANY)) ++ return cnxk_mempool_enq(mp, obj_table, n); ++ + if (n == 1) { + roc_npa_aura_op_free(mp->pool_id, 1, ptr[0]); + return 0; +@@ -178,7 +185,7 @@ cn10k_mempool_get_count(const struct rte_mempool *mp) if (mem->status == BATCH_ALLOC_OP_ISSUED) count += roc_npa_aura_batch_alloc_count( @@ -17352,8 +24213,417 @@ index ba826f0f01..ff0015d8de 100644 if (mem->status == BATCH_ALLOC_OP_DONE) count += mem->sz; +@@ -250,6 +257,12 @@ cn10k_mempool_deq(struct rte_mempool *mp, void **obj_table, unsigned int n) + } + } + ++ /* For non-EAL threads, rte_lcore_id() will not be valid. Hence ++ * fallback to bulk alloc ++ */ ++ if (unlikely(rte_lcore_id() == LCORE_ID_ANY)) ++ return cnxk_mempool_deq(mp, obj_table, n); ++ + if (unlikely(count != n)) { + /* No partial alloc allowed. 
Free up allocated pointers */ + cn10k_mempool_enq(mp, obj_table, count); +diff --git a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c +index c13a0942aa..397a32db58 100644 +--- a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c ++++ b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c +@@ -313,7 +313,14 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + static int + eth_dev_start(struct rte_eth_dev *dev) + { ++ struct pmd_internals *internals = dev->data->dev_private; ++ uint16_t i; ++ + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; ++ for (i = 0; i < internals->nb_queues; i++) { ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ } + return 0; + } + +@@ -341,6 +348,8 @@ eth_dev_stop(struct rte_eth_dev *dev) + + internals->rx_queue[i].sockfd = -1; + internals->tx_queue[i].sockfd = -1; ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; +diff --git a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c +index b6ec9bf490..738f4158e0 100644 +--- a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c ++++ b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c +@@ -672,7 +672,13 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + static int + eth_dev_start(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) { ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ } + + return 0; + } +@@ -681,7 +687,14 @@ eth_dev_start(struct rte_eth_dev *dev) + static int + eth_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) { ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ } ++ + return 0; + } + +diff --git a/dpdk/drivers/net/ark/ark_ethdev.c b/dpdk/drivers/net/ark/ark_ethdev.c +index c654a229f7..c1681e8ecd 100644 +--- a/dpdk/drivers/net/ark/ark_ethdev.c ++++ b/dpdk/drivers/net/ark/ark_ethdev.c +@@ -299,6 +299,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev) + int ret; + int port_count = 1; + int p; ++ uint16_t num_queues; + bool rqpacing = false; + + ark->eth_dev = dev; +@@ -426,6 +427,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev) + ark->user_ext.dev_get_port_count(dev, + ark->user_data[dev->data->port_id]); + ark->num_ports = port_count; ++ num_queues = ark_api_num_queues_per_port(ark->mpurx.v, port_count); + + for (p = 0; p < port_count; p++) { + struct rte_eth_dev *eth_dev; +@@ -451,7 +453,18 @@ eth_ark_dev_init(struct rte_eth_dev *dev) + } + + eth_dev->device = &pci_dev->device; +- eth_dev->data->dev_private = ark; ++ /* Device requires new dev_private data */ ++ eth_dev->data->dev_private = ++ rte_zmalloc_socket(name, ++ sizeof(struct ark_adapter), ++ RTE_CACHE_LINE_SIZE, ++ rte_socket_id()); ++ ++ memcpy(eth_dev->data->dev_private, ark, ++ sizeof(struct ark_adapter)); ++ ark = eth_dev->data->dev_private; ++ ark->qbase = p * num_queues; ++ + eth_dev->dev_ops = ark->eth_dev->dev_ops; + eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst; + eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst; +diff --git a/dpdk/drivers/net/ark/ark_ethdev_rx.c 
b/dpdk/drivers/net/ark/ark_ethdev_rx.c +index cbc0416bc2..38bc69dff4 100644 +--- a/dpdk/drivers/net/ark/ark_ethdev_rx.c ++++ b/dpdk/drivers/net/ark/ark_ethdev_rx.c +@@ -68,7 +68,7 @@ struct ark_rx_queue { + static int + eth_ark_rx_hw_setup(struct rte_eth_dev *dev, + struct ark_rx_queue *queue, +- uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx) ++ uint16_t rx_queue_idx) + { + rte_iova_t queue_base; + rte_iova_t phys_addr_q_base; +@@ -124,7 +124,7 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint32_t i; + int status; + +- int qidx = queue_idx; ++ int qidx = ark->qbase + queue_idx; + + /* We may already be setup, free memory prior to re-allocation */ + if (dev->data->rx_queues[queue_idx] != NULL) { +@@ -215,7 +215,7 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev, + } + /* MPU Setup */ + if (status == 0) +- status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx); ++ status = eth_ark_rx_hw_setup(dev, queue, queue_idx); + + if (unlikely(status != 0)) { + struct rte_mbuf **mbuf; +diff --git a/dpdk/drivers/net/ark/ark_ethdev_tx.c b/dpdk/drivers/net/ark/ark_ethdev_tx.c +index 5940a592a2..4792754f19 100644 +--- a/dpdk/drivers/net/ark/ark_ethdev_tx.c ++++ b/dpdk/drivers/net/ark/ark_ethdev_tx.c +@@ -229,7 +229,7 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev, + struct ark_tx_queue *queue; + int status; + +- int qidx = queue_idx; ++ int qidx = ark->qbase + queue_idx; + + if (!rte_is_power_of_2(nb_desc)) { + ARK_PMD_LOG(ERR, +diff --git a/dpdk/drivers/net/ark/ark_global.h b/dpdk/drivers/net/ark/ark_global.h +index 71d0b53e03..2f198edfe4 100644 +--- a/dpdk/drivers/net/ark/ark_global.h ++++ b/dpdk/drivers/net/ark/ark_global.h +@@ -112,7 +112,10 @@ struct ark_adapter { + ark_pkt_chkr_t pc; + ark_pkt_dir_t pd; + ++ /* For single function, multiple ports */ + int num_ports; ++ uint16_t qbase; ++ + bool isvf; + + /* Packet generator/checker args */ +diff --git a/dpdk/drivers/net/avp/avp_ethdev.c b/dpdk/drivers/net/avp/avp_ethdev.c +index b2a08f5635..53d9e38c93 100644 +--- a/dpdk/drivers/net/avp/avp_ethdev.c ++++ b/dpdk/drivers/net/avp/avp_ethdev.c +@@ -2036,6 +2036,7 @@ static int + avp_dev_start(struct rte_eth_dev *eth_dev) + { + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); ++ uint16_t i; + int ret; + + rte_spinlock_lock(&avp->lock); +@@ -2056,6 +2057,11 @@ avp_dev_start(struct rte_eth_dev *eth_dev) + /* remember current link state */ + avp->flags |= AVP_F_LINKUP; + ++ for (i = 0; i < avp->num_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < avp->num_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + ret = 0; + + unlock: +@@ -2067,6 +2073,7 @@ static int + avp_dev_stop(struct rte_eth_dev *eth_dev) + { + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); ++ uint16_t i; + int ret; + + rte_spinlock_lock(&avp->lock); +@@ -2086,6 +2093,11 @@ avp_dev_stop(struct rte_eth_dev *eth_dev) + ret); + } + ++ for (i = 0; i < avp->num_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < avp->num_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + unlock: + rte_spinlock_unlock(&avp->lock); + return ret; +diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/dpdk/drivers/net/axgbe/axgbe_ethdev.c +index b071e4e460..da48a3ac42 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.c ++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.c +@@ -12,6 +12,12 @@ + + #include "eal_filesystem.h" + 
++#ifdef RTE_ARCH_X86 ++#include ++#else ++#define __cpuid(n, a, b, c, d) ++#endif ++ + static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev); + static int axgbe_dev_configure(struct rte_eth_dev *dev); + static int axgbe_dev_start(struct rte_eth_dev *dev); +@@ -172,9 +178,14 @@ static const struct axgbe_xstats axgbe_xstats_strings[] = { + + /* The set of PCI devices this driver supports */ + #define AMD_PCI_VENDOR_ID 0x1022 +-#define AMD_PCI_RV_ROOT_COMPLEX_ID 0x15d0 +-#define AMD_PCI_YC_ROOT_COMPLEX_ID 0x14b5 +-#define AMD_PCI_SNOWY_ROOT_COMPLEX_ID 0x1450 ++ ++#define Fam17h 0x17 ++#define Fam19h 0x19 ++ ++#define CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 ++#define CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 ++#define CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 ++ + #define AMD_PCI_AXGBE_DEVICE_V2A 0x1458 + #define AMD_PCI_AXGBE_DEVICE_V2B 0x1459 + +@@ -2122,29 +2133,6 @@ static void axgbe_default_config(struct axgbe_port *pdata) + pdata->power_down = 0; + } + +-/* +- * Return PCI root complex device id on success else 0 +- */ +-static uint16_t +-get_pci_rc_devid(void) +-{ +- char pci_sysfs[PATH_MAX]; +- const struct rte_pci_addr pci_rc_addr = {0, 0, 0, 0}; +- unsigned long device_id; +- +- snprintf(pci_sysfs, sizeof(pci_sysfs), "%s/" PCI_PRI_FMT "/device", +- rte_pci_get_sysfs_path(), pci_rc_addr.domain, +- pci_rc_addr.bus, pci_rc_addr.devid, pci_rc_addr.function); +- +- /* get device id */ +- if (eal_parse_sysfs_value(pci_sysfs, &device_id) < 0) { +- PMD_INIT_LOG(ERR, "Error in reading PCI sysfs\n"); +- return 0; +- } +- +- return (uint16_t)device_id; +-} +- + /* + * It returns 0 on success. + */ +@@ -2158,6 +2146,9 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) + uint32_t len; + int ret; + ++ unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; ++ unsigned char cpu_family = 0, cpu_model = 0; ++ + eth_dev->dev_ops = &axgbe_eth_dev_ops; + + eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status; +@@ -2196,26 +2187,55 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) + pdata->vdata = &axgbe_v2b; + + /* +- * Use PCI root complex device ID to identify the CPU ++ * Use CPUID to get Family and model ID to identify the CPU + */ +- switch (get_pci_rc_devid()) { +- case AMD_PCI_RV_ROOT_COMPLEX_ID: +- pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; +- pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; +- break; +- case AMD_PCI_YC_ROOT_COMPLEX_ID: +- pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF; +- pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT; +- /* Yellow Carp devices do not need cdr workaround */ +- pdata->vdata->an_cdr_workaround = 0; ++ __cpuid(0x0, eax, ebx, ecx, edx); ++ ++ if (ebx == CPUID_VENDOR_AuthenticAMD_ebx && ++ edx == CPUID_VENDOR_AuthenticAMD_edx && ++ ecx == CPUID_VENDOR_AuthenticAMD_ecx) { ++ int unknown_cpu = 0; ++ eax = 0, ebx = 0, ecx = 0, edx = 0; ++ ++ __cpuid(0x1, eax, ebx, ecx, edx); ++ ++ cpu_family = ((GET_BITS(eax, 8, 4)) + (GET_BITS(eax, 20, 8))); ++ cpu_model = ((GET_BITS(eax, 4, 4)) | (((GET_BITS(eax, 16, 4)) << 4) & 0xF0)); ++ ++ switch (cpu_family) { ++ case Fam17h: ++ /* V1000/R1000 */ ++ if (cpu_model >= 0x10 && cpu_model <= 0x1F) { ++ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; ++ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; ++ /* EPYC 3000 */ ++ } else if (cpu_model >= 0x01 && cpu_model <= 0x0F) { ++ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; ++ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; ++ } else { ++ unknown_cpu = 1; ++ } + break; +- case AMD_PCI_SNOWY_ROOT_COMPLEX_ID: +- pdata->xpcs_window_def_reg = 
PCS_V2_WINDOW_DEF; +- pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; ++ case Fam19h: ++ /* V3000 (Yellow Carp) */ ++ if (cpu_model >= 0x44 && cpu_model <= 0x47) { ++ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF; ++ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT; ++ ++ /* Yellow Carp devices do not need cdr workaround */ ++ pdata->vdata->an_cdr_workaround = 0; ++ } else { ++ unknown_cpu = 1; ++ } + break; +- default: +- PMD_DRV_LOG(ERR, "No supported devices found\n"); +- return -ENODEV; ++ default: ++ unknown_cpu = 1; ++ break; ++ } ++ if (unknown_cpu) { ++ PMD_DRV_LOG(ERR, "Unknown CPU family, no supported axgbe device found\n"); ++ return -ENODEV; ++ } + } + + /* Configure the PCS indirect addressing support */ +diff --git a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c b/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c +index 4448cf2de2..1327cbe912 100644 +--- a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c ++++ b/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c +@@ -211,6 +211,7 @@ bnx2x_dev_start(struct rte_eth_dev *dev) + { + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(sc); + +@@ -244,6 +245,11 @@ bnx2x_dev_start(struct rte_eth_dev *dev) + + bnx2x_print_device_info(sc); + ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return ret; + } + +@@ -252,6 +258,7 @@ bnx2x_dev_stop(struct rte_eth_dev *dev) + { + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(sc); + +@@ -277,6 +284,11 @@ bnx2x_dev_stop(struct rte_eth_dev *dev) + return ret; + } + ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c -index b3de490d36..753e86b4b2 100644 +index b3de490d36..e3ba48ac0b 100644 --- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c +++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c @@ -1017,7 +1017,6 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, @@ -17364,7 +24634,27 @@ index b3de490d36..753e86b4b2 100644 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; -@@ -5859,6 +5858,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) +@@ -1483,6 +1482,7 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; + struct rte_eth_link link; ++ uint16_t i; + int ret; + + eth_dev->data->dev_started = 0; +@@ -1543,6 +1543,11 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) + + eth_dev->data->scattered_rx = 0; + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -5859,6 +5864,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) rte_eth_copy_pci_info(eth_dev, pci_dev); eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; @@ -17418,6 +24708,50 @@ index 67e016775c..21c2217092 100644 bnxt_free_hwrm_tx_ring(bp, tx_queue_id); rc = 
bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id); if (rc) +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c +index 4a266bb2ca..928dfca7af 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c +@@ -654,12 +654,9 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id) + } + + static uint16_t +-max_index(uint64_t *a, int n) ++max_index(uint64_t *a, uint16_t n) + { +- if (n <= 0) +- return -1; +- +- int i, max_i = 0; ++ uint16_t i, max_i = 0; + uint64_t max = a[0]; + + for (i = 1; i < n; ++i) { +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h +index 7eb392f8c8..025bd0ec54 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h +@@ -197,10 +197,6 @@ int + rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id, + struct rte_eth_bond_8023ad_slave_info *conf); + +-#ifdef __cplusplus +-} +-#endif +- + /** + * Configure a slave port to start collecting. + * +@@ -331,4 +327,9 @@ rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id); + int + rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id, + enum rte_bond_8023ad_agg_selection agg_selection); ++ ++#ifdef __cplusplus ++} ++#endif ++ + #endif /* RTE_ETH_BOND_8023AD_H_ */ diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_api.c b/dpdk/drivers/net/bonding/rte_eth_bond_api.c index c0178369b4..85d0528b7c 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_api.c @@ -17457,10 +24791,45 @@ index 6553166f5c..c137efd55f 100644 if (socket_id >= 0 && socket_id < RTE_MAX_NUMA_NODES) { *(int *)extra_args = (int)socket_id; diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c -index b9bcebc6cb..8cd78ce1ed 100644 +index b9bcebc6cb..8df632fa6e 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c -@@ -3362,7 +3362,7 @@ static int +@@ -2089,6 +2089,11 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) + internals->mode == BONDING_MODE_ALB) + bond_tlb_enable(internals); + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + + out_err: +@@ -2155,6 +2160,10 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) + eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + eth_dev->data->dev_started = 0; + ++ if (internals->link_status_polling_enabled) { ++ rte_eal_alarm_cancel(bond_ethdev_slave_link_status_change_monitor, ++ (void *)&rte_eth_devices[internals->port_id]); ++ } + internals->link_status_polling_enabled = 0; + for (i = 0; i < internals->slave_count; i++) { + uint16_t slave_id = internals->slaves[i].port_id; +@@ -2174,6 +2183,11 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) + deactivate_slave(eth_dev, slave_id); + } + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -3362,7 +3376,7 @@ static int bond_alloc(struct rte_vdev_device *dev, uint8_t mode) { const char *name = rte_vdev_device_name(dev); @@ -17469,7 +24838,7 @@ index b9bcebc6cb..8cd78ce1ed 100644 struct bond_dev_private *internals = 
NULL; struct rte_eth_dev *eth_dev = NULL; uint32_t vlan_filter_bmp_size; -@@ -3564,7 +3564,7 @@ bond_probe(struct rte_vdev_device *dev) +@@ -3564,7 +3578,7 @@ bond_probe(struct rte_vdev_device *dev) port_id = bond_alloc(dev, bonding_mode); if (port_id < 0) { RTE_BOND_LOG(ERR, "Failed to create socket %s in mode %u on " @@ -17478,7 +24847,7 @@ index b9bcebc6cb..8cd78ce1ed 100644 goto parse_error; } internals = rte_eth_devices[port_id].data->dev_private; -@@ -3589,7 +3589,7 @@ bond_probe(struct rte_vdev_device *dev) +@@ -3589,7 +3603,7 @@ bond_probe(struct rte_vdev_device *dev) rte_eth_dev_probing_finish(&rte_eth_devices[port_id]); RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on " @@ -17488,9 +24857,29 @@ index b9bcebc6cb..8cd78ce1ed 100644 parse_error: diff --git a/dpdk/drivers/net/cnxk/cn10k_rx.h b/dpdk/drivers/net/cnxk/cn10k_rx.h -index 721127dddd..20384e64c7 100644 +index 721127dddd..b60c158d55 100644 --- a/dpdk/drivers/net/cnxk/cn10k_rx.h +++ b/dpdk/drivers/net/cnxk/cn10k_rx.h +@@ -888,7 +888,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, + struct nix_cqe_hdr_s *cq; + struct rte_mbuf *mbuf; + uint64_t aura_handle; +- uint64_t sa_base; ++ uint64_t sa_base = 0; + uint16_t lmt_id; + uint64_t laddr; + +@@ -1025,9 +1025,9 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, + uint8x16_t f0, f1, f2, f3; + uint16_t lmt_id, d_off; + uint64_t lbase, laddr; ++ uintptr_t sa_base = 0; + uint16_t packets = 0; + uint16_t pkts_left; +- uintptr_t sa_base; + uint32_t head; + uintptr_t cq0; + @@ -1216,6 +1216,12 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0); mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1); @@ -17518,7 +24907,7 @@ index 721127dddd..20384e64c7 100644 nix_mbuf_validate_next(mbuf1); nix_mbuf_validate_next(mbuf2); diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.h b/dpdk/drivers/net/cnxk/cn10k_tx.h -index 815cd2ff1f..63cf6821d2 100644 +index 815cd2ff1f..84b5faa137 100644 --- a/dpdk/drivers/net/cnxk/cn10k_tx.h +++ b/dpdk/drivers/net/cnxk/cn10k_tx.h @@ -1696,10 +1696,12 @@ cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr, @@ -17534,6 +24923,23 @@ index 815cd2ff1f..63cf6821d2 100644 } } +@@ -1907,13 +1909,13 @@ again: + vsetq_lane_u64(((struct rte_mbuf *)mbuf0)->data_off, vld1q_u64(mbuf0), 1); + len_olflags0 = vld1q_u64(mbuf0 + 3); + dataoff_iova1 = +- vsetq_lane_u64(((struct rte_mbuf *)mbuf0)->data_off, vld1q_u64(mbuf1), 1); ++ vsetq_lane_u64(((struct rte_mbuf *)mbuf1)->data_off, vld1q_u64(mbuf1), 1); + len_olflags1 = vld1q_u64(mbuf1 + 3); + dataoff_iova2 = +- vsetq_lane_u64(((struct rte_mbuf *)mbuf0)->data_off, vld1q_u64(mbuf2), 1); ++ vsetq_lane_u64(((struct rte_mbuf *)mbuf2)->data_off, vld1q_u64(mbuf2), 1); + len_olflags2 = vld1q_u64(mbuf2 + 3); + dataoff_iova3 = +- vsetq_lane_u64(((struct rte_mbuf *)mbuf0)->data_off, vld1q_u64(mbuf3), 1); ++ vsetq_lane_u64(((struct rte_mbuf *)mbuf3)->data_off, vld1q_u64(mbuf3), 1); + len_olflags3 = vld1q_u64(mbuf3 + 3); + + /* Move mbufs to point pool */ diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c b/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c index 67966a4e49..327f221e38 100644 --- a/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c @@ -17837,6 +25243,109 @@ index 6d155d924c..422c5d74df 100644 in_actions[i].type = ROC_NPC_ACTION_TYPE_END; return 0; +diff --git a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c +index 
45bbeaef0c..8cc3d9f257 100644 +--- a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c ++++ b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c +@@ -414,6 +414,7 @@ int cxgbe_dev_stop(struct rte_eth_dev *eth_dev) + { + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; ++ uint16_t i; + + CXGBE_FUNC_TRACE(); + +@@ -429,6 +430,11 @@ int cxgbe_dev_stop(struct rte_eth_dev *eth_dev) + t4_sge_eth_clear_queues(pi); + eth_dev->data->scattered_rx = 0; + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c +index a6c86113d1..ef4c06db6a 100644 +--- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c ++++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c +@@ -399,6 +399,7 @@ static void dpaa_interrupt_handler(void *param) + static int dpaa_eth_dev_start(struct rte_eth_dev *dev) + { + struct dpaa_if *dpaa_intf = dev->data->dev_private; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + +@@ -413,12 +414,18 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev) + + fman_if_enable_rx(dev->process_private); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + + static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) + { + struct fman_if *fif = dev->process_private; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + dev->data->dev_started = 0; +@@ -427,6 +434,11 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) + fman_if_disable_rx(fif); + dev->tx_pkt_burst = dpaa_eth_tx_drop_all; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c +index 679f33ae1a..8e610b6bba 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c +@@ -1278,6 +1278,11 @@ dpaa2_dev_start(struct rte_eth_dev *dev) + if (priv->en_ordered) + dev->tx_pkt_burst = dpaa2_dev_tx_ordered; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -1295,6 +1300,7 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) + struct rte_device *rdev = dev->device; + struct rte_intr_handle *intr_handle; + struct rte_dpaa2_device *dpaa2_dev; ++ uint16_t i; + + dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); + intr_handle = dpaa2_dev->intr_handle; +@@ -1329,6 +1335,11 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c index f60e78e1fd..85910bbd8f 100644 --- 
a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c @@ -17885,7 +25394,7 @@ index 8ee9be12ad..18efa78ac3 100644 dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(); dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() | diff --git a/dpdk/drivers/net/e1000/em_rxtx.c b/dpdk/drivers/net/e1000/em_rxtx.c -index d48fd52404..cb5ce2307b 100644 +index d48fd52404..df5fbb7823 100644 --- a/dpdk/drivers/net/e1000/em_rxtx.c +++ b/dpdk/drivers/net/e1000/em_rxtx.c @@ -1030,6 +1030,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, @@ -17905,8 +25414,44 @@ index d48fd52404..cb5ce2307b 100644 /* Prefetch data of first segment, if configured to do so. */ rte_packet_prefetch((char *)first_seg->buf_addr + +@@ -1575,6 +1576,8 @@ em_dev_clear_queues(struct rte_eth_dev *dev) + em_tx_queue_release_mbufs(txq); + em_reset_tx_queue(txq); + } ++ ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { +@@ -1583,6 +1586,8 @@ em_dev_clear_queues(struct rte_eth_dev *dev) + em_rx_queue_release_mbufs(rxq); + em_reset_rx_queue(rxq); + } ++ ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -1811,6 +1816,8 @@ eth_em_rx_init(struct rte_eth_dev *dev) + rxdctl |= E1000_RXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + /* + * Due to EM devices not having any sort of hardware + * limit for packet length, jumbo frame of any size +@@ -1945,6 +1952,8 @@ eth_em_tx_init(struct rte_eth_dev *dev) + txdctl |= (txq->wthresh & 0x3F) << 16; + txdctl |= E1000_TXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); ++ ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + /* Program the Transmit Control Register. */ diff --git a/dpdk/drivers/net/e1000/igb_rxtx.c b/dpdk/drivers/net/e1000/igb_rxtx.c -index f32dee46df..1d23e081b6 100644 +index f32dee46df..6027cfbfb1 100644 --- a/dpdk/drivers/net/e1000/igb_rxtx.c +++ b/dpdk/drivers/net/e1000/igb_rxtx.c @@ -1853,6 +1853,7 @@ igb_dev_clear_queues(struct rte_eth_dev *dev) @@ -17941,6 +25486,68 @@ index f32dee46df..1d23e081b6 100644 } /* Program the Transmit Control Register. 
*/ +@@ -2740,6 +2744,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) + else + rxdctl |= ((rxq->wthresh & 0x1F) << 16); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); ++ ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { +@@ -2811,6 +2817,8 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev) + txdctl |= ((txq->wthresh & 0x1F) << 16); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); ++ ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + } +diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c +index efcb163027..7345e480f8 100644 +--- a/dpdk/drivers/net/ena/ena_ethdev.c ++++ b/dpdk/drivers/net/ena/ena_ethdev.c +@@ -1171,6 +1171,7 @@ static int ena_start(struct rte_eth_dev *dev) + struct ena_adapter *adapter = dev->data->dev_private; + uint64_t ticks; + int rc = 0; ++ uint16_t i; + + /* Cannot allocate memory in secondary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { +@@ -1208,6 +1209,11 @@ static int ena_start(struct rte_eth_dev *dev) + ++adapter->dev_stats.dev_start; + adapter->state = ENA_ADAPTER_STATE_RUNNING; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + + err_rss_init: +@@ -1223,6 +1229,7 @@ static int ena_stop(struct rte_eth_dev *dev) + struct ena_com_dev *ena_dev = &adapter->ena_dev; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; ++ uint16_t i; + int rc; + + /* Cannot free memory in secondary process */ +@@ -1254,6 +1261,11 @@ static int ena_stop(struct rte_eth_dev *dev) + adapter->state = ENA_ADAPTER_STATE_STOPPED; + dev->data->dev_started = 0; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/ena/ena_rss.c b/dpdk/drivers/net/ena/ena_rss.c index b682d01c20..d0ba9d5c0a 100644 --- a/dpdk/drivers/net/ena/ena_rss.c @@ -17953,11 +25560,112 @@ index b682d01c20..d0ba9d5c0a 100644 return rc; } } +diff --git a/dpdk/drivers/net/enetc/enetc_ethdev.c b/dpdk/drivers/net/enetc/enetc_ethdev.c +index 1b4337bc48..c9352f0746 100644 +--- a/dpdk/drivers/net/enetc/enetc_ethdev.c ++++ b/dpdk/drivers/net/enetc/enetc_ethdev.c +@@ -17,6 +17,7 @@ enetc_dev_start(struct rte_eth_dev *dev) + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + uint32_t val; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + if (hw->device_id == ENETC_DEV_ID_VF) +@@ -45,6 +46,11 @@ enetc_dev_start(struct rte_eth_dev *dev) + ENETC_PM0_IFM_XGMII); + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -55,6 +61,7 @@ enetc_dev_stop(struct rte_eth_dev *dev) + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + uint32_t val; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + dev->data->dev_started = 0; +@@ -69,6 +76,11 @@ enetc_dev_stop(struct rte_eth_dev *dev) + enetc_port_wr(enetc_hw, 
ENETC_PM0_CMD_CFG, + val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN))); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/enic/enic_ethdev.c b/dpdk/drivers/net/enic/enic_ethdev.c +index cdf0915591..a487256fa1 100644 +--- a/dpdk/drivers/net/enic/enic_ethdev.c ++++ b/dpdk/drivers/net/enic/enic_ethdev.c +@@ -368,6 +368,7 @@ static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev) + { + struct rte_eth_link link; + struct enic *enic = pmd_priv(eth_dev); ++ uint16_t i; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; +@@ -378,6 +379,11 @@ static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev) + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(eth_dev, &link); + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/enic/enic_main.c b/dpdk/drivers/net/enic/enic_main.c +index 19a99a82c5..a6aaa760ca 100644 +--- a/dpdk/drivers/net/enic/enic_main.c ++++ b/dpdk/drivers/net/enic/enic_main.c +@@ -1639,7 +1639,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu) + * packet length. + */ + if (!eth_dev->data->dev_started) +- goto set_mtu_done; ++ return rc; + + /* + * The device has started, re-do RQs on the fly. In the process, we diff --git a/dpdk/drivers/net/gve/gve_ethdev.c b/dpdk/drivers/net/gve/gve_ethdev.c -index 97781f0ed3..e357f16e16 100644 +index 97781f0ed3..0796d37760 100644 --- a/dpdk/drivers/net/gve/gve_ethdev.c +++ b/dpdk/drivers/net/gve/gve_ethdev.c -@@ -282,7 +282,6 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -5,6 +5,7 @@ + #include "gve_ethdev.h" + #include "base/gve_adminq.h" + #include "base/gve_register.h" ++#include "rte_ether.h" + + const char gve_version_str[] = GVE_VERSION; + static const char gve_version_prefix[] = GVE_VERSION_PREFIX; +@@ -274,15 +275,14 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + dev_info->max_mac_addrs = 1; + dev_info->max_rx_queues = priv->max_nb_rxq; + dev_info->max_tx_queues = priv->max_nb_txq; +- dev_info->min_rx_bufsize = GVE_MIN_BUF_SIZE; +- dev_info->max_rx_pktlen = GVE_MAX_RX_PKTLEN; +- dev_info->max_mtu = GVE_MAX_MTU; +- dev_info->min_mtu = GVE_MIN_MTU; ++ dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_GQI; ++ dev_info->max_rx_pktlen = priv->max_mtu + RTE_ETHER_HDR_LEN; ++ dev_info->max_mtu = priv->max_mtu; ++ dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->rx_offload_capa = 0; dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | @@ -17965,11 +25673,182 @@ index 97781f0ed3..e357f16e16 100644 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | +diff --git a/dpdk/drivers/net/gve/gve_ethdev.h b/dpdk/drivers/net/gve/gve_ethdev.h +index 235e55899e..b7702a1249 100644 +--- a/dpdk/drivers/net/gve/gve_ethdev.h ++++ b/dpdk/drivers/net/gve/gve_ethdev.h +@@ -28,11 +28,9 @@ + #define GVE_DEFAULT_TX_FREE_THRESH 256 + #define GVE_TX_MAX_FREE_SZ 512 + +-#define GVE_MIN_BUF_SIZE 1024 +-#define GVE_MAX_RX_PKTLEN 65535 +- +-#define GVE_MAX_MTU RTE_ETHER_MTU +-#define GVE_MIN_MTU RTE_ETHER_MIN_MTU ++#define GVE_RX_BUF_ALIGN_GQI 2048 ++#define 
GVE_RX_MIN_BUF_SIZE_GQI 2048 ++#define GVE_RX_MAX_BUF_SIZE_GQI 4096 + + /* A list of pages registered with the device during setup and used by a queue + * as buffers +diff --git a/dpdk/drivers/net/gve/gve_rx.c b/dpdk/drivers/net/gve/gve_rx.c +index 518c9d109c..50f9f5c370 100644 +--- a/dpdk/drivers/net/gve/gve_rx.c ++++ b/dpdk/drivers/net/gve/gve_rx.c +@@ -222,6 +222,7 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, + const struct rte_memzone *mz; + struct gve_rx_queue *rxq; + uint16_t free_thresh; ++ uint32_t mbuf_len; + int err = 0; + + if (nb_desc != hw->rx_desc_cnt) { +@@ -265,7 +266,11 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, + rxq->hw = hw; + rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)]; + +- rxq->rx_buf_len = rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM; ++ mbuf_len = ++ rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM; ++ rxq->rx_buf_len = ++ RTE_MIN((uint16_t)GVE_RX_MAX_BUF_SIZE_GQI, ++ RTE_ALIGN_FLOOR(mbuf_len, GVE_RX_BUF_ALIGN_GQI)); + + /* Allocate software ring */ + rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring", sizeof(struct rte_mbuf *) * nb_desc, +diff --git a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c +index 7aa5e7d8e9..adc9f75c81 100644 +--- a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c ++++ b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c +@@ -980,6 +980,7 @@ static int hinic_dev_start(struct rte_eth_dev *dev) + int rc; + char *name; + struct hinic_nic_dev *nic_dev; ++ uint16_t i; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + name = dev->data->name; +@@ -1047,6 +1048,11 @@ static int hinic_dev_start(struct rte_eth_dev *dev) + + rte_bit_relaxed_set32(HINIC_DEV_START, &nic_dev->dev_status); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + + en_port_fail: +@@ -1169,6 +1175,7 @@ static int hinic_dev_stop(struct rte_eth_dev *dev) + uint16_t port_id; + struct hinic_nic_dev *nic_dev; + struct rte_eth_link link; ++ uint16_t i; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + name = dev->data->name; +@@ -1215,6 +1222,11 @@ static int hinic_dev_stop(struct rte_eth_dev *dev) + hinic_free_all_rx_mbuf(dev); + hinic_free_all_tx_mbuf(dev); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/hns3/hns3_cmd.c b/dpdk/drivers/net/hns3/hns3_cmd.c +index bdfc85f934..7bdf7740c1 100644 +--- a/dpdk/drivers/net/hns3/hns3_cmd.c ++++ b/dpdk/drivers/net/hns3/hns3_cmd.c +@@ -507,6 +507,8 @@ hns3_parse_capability(struct hns3_hw *hw, + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1); + if (hns3_get_bit(caps, HNS3_CAPS_TM_B)) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1); ++ if (hns3_get_bit(caps, HNS3_CAPS_GRO_B)) ++ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1); + } + + static uint32_t +@@ -519,6 +521,41 @@ hns3_build_api_caps(void) + return rte_cpu_to_le_32(api_caps); + } + ++static void ++hns3_set_dcb_capability(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ struct rte_pci_device *pci_dev; ++ struct rte_eth_dev *eth_dev; ++ uint16_t device_id; ++ ++ if 
(hns->is_vf) ++ return; ++ ++ eth_dev = &rte_eth_devices[hw->data->port_id]; ++ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); ++ device_id = pci_dev->id.device_id; ++ ++ if (device_id == HNS3_DEV_ID_25GE_RDMA || ++ device_id == HNS3_DEV_ID_50GE_RDMA || ++ device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || ++ device_id == HNS3_DEV_ID_200G_RDMA) ++ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); ++} ++ ++static void ++hns3_set_default_capability(struct hns3_hw *hw) ++{ ++ hns3_set_dcb_capability(hw); ++ ++ /* ++ * The firmware of the network engines with HIP08 do not report some ++ * capabilities, like GRO. Set default capabilities for it. ++ */ ++ if (hw->revision < PCI_REVISION_ID_HIP09_A) ++ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1); ++} ++ + static int + hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) + { +@@ -536,6 +573,9 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) + return ret; + + hw->fw_version = rte_le_to_cpu_32(resp->firmware); ++ ++ hns3_set_default_capability(hw); ++ + /* + * Make sure mask the capability before parse capability because it + * may overwrite resp's data. +@@ -659,9 +699,6 @@ hns3_cmd_init(struct hns3_hw *hw) + hw->cmq.csq.next_to_use = 0; + hw->cmq.crq.next_to_clean = 0; + hw->cmq.crq.next_to_use = 0; +- hw->mbx_resp.head = 0; +- hw->mbx_resp.tail = 0; +- hw->mbx_resp.lost = 0; + hns3_cmd_init_regs(hw); + + rte_spinlock_unlock(&hw->cmq.crq.lock); diff --git a/dpdk/drivers/net/hns3/hns3_cmd.h b/dpdk/drivers/net/hns3/hns3_cmd.h -index 994dfc48cc..eb394c9dec 100644 +index 994dfc48cc..0a4d59bd9b 100644 --- a/dpdk/drivers/net/hns3/hns3_cmd.h +++ b/dpdk/drivers/net/hns3/hns3_cmd.h -@@ -606,6 +606,7 @@ struct hns3_rss_input_tuple_cmd { +@@ -322,6 +322,7 @@ enum HNS3_CAPS_BITS { + HNS3_CAPS_RAS_IMP_B, + HNS3_CAPS_RXD_ADV_LAYOUT_B = 15, + HNS3_CAPS_TM_B = 19, ++ HNS3_CAPS_GRO_B = 20, + }; + + /* Capabilities of VF dependent on the PF */ +@@ -606,6 +607,7 @@ struct hns3_rss_input_tuple_cmd { #define HNS3_RSS_CFG_TBL_SIZE_H 4 #define HNS3_RSS_CFG_TBL_BW_H 2 #define HNS3_RSS_CFG_TBL_BW_L 8 @@ -17978,7 +25857,7 @@ index 994dfc48cc..eb394c9dec 100644 /* Configure the indirection table, opcode:0x0D07 */ struct hns3_rss_indirection_table_cmd { diff --git a/dpdk/drivers/net/hns3/hns3_common.c b/dpdk/drivers/net/hns3/hns3_common.c -index 7adc6a4972..f077ef5057 100644 +index 7adc6a4972..5d9df03733 100644 --- a/dpdk/drivers/net/hns3/hns3_common.c +++ b/dpdk/drivers/net/hns3/hns3_common.c @@ -10,6 +10,7 @@ @@ -17989,7 +25868,17 @@ index 7adc6a4972..f077ef5057 100644 #include "hns3_common.h" int -@@ -90,10 +91,11 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) +@@ -69,8 +70,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + RTE_ETH_RX_OFFLOAD_SCATTER | + RTE_ETH_RX_OFFLOAD_VLAN_STRIP | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | +- RTE_ETH_RX_OFFLOAD_RSS_HASH | +- RTE_ETH_RX_OFFLOAD_TCP_LRO); ++ RTE_ETH_RX_OFFLOAD_RSS_HASH); + info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | +@@ -90,13 +90,16 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; @@ -18004,7 +25893,12 @@ index 7adc6a4972..f077ef5057 100644 if (hns3_dev_get_support(hw, PTP)) info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP; -@@ -128,7 +130,7 @@ hns3_dev_infos_get(struct rte_eth_dev 
*eth_dev, struct rte_eth_dev_info *info) ++ if (hns3_dev_get_support(hw, GRO)) ++ info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO; + + info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = HNS3_MAX_RING_DESC, +@@ -128,7 +131,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) }; info->reta_size = hw->rss_ind_tbl_size; @@ -18013,7 +25907,7 @@ index 7adc6a4972..f077ef5057 100644 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; -@@ -161,6 +163,9 @@ hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) +@@ -161,6 +164,9 @@ hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) RTE_SET_USED(key); @@ -18023,7 +25917,7 @@ index 7adc6a4972..f077ef5057 100644 if (strcmp(value, "vec") == 0) hint = HNS3_IO_FUNC_HINT_VEC; else if (strcmp(value, "sve") == 0) -@@ -201,6 +206,9 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) +@@ -201,6 +207,9 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) RTE_SET_USED(key); @@ -18033,7 +25927,7 @@ index 7adc6a4972..f077ef5057 100644 val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL); *(uint64_t *)extra_args = val; -@@ -214,6 +222,9 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) +@@ -214,6 +223,9 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) RTE_SET_USED(key); @@ -18043,7 +25937,40 @@ index 7adc6a4972..f077ef5057 100644 val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL); /* -@@ -845,3 +856,87 @@ hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id) +@@ -340,7 +352,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, + hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " + "invalid. 
valid range: 0~%d", + nb_mc_addr, HNS3_MC_MACADDR_NUM); +- return -EINVAL; ++ return -ENOSPC; + } + + /* Check if input mac addresses are valid */ +@@ -398,6 +410,7 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + uint32_t nb_mc_addr) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct rte_ether_addr *addr; + int cur_addr_num; + int set_addr_num; +@@ -405,6 +418,15 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + int ret; + int i; + ++ if (mc_addr_set == NULL || nb_mc_addr == 0) { ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_configure_all_mc_mac_addr(hns, true); ++ if (ret == 0) ++ hw->mc_addrs_num = 0; ++ rte_spinlock_unlock(&hw->lock); ++ return ret; ++ } ++ + /* Check if input parameters are valid */ + ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); + if (ret) +@@ -845,3 +867,87 @@ hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id) return 0; } @@ -18144,7 +26071,7 @@ index 5aa001f0cc..8eaeda26e7 100644 #endif /* HNS3_COMMON_H */ diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c -index af045b22f7..07b8c46a81 100644 +index af045b22f7..2831d3dc62 100644 --- a/dpdk/drivers/net/hns3/hns3_dcb.c +++ b/dpdk/drivers/net/hns3/hns3_dcb.c @@ -237,9 +237,9 @@ hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr) @@ -18172,8 +26099,121 @@ index af045b22f7..07b8c46a81 100644 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid]; ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; } +@@ -1089,7 +1082,7 @@ hns3_dcb_map_cfg(struct hns3_hw *hw) + + ret = hns3_pg_to_pri_map(hw); + if (ret) { +- hns3_err(hw, "pri_to_pg mapping fail: %d", ret); ++ hns3_err(hw, "pg_to_pri mapping fail: %d", ret); + return ret; + } + +diff --git a/dpdk/drivers/net/hns3/hns3_dump.c b/dpdk/drivers/net/hns3/hns3_dump.c +index ae62bb56c8..bac4427227 100644 +--- a/dpdk/drivers/net/hns3/hns3_dump.c ++++ b/dpdk/drivers/net/hns3/hns3_dump.c +@@ -101,6 +101,7 @@ hns3_get_dev_feature_capability(FILE *file, struct hns3_hw *hw) + {HNS3_DEV_SUPPORT_RAS_IMP_B, "RAS IMP"}, + {HNS3_DEV_SUPPORT_TM_B, "TM"}, + {HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, "VF VLAN FILTER MOD"}, ++ {HNS3_DEV_SUPPORT_GRO_B, "GRO"} + }; + uint32_t i; + +@@ -660,10 +661,10 @@ hns3_get_tm_conf_shaper_info(FILE *file, struct hns3_tm_conf *conf) + if (conf->nb_shaper_profile == 0) + return; + +- fprintf(file, " shaper_profile:\n"); ++ fprintf(file, "\t -- shaper_profile:\n"); + TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) { + fprintf(file, +- " id=%u reference_count=%u peak_rate=%" PRIu64 "Bps\n", ++ "\t id=%u reference_count=%u peak_rate=%" PRIu64 "Bps\n", + shaper_profile->shaper_profile_id, + shaper_profile->reference_count, + shaper_profile->profile.peak.rate); +@@ -677,8 +678,8 @@ hns3_get_tm_conf_port_node_info(FILE *file, struct hns3_tm_conf *conf) + return; + + fprintf(file, +- " port_node:\n" +- " node_id=%u reference_count=%u shaper_profile_id=%d\n", ++ "\t -- port_node:\n" ++ "\t node_id=%u reference_count=%u shaper_profile_id=%d\n", + conf->root->id, conf->root->reference_count, + conf->root->shaper_profile ? 
+ (int)conf->root->shaper_profile->shaper_profile_id : -1); +@@ -695,7 +696,7 @@ hns3_get_tm_conf_tc_node_info(FILE *file, struct hns3_tm_conf *conf) + if (conf->nb_tc_node == 0) + return; + +- fprintf(file, " tc_node:\n"); ++ fprintf(file, "\t -- tc_node:\n"); + memset(tc_node, 0, sizeof(tc_node)); + TAILQ_FOREACH(tm_node, tc_list, node) { + tidx = hns3_tm_calc_node_tc_no(conf, tm_node->id); +@@ -708,7 +709,7 @@ hns3_get_tm_conf_tc_node_info(FILE *file, struct hns3_tm_conf *conf) + if (tm_node == NULL) + continue; + fprintf(file, +- " id=%u TC%u reference_count=%u parent_id=%d " ++ "\t id=%u TC%u reference_count=%u parent_id=%d " + "shaper_profile_id=%d\n", + tm_node->id, hns3_tm_calc_node_tc_no(conf, tm_node->id), + tm_node->reference_count, +@@ -734,7 +735,7 @@ hns3_get_tm_conf_queue_format_info(FILE *file, struct hns3_tm_node **queue_node, + end_queue_id = (i + 1) * HNS3_PERLINE_QUEUES - 1; + if (end_queue_id > nb_tx_queues - 1) + end_queue_id = nb_tx_queues - 1; +- fprintf(file, " %04u - %04u | ", start_queue_id, ++ fprintf(file, "\t %04u - %04u | ", start_queue_id, + end_queue_id); + for (j = start_queue_id; j < nb_tx_queues; j++) { + if (j >= end_queue_id + 1) +@@ -763,8 +764,8 @@ hns3_get_tm_conf_queue_node_info(FILE *file, struct hns3_tm_conf *conf, + return; + + fprintf(file, +- " queue_node:\n" +- " tx queue id | mapped tc (8 mean node not exist)\n"); ++ "\t -- queue_node:\n" ++ "\t tx queue id | mapped tc (8 mean node not exist)\n"); + + memset(queue_node, 0, sizeof(queue_node)); + memset(queue_node_tc, 0, sizeof(queue_node_tc)); +@@ -914,6 +915,8 @@ hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file) + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + ++ rte_spinlock_lock(&hw->lock); ++ + hns3_get_device_basic_info(file, dev); + hns3_get_dev_feature_capability(file, hw); + hns3_get_rxtx_queue_info(file, dev); +@@ -923,8 +926,10 @@ hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file) + * VF only supports dumping basic info, feature capability and queue + * info. 
+ */ +- if (hns->is_vf) ++ if (hns->is_vf) { ++ rte_spinlock_unlock(&hw->lock); + return 0; ++ } + + hns3_get_dev_mac_info(file, hns); + hns3_get_vlan_config_info(file, hw); +@@ -932,6 +937,8 @@ hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file) + hns3_get_tm_conf_info(file, dev); + hns3_get_flow_ctrl_info(file, dev); + ++ rte_spinlock_unlock(&hw->lock); ++ + return 0; + } + diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c -index d326f70129..b9a848540b 100644 +index d326f70129..27f9dd2eb2 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev.c +++ b/dpdk/drivers/net/hns3/hns3_ethdev.c @@ -15,6 +15,7 @@ @@ -18192,10 +26232,16 @@ index d326f70129..b9a848540b 100644 #define HNS3_RESET_WAIT_MS 100 #define HNS3_RESET_WAIT_CNT 200 -@@ -60,6 +62,13 @@ enum hns3_evt_cause { +@@ -60,6 +62,19 @@ enum hns3_evt_cause { HNS3_VECTOR0_EVENT_OTHER, }; ++struct hns3_intr_state { ++ uint32_t vector0_state; ++ uint32_t cmdq_state; ++ uint32_t hw_err_state; ++}; ++ +#define HNS3_SPEEDS_SUPP_FEC (RTE_ETH_LINK_SPEED_10G | \ + RTE_ETH_LINK_SPEED_25G | \ + RTE_ETH_LINK_SPEED_40G | \ @@ -18206,7 +26252,7 @@ index d326f70129..b9a848540b 100644 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = { { RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | -@@ -83,8 +92,7 @@ static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = { +@@ -83,8 +98,7 @@ static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = { RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, @@ -18216,7 +26262,175 @@ index d326f70129..b9a848540b 100644 RTE_ETH_FEC_MODE_CAPA_MASK(RS) } }; -@@ -286,6 +294,19 @@ hns3_handle_mac_tnl(struct hns3_hw *hw) +@@ -120,63 +134,51 @@ hns3_pf_enable_irq0(struct hns3_hw *hw) + } + + static enum hns3_evt_cause +-hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay, +- uint32_t *vec_val) ++hns3_proc_imp_reset_event(struct hns3_adapter *hns, uint32_t *vec_val) + { + struct hns3_hw *hw = &hns->hw; + + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); + hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); + *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); +- if (!is_delay) { +- hw->reset.stats.imp_cnt++; +- hns3_warn(hw, "IMP reset detected, clear reset status"); +- } else { +- hns3_schedule_delayed_reset(hns); +- hns3_warn(hw, "IMP reset detected, don't clear reset status"); +- } ++ hw->reset.stats.imp_cnt++; ++ hns3_warn(hw, "IMP reset detected, clear reset status"); + + return HNS3_VECTOR0_EVENT_RST; + } + + static enum hns3_evt_cause +-hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay, +- uint32_t *vec_val) ++hns3_proc_global_reset_event(struct hns3_adapter *hns, uint32_t *vec_val) + { + struct hns3_hw *hw = &hns->hw; + + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); + hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); + *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); +- if (!is_delay) { +- hw->reset.stats.global_cnt++; +- hns3_warn(hw, "Global reset detected, clear reset status"); +- } else { +- hns3_schedule_delayed_reset(hns); +- hns3_warn(hw, +- "Global reset detected, don't clear reset status"); +- } ++ hw->reset.stats.global_cnt++; ++ hns3_warn(hw, "Global reset detected, clear reset status"); + + return HNS3_VECTOR0_EVENT_RST; + } + ++static void ++hns3_query_intr_state(struct hns3_hw *hw, struct hns3_intr_state *state) ++{ ++ state->vector0_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); ++ state->cmdq_state = 
hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); ++ state->hw_err_state = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); ++} ++ + static enum hns3_evt_cause + hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + { + struct hns3_hw *hw = &hns->hw; +- uint32_t vector0_int_stats; +- uint32_t cmdq_src_val; +- uint32_t hw_err_src_reg; ++ struct hns3_intr_state state; + uint32_t val; + enum hns3_evt_cause ret; +- bool is_delay; + +- /* fetch the events from their corresponding regs */ +- vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); +- cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); +- hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); ++ hns3_query_intr_state(hw, &state); + +- is_delay = clearval == NULL ? true : false; + /* + * Assumption: If by any chance reset and mailbox events are reported + * together then we will only process reset event and defer the +@@ -184,49 +186,72 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + * RX CMDQ event this time we would receive again another interrupt + * from H/W just for the mailbox. + */ +- if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */ +- ret = hns3_proc_imp_reset_event(hns, is_delay, &val); ++ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & state.vector0_state) { /* IMP */ ++ ret = hns3_proc_imp_reset_event(hns, &val); + goto out; + } + + /* Global reset */ +- if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) { +- ret = hns3_proc_global_reset_event(hns, is_delay, &val); ++ if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & state.vector0_state) { ++ ret = hns3_proc_global_reset_event(hns, &val); + goto out; + } + + /* Check for vector0 1588 event source */ +- if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) { ++ if (BIT(HNS3_VECTOR0_1588_INT_B) & state.vector0_state) { + val = BIT(HNS3_VECTOR0_1588_INT_B); + ret = HNS3_VECTOR0_EVENT_PTP; + goto out; + } + + /* check for vector0 msix event source */ +- if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK || +- hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) { +- val = vector0_int_stats | hw_err_src_reg; ++ if (state.vector0_state & HNS3_VECTOR0_REG_MSIX_MASK || ++ state.hw_err_state & HNS3_RAS_REG_NFE_MASK) { ++ val = state.vector0_state | state.hw_err_state; + ret = HNS3_VECTOR0_EVENT_ERR; + goto out; + } + + /* check for vector0 mailbox(=CMDQ RX) event source */ +- if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) { +- cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); +- val = cmdq_src_val; ++ if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & state.cmdq_state) { ++ state.cmdq_state &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); ++ val = state.cmdq_state; + ret = HNS3_VECTOR0_EVENT_MBX; + goto out; + } + +- val = vector0_int_stats; ++ val = state.vector0_state; + ret = HNS3_VECTOR0_EVENT_OTHER; +-out: + +- if (clearval) +- *clearval = val; ++out: ++ *clearval = val; + return ret; + } + ++void ++hns3_clear_reset_event(struct hns3_hw *hw) ++{ ++ uint32_t clearval = 0; ++ ++ switch (hw->reset.level) { ++ case HNS3_IMP_RESET: ++ clearval = BIT(HNS3_VECTOR0_IMPRESET_INT_B); ++ break; ++ case HNS3_GLOBAL_RESET: ++ clearval = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); ++ break; ++ default: ++ break; ++ } ++ ++ if (clearval == 0) ++ return; ++ ++ hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, clearval); ++ ++ hns3_pf_enable_irq0(hw); ++} ++ + static void + hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr) + { +@@ -286,6 +311,47 @@ hns3_handle_mac_tnl(struct hns3_hw *hw) } } @@ -18233,18 +26447,90 @@ 
index d326f70129..b9a848540b 100644 + } +} + ++static bool ++hns3_reset_event_valid(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ enum hns3_reset_level new_req = HNS3_NONE_RESET; ++ enum hns3_reset_level last_req; ++ uint32_t vector0_int; ++ ++ vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); ++ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int) ++ new_req = HNS3_IMP_RESET; ++ else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int) ++ new_req = HNS3_GLOBAL_RESET; ++ if (new_req == HNS3_NONE_RESET) ++ return true; ++ ++ last_req = hns3_get_reset_level(hns, &hw->reset.pending); ++ if (last_req == HNS3_NONE_RESET) ++ return true; ++ ++ if (new_req > last_req) ++ return true; ++ ++ hns3_warn(hw, "last_req (%u) less than or equal to new_req (%u) ignore", ++ last_req, new_req); ++ return false; ++} ++ static void hns3_interrupt_handler(void *param) { -@@ -305,6 +326,7 @@ hns3_interrupt_handler(void *param) - vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); - ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); - cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); +@@ -293,24 +359,25 @@ hns3_interrupt_handler(void *param) + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + enum hns3_evt_cause event_cause; ++ struct hns3_intr_state state; + uint32_t clearval = 0; +- uint32_t vector0_int; +- uint32_t ras_int; +- uint32_t cmdq_int; ++ ++ if (!hns3_reset_event_valid(hw)) ++ return; + + /* Disable interrupt */ + hns3_pf_disable_irq0(hw); + + event_cause = hns3_check_event_cause(hns, &clearval); +- vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); +- ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); +- cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); ++ hns3_query_intr_state(hw, &state); + hns3_delay_before_clear_event_cause(hw, event_cause, clearval); hns3_clear_event_cause(hw, event_cause, clearval); /* vector 0 interrupt is shared with reset and mailbox source events. */ if (event_cause == HNS3_VECTOR0_EVENT_ERR) { -@@ -2257,6 +2279,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) + hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x " + "ras_int_stat:0x%x cmdq_int_stat:0x%x", +- vector0_int, ras_int, cmdq_int); ++ state.vector0_state, state.hw_err_state, ++ state.cmdq_state); + hns3_handle_mac_tnl(hw); + hns3_handle_error(hns); + } else if (event_cause == HNS3_VECTOR0_EVENT_RST) { +@@ -321,11 +388,16 @@ hns3_interrupt_handler(void *param) + } else if (event_cause != HNS3_VECTOR0_EVENT_PTP) { + hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x " + "ras_int_stat:0x%x cmdq_int_stat:0x%x", +- vector0_int, ras_int, cmdq_int); ++ state.vector0_state, state.hw_err_state, ++ state.cmdq_state); + } + + /* Enable interrupt if it is not cause by reset */ +- hns3_pf_enable_irq0(hw); ++ if (event_cause == HNS3_VECTOR0_EVENT_ERR || ++ event_cause == HNS3_VECTOR0_EVENT_MBX || ++ event_cause == HNS3_VECTOR0_EVENT_PTP || ++ event_cause == HNS3_VECTOR0_EVENT_OTHER) ++ hns3_pf_enable_irq0(hw); + } + + static int +@@ -2257,6 +2329,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) struct rte_eth_link new_link; int ret; @@ -18252,7 +26538,7 @@ index d326f70129..b9a848540b 100644 /* When port is stopped, report link down. 
*/ if (eth_dev->data->dev_started == 0) { new_link.link_autoneg = mac->link_autoneg; -@@ -2280,7 +2303,6 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) +@@ -2280,7 +2353,6 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); } while (retry_cnt--); @@ -18260,7 +26546,7 @@ index d326f70129..b9a848540b 100644 hns3_setup_linkstatus(eth_dev, &new_link); out: -@@ -2647,69 +2669,6 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed) +@@ -2647,93 +2719,13 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed) return 0; } @@ -18330,7 +26616,31 @@ index d326f70129..b9a848540b 100644 static int hns3_get_capability(struct hns3_hw *hw) { -@@ -3677,7 +3636,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); +- struct rte_pci_device *pci_dev; + struct hns3_pf *pf = &hns->pf; +- struct rte_eth_dev *eth_dev; +- uint16_t device_id; + int ret; + +- eth_dev = &rte_eth_devices[hw->data->port_id]; +- pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); +- device_id = pci_dev->id.device_id; +- +- if (device_id == HNS3_DEV_ID_25GE_RDMA || +- device_id == HNS3_DEV_ID_50GE_RDMA || +- device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || +- device_id == HNS3_DEV_ID_200G_RDMA) +- hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); +- +- ret = hns3_get_pci_revision_id(hw, &hw->revision); +- if (ret) +- return ret; +- + ret = hns3_query_mac_stats_reg_num(hw); + if (ret) + return ret; +@@ -3677,7 +3669,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) if (cmdq_resp) { PMD_INIT_LOG(ERR, @@ -18339,7 +26649,7 @@ index d326f70129..b9a848540b 100644 cmdq_resp); return -EIO; } -@@ -4451,6 +4410,12 @@ hns3_init_hardware(struct hns3_adapter *hns) +@@ -4451,6 +4443,12 @@ hns3_init_hardware(struct hns3_adapter *hns) goto err_mac_init; } @@ -18352,7 +26662,18 @@ index d326f70129..b9a848540b 100644 return 0; err_mac_init: -@@ -4630,10 +4595,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4590,6 +4588,10 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) + /* Get hardware io base address from pcie BAR2 IO space */ + hw->io_base = pci_dev->mem_resource[2].addr; + ++ ret = hns3_get_pci_revision_id(hw, &hw->revision); ++ if (ret) ++ return ret; ++ + /* Firmware command queue initialize */ + ret = hns3_cmd_init_queue(hw); + if (ret) { +@@ -4630,10 +4632,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_intr_callback_register; } @@ -18363,7 +26684,7 @@ index d326f70129..b9a848540b 100644 /* Enable interrupt */ rte_intr_enable(pci_dev->intr_handle); hns3_pf_enable_irq0(hw); -@@ -4690,6 +4651,7 @@ err_enable_intr: +@@ -4690,6 +4688,7 @@ err_enable_intr: hns3_fdir_filter_uninit(hns); err_fdir: hns3_uninit_umv_space(hw); @@ -18371,7 +26692,7 @@ index d326f70129..b9a848540b 100644 err_init_hw: hns3_stats_uninit(hw); err_get_config: -@@ -4725,6 +4687,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) +@@ -4725,6 +4724,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) hns3_flow_uninit(eth_dev); hns3_fdir_filter_uninit(hns); hns3_uninit_umv_space(hw); @@ -18379,7 +26700,7 @@ index d326f70129..b9a848540b 100644 hns3_stats_uninit(hw); hns3_config_mac_tnl_int(hw, false); hns3_pf_disable_irq0(hw); -@@ -5115,8 +5078,7 @@ hns3_dev_start(struct rte_eth_dev *dev) +@@ -5115,8 +5115,7 @@ hns3_dev_start(struct rte_eth_dev *dev) rte_spinlock_unlock(&hw->lock); hns3_rx_scattered_calc(dev); @@ -18389,7 +26710,7 @@ index d326f70129..b9a848540b 100644 /* Enable interrupt of all rx 
queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); -@@ -5194,12 +5156,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) +@@ -5194,12 +5193,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) dev->data->dev_started = 0; hw->adapter_state = HNS3_NIC_STOPPING; @@ -18403,7 +26724,7 @@ index d326f70129..b9a848540b 100644 rte_spinlock_lock(&hw->lock); if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { -@@ -5373,16 +5330,7 @@ hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) +@@ -5373,16 +5367,7 @@ hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) if (!pf->support_fc_autoneg) { if (autoneg != 0) { @@ -18421,7 +26742,83 @@ index d326f70129..b9a848540b 100644 return -EOPNOTSUPP; } -@@ -5662,17 +5610,6 @@ hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) +@@ -5591,31 +5576,60 @@ is_pf_reset_done(struct hns3_hw *hw) + return true; + } + ++static enum hns3_reset_level ++hns3_detect_reset_event(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ enum hns3_reset_level new_req = HNS3_NONE_RESET; ++ enum hns3_reset_level last_req; ++ uint32_t vector0_intr_state; ++ ++ last_req = hns3_get_reset_level(hns, &hw->reset.pending); ++ vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); ++ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ new_req = HNS3_IMP_RESET; ++ } else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ new_req = HNS3_GLOBAL_RESET; ++ } ++ ++ if (new_req == HNS3_NONE_RESET) ++ return HNS3_NONE_RESET; ++ ++ if (last_req == HNS3_NONE_RESET || last_req < new_req) { ++ hns3_schedule_delayed_reset(hns); ++ hns3_warn(hw, "High level reset detected, delay do reset"); ++ } ++ ++ return new_req; ++} ++ + bool + hns3_is_reset_pending(struct hns3_adapter *hns) + { ++ enum hns3_reset_level new_req; + struct hns3_hw *hw = &hns->hw; +- enum hns3_reset_level reset; ++ enum hns3_reset_level last_req; + + /* +- * Check the registers to confirm whether there is reset pending. +- * Note: This check may lead to schedule reset task, but only primary +- * process can process the reset event. Therefore, limit the +- * checking under only primary process. ++ * Only primary can process can process the reset event, ++ * so don't check reset event in secondary. 
+ */ +- if (rte_eal_process_type() == RTE_PROC_PRIMARY) +- hns3_check_event_cause(hns, NULL); ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return false; + +- reset = hns3_get_reset_level(hns, &hw->reset.pending); +- if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && +- hw->reset.level < reset) { +- hns3_warn(hw, "High level reset %d is pending", reset); ++ new_req = hns3_detect_reset_event(hw); ++ last_req = hns3_get_reset_level(hns, &hw->reset.pending); ++ if (last_req != HNS3_NONE_RESET && new_req != HNS3_NONE_RESET && ++ new_req < last_req) { ++ hns3_warn(hw, "High level reset %d is pending", last_req); + return true; + } +- reset = hns3_get_reset_level(hns, &hw->reset.request); +- if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && +- hw->reset.level < reset) { +- hns3_warn(hw, "High level reset %d is request", reset); ++ last_req = hns3_get_reset_level(hns, &hw->reset.request); ++ if (last_req != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && ++ hw->reset.level < last_req) { ++ hns3_warn(hw, "High level reset %d is request", last_req); + return true; + } + return false; +@@ -5662,17 +5676,6 @@ hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) return hns3_cmd_send(hw, &desc, 1); } @@ -18439,7 +26836,7 @@ index d326f70129..b9a848540b 100644 static void hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) { -@@ -5690,7 +5627,9 @@ hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) +@@ -5690,7 +5693,9 @@ hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) switch (reset_level) { case HNS3_IMP_RESET: @@ -18450,7 +26847,7 @@ index d326f70129..b9a848540b 100644 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", tv.tv_sec, tv.tv_usec); break; -@@ -5815,12 +5754,7 @@ hns3_stop_service(struct hns3_adapter *hns) +@@ -5815,12 +5820,7 @@ hns3_stop_service(struct hns3_adapter *hns) rte_eal_alarm_cancel(hns3_service_handler, eth_dev); hns3_update_linkstatus_and_event(hw, false); } @@ -18464,7 +26861,7 @@ index d326f70129..b9a848540b 100644 rte_spinlock_lock(&hw->lock); if (hns->hw.adapter_state == HNS3_NIC_STARTED || -@@ -5853,8 +5787,7 @@ hns3_start_service(struct hns3_adapter *hns) +@@ -5853,8 +5853,7 @@ hns3_start_service(struct hns3_adapter *hns) hw->reset.level == HNS3_GLOBAL_RESET) hns3_set_rst_done(hw); eth_dev = &rte_eth_devices[hw->data->port_id]; @@ -18474,7 +26871,7 @@ index d326f70129..b9a848540b 100644 if (hw->adapter_state == HNS3_NIC_STARTED) { /* * This API parent function already hold the hns3_hw.lock, the -@@ -6003,56 +5936,27 @@ hns3_reset_service(void *param) +@@ -6003,56 +6002,27 @@ hns3_reset_service(void *param) hns3_msix_process(hns, reset_level); } @@ -18547,7 +26944,7 @@ index d326f70129..b9a848540b 100644 } static int -@@ -6061,28 +5965,28 @@ hns3_fec_get_capability(struct rte_eth_dev *dev, +@@ -6061,28 +6031,28 @@ hns3_fec_get_capability(struct rte_eth_dev *dev, unsigned int num) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -18591,7 +26988,7 @@ index d326f70129..b9a848540b 100644 static int get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) { -@@ -6220,61 +6124,27 @@ hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) +@@ -6220,61 +6190,27 @@ hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) } static uint32_t @@ -18648,11 +27045,11 @@ index d326f70129..b9a848540b 100644 uint32_t cur_capa; - uint32_t num = FEC_CAPA_NUM; - int ret; -- + - ret = hns3_fec_get_capability(dev, fec_capa, num); - if 
(ret < 0) - return ret; - +- - /* HNS3 PMD only support one bit set mode, e.g. 0x1, 0x4 */ - if (!is_fec_mode_one_bit_set(mode)) { - hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, " @@ -18662,7 +27059,7 @@ index d326f70129..b9a848540b 100644 return -EINVAL; } -@@ -6282,12 +6152,27 @@ hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) +@@ -6282,12 +6218,27 @@ hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) * Check whether the configured mode is within the FEC capability. * If not, the configured mode will not be supported. */ @@ -18693,7 +27090,7 @@ index d326f70129..b9a848540b 100644 rte_spinlock_lock(&hw->lock); ret = hns3_set_fec_hw(hw, mode); if (ret) { -@@ -6342,7 +6227,7 @@ hns3_optical_module_existed(struct hns3_hw *hw) +@@ -6342,7 +6293,7 @@ hns3_optical_module_existed(struct hns3_hw *hw) ret = hns3_cmd_send(hw, &desc, 1); if (ret) { hns3_err(hw, @@ -18702,7 +27099,7 @@ index d326f70129..b9a848540b 100644 ret); return false; } -@@ -6380,7 +6265,7 @@ hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, +@@ -6380,7 +6331,7 @@ hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); if (ret) { @@ -18711,7 +27108,7 @@ index d326f70129..b9a848540b 100644 ret); return ret; } -@@ -6417,7 +6302,7 @@ hns3_get_module_eeprom(struct rte_eth_dev *dev, +@@ -6417,7 +6368,7 @@ hns3_get_module_eeprom(struct rte_eth_dev *dev, return -ENOTSUP; if (!hns3_optical_module_existed(hw)) { @@ -18720,7 +27117,7 @@ index d326f70129..b9a848540b 100644 return -EIO; } -@@ -6480,7 +6365,7 @@ hns3_get_module_info(struct rte_eth_dev *dev, +@@ -6480,7 +6431,7 @@ hns3_get_module_info(struct rte_eth_dev *dev, modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; break; default: @@ -18730,7 +27127,7 @@ index d326f70129..b9a848540b 100644 return -EINVAL; } diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h -index 2457754b3d..58572948fe 100644 +index 2457754b3d..9e67e93d3f 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev.h +++ b/dpdk/drivers/net/hns3/hns3_ethdev.h @@ -871,13 +871,6 @@ struct hns3_adapter { @@ -18747,7 +27144,15 @@ index 2457754b3d..58572948fe 100644 enum hns3_dev_cap { HNS3_DEV_SUPPORT_DCB_B, HNS3_DEV_SUPPORT_COPPER_B, -@@ -996,15 +989,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) +@@ -891,6 +884,7 @@ enum hns3_dev_cap { + HNS3_DEV_SUPPORT_RAS_IMP_B, + HNS3_DEV_SUPPORT_TM_B, + HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, ++ HNS3_DEV_SUPPORT_GRO_B, + }; + + #define hns3_dev_get_support(hw, _name) \ +@@ -996,15 +990,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) #define hns3_read_dev(a, reg) \ hns3_read_reg((a)->io_base, (reg)) @@ -18763,9 +27168,12 @@ index 2457754b3d..58572948fe 100644 static inline uint64_t hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr) { -@@ -1046,21 +1030,6 @@ void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, +@@ -1045,22 +1030,9 @@ void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); + void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, uint32_t link_speed, uint8_t link_duplex); void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); ++void hns3_clear_reset_event(struct hns3_hw *hw); ++void hns3vf_clear_reset_event(struct hns3_hw *hw); -int hns3_restore_ptp(struct hns3_adapter *hns); -int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, @@ -18785,14 +27193,63 @@ index 2457754b3d..58572948fe 100644 static inline bool 
is_reset_pending(struct hns3_adapter *hns) +@@ -1073,4 +1045,15 @@ is_reset_pending(struct hns3_adapter *hns) + return ret; + } + ++static inline void ++hns3_clear_reset_status(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ ++ if (hns->is_vf) ++ hns3vf_clear_reset_event(hw); ++ else ++ hns3_clear_reset_event(hw); ++} ++ + #endif /* HNS3_ETHDEV_H */ diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -index d220522c43..5aac62a41f 100644 +index d220522c43..702a61aad9 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -@@ -688,67 +688,6 @@ hns3vf_interrupt_handler(void *param) - hns3vf_enable_irq0(hw); +@@ -250,6 +250,8 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", + mac_str, ret); + } ++ rte_spinlock_unlock(&hw->lock); ++ return ret; + } + + rte_ether_addr_copy(mac_addr, +@@ -610,6 +612,19 @@ hns3vf_enable_irq0(struct hns3_hw *hw) + hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1); } ++void ++hns3vf_clear_reset_event(struct hns3_hw *hw) ++{ ++ uint32_t clearval; ++ uint32_t cmdq_stat_reg; ++ ++ cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); ++ clearval = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); ++ hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, clearval); ++ ++ hns3vf_enable_irq0(hw); ++} ++ + static enum hns3vf_evt_cause + hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + { +@@ -684,69 +699,10 @@ hns3vf_interrupt_handler(void *param) + break; + } + +- /* Enable interrupt */ +- hns3vf_enable_irq0(hw); +-} +- -static void -hns3vf_set_default_dev_specifications(struct hns3_hw *hw) -{ @@ -18852,21 +27309,28 @@ index d220522c43..5aac62a41f 100644 - hns3vf_parse_dev_specifications(hw, desc); - - return hns3vf_check_dev_specifications(hw); --} -- ++ /* Enable interrupt if it is not caused by reset */ ++ if (event_cause == HNS3VF_VECTOR0_EVENT_MBX || ++ event_cause == HNS3VF_VECTOR0_EVENT_OTHER) ++ hns3vf_enable_irq0(hw); + } + void - hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported) +@@ -821,12 +777,8 @@ hns3vf_get_capability(struct hns3_hw *hw) { -@@ -826,7 +765,7 @@ hns3vf_get_capability(struct hns3_hw *hw) - return ret; + int ret; +- ret = hns3_get_pci_revision_id(hw, &hw->revision); +- if (ret) +- return ret; +- if (hw->revision < PCI_REVISION_ID_HIP09_A) { - hns3vf_set_default_dev_specifications(hw); + hns3_set_default_dev_specifications(hw); hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; -@@ -837,7 +776,7 @@ hns3vf_get_capability(struct hns3_hw *hw) +@@ -837,7 +789,7 @@ hns3vf_get_capability(struct hns3_hw *hw) return 0; } @@ -18875,7 +27339,18 @@ index d220522c43..5aac62a41f 100644 if (ret) { PMD_INIT_LOG(ERR, "failed to query dev specifications, ret = %d", -@@ -1633,12 +1572,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) +@@ -1477,6 +1429,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) + /* Get hardware io base address from pcie BAR2 IO space */ + hw->io_base = pci_dev->mem_resource[2].addr; + ++ ret = hns3_get_pci_revision_id(hw, &hw->revision); ++ if (ret) ++ return ret; ++ + /* Firmware command queue initialize */ + ret = hns3_cmd_init_queue(hw); + if (ret) { +@@ -1633,12 +1589,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) dev->data->dev_started = 0; hw->adapter_state = HNS3_NIC_STOPPING; @@ -18889,7 +27364,7 @@ index d220522c43..5aac62a41f 
100644 rte_spinlock_lock(&hw->lock); if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { -@@ -1740,8 +1674,10 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) +@@ -1740,8 +1691,10 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) hns3_enable_rxd_adv_layout(hw); ret = hns3_init_queues(hns, reset_queue); @@ -18901,7 +27376,7 @@ index d220522c43..5aac62a41f 100644 return hns3_restore_filter(hns); } -@@ -1792,8 +1728,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) +@@ -1792,8 +1745,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) rte_spinlock_unlock(&hw->lock); hns3_rx_scattered_calc(dev); @@ -18911,7 +27386,27 @@ index d220522c43..5aac62a41f 100644 /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); -@@ -1963,11 +1898,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) +@@ -1859,14 +1811,13 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) + return false; + + /* +- * Check the registers to confirm whether there is reset pending. +- * Note: This check may lead to schedule reset task, but only primary +- * process can process the reset event. Therefore, limit the +- * checking under only primary process. ++ * Only primary can process can process the reset event, ++ * so don't check reset event in secondary. + */ +- if (rte_eal_process_type() == RTE_PROC_PRIMARY) +- hns3vf_check_event_cause(hns, NULL); ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return false; + ++ hns3vf_check_event_cause(hns, NULL); + reset = hns3vf_get_reset_level(hw, &hw->reset.pending); + if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && + hw->reset.level < reset) { +@@ -1963,11 +1914,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) } hw->mac.link_status = RTE_ETH_LINK_DOWN; @@ -18924,7 +27419,7 @@ index d220522c43..5aac62a41f 100644 rte_spinlock_lock(&hw->lock); if (hw->adapter_state == HNS3_NIC_STARTED || -@@ -1999,8 +1930,7 @@ hns3vf_start_service(struct hns3_adapter *hns) +@@ -1999,8 +1946,7 @@ hns3vf_start_service(struct hns3_adapter *hns) struct rte_eth_dev *eth_dev; eth_dev = &rte_eth_devices[hw->data->port_id]; @@ -18934,8 +27429,34 @@ index d220522c43..5aac62a41f 100644 rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler, eth_dev); +@@ -2231,8 +2177,11 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) + */ + if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO || + pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) { +- if (hns3vf_enable_msix(pci_dev, true)) ++ ret = hns3vf_enable_msix(pci_dev, true); ++ if (ret != 0) { + hns3_err(hw, "Failed to enable msix"); ++ return ret; ++ } + } + + rte_intr_enable(pci_dev->intr_handle); +diff --git a/dpdk/drivers/net/hns3/hns3_fdir.c b/dpdk/drivers/net/hns3/hns3_fdir.c +index 48a91fb517..a3e79619ec 100644 +--- a/dpdk/drivers/net/hns3/hns3_fdir.c ++++ b/dpdk/drivers/net/hns3/hns3_fdir.c +@@ -974,7 +974,7 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns, + rule->key_conf.spec.src_port, + rule->key_conf.spec.dst_port, ret); + else +- hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf); ++ ret = hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf); + + return ret; + } diff --git a/dpdk/drivers/net/hns3/hns3_flow.c b/dpdk/drivers/net/hns3/hns3_flow.c -index a2c1589c39..d5c9c22633 100644 +index a2c1589c39..da17fa6e69 100644 --- a/dpdk/drivers/net/hns3/hns3_flow.c +++ b/dpdk/drivers/net/hns3/hns3_flow.c @@ -10,6 +10,125 @@ @@ -19142,6 +27663,15 @@ index a2c1589c39..d5c9c22633 100644 } static inline struct hns3_flow_counter * +@@ -802,7 +927,7 
@@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, +- "Only support src & dst port in SCTP"); ++ "Only support src & dst port & v-tag in SCTP"); + if (sctp_mask->hdr.src_port) { + hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); + rule->key_conf.mask.src_port = @@ -1246,7 +1371,6 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -20369,6 +28899,216 @@ index e4b2fdf2e6..1b49673f11 100644 TAILQ_HEAD(hns3_rss_filter_list, hns3_rss_conf_ele); TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem); +diff --git a/dpdk/drivers/net/hns3/hns3_intr.c b/dpdk/drivers/net/hns3/hns3_intr.c +index 44a1119415..916bf30dcb 100644 +--- a/dpdk/drivers/net/hns3/hns3_intr.c ++++ b/dpdk/drivers/net/hns3/hns3_intr.c +@@ -2434,8 +2434,8 @@ hns3_schedule_reset(struct hns3_adapter *hns) + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_DEFERRED) + rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns); +- else +- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, ++ ++ __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, + __ATOMIC_RELAXED); + + rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns); +@@ -2749,6 +2749,7 @@ hns3_reset_post(struct hns3_adapter *hns) + /* IMP will wait ready flag before reset */ + hns3_notify_reset_ready(hw, false); + hns3_clear_reset_level(hw, &hw->reset.pending); ++ hns3_clear_reset_status(hw); + __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED); + hw->reset.attempts = 0; + hw->reset.stats.success_cnt++; +@@ -2798,6 +2799,7 @@ hns3_reset_fail_handle(struct hns3_adapter *hns) + struct timeval tv; + + hns3_clear_reset_level(hw, &hw->reset.pending); ++ hns3_clear_reset_status(hw); + if (hns3_reset_err_handle(hns)) { + hw->reset.stage = RESET_STAGE_PREWAIT; + hns3_schedule_reset(hns); +diff --git a/dpdk/drivers/net/hns3/hns3_mbx.c b/dpdk/drivers/net/hns3/hns3_mbx.c +index 8e0a58aa02..f1743c195e 100644 +--- a/dpdk/drivers/net/hns3/hns3_mbx.c ++++ b/dpdk/drivers/net/hns3/hns3_mbx.c +@@ -40,23 +40,6 @@ hns3_resp_to_errno(uint16_t resp_code) + return -EIO; + } + +-static void +-hns3_mbx_proc_timeout(struct hns3_hw *hw, uint16_t code, uint16_t subcode) +-{ +- if (hw->mbx_resp.matching_scheme == +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) { +- hw->mbx_resp.lost++; +- hns3_err(hw, +- "VF could not get mbx(%u,%u) head(%u) tail(%u) " +- "lost(%u) from PF", +- code, subcode, hw->mbx_resp.head, hw->mbx_resp.tail, +- hw->mbx_resp.lost); +- return; +- } +- +- hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode); +-} +- + static int + hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + uint8_t *resp_data, uint16_t resp_len) +@@ -67,7 +50,6 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_mbx_resp_status *mbx_resp; + uint32_t wait_time = 0; +- bool received; + + if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) { + hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)", +@@ -93,20 +75,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + hns3_dev_handle_mbx_msg(hw); + rte_delay_us(HNS3_WAIT_RESP_US); + +- if (hw->mbx_resp.matching_scheme == +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) +- received = (hw->mbx_resp.head == +- hw->mbx_resp.tail + hw->mbx_resp.lost); +- else +- received = 
hw->mbx_resp.received_match_resp; +- if (received) ++ if (hw->mbx_resp.received_match_resp) + break; + + wait_time += HNS3_WAIT_RESP_US; + } + hw->mbx_resp.req_msg_data = 0; + if (wait_time >= mbx_time_limit) { +- hns3_mbx_proc_timeout(hw, code, subcode); ++ hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode); + return -ETIME; + } + rte_io_rmb(); +@@ -132,7 +108,6 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) + * we get the exact scheme which is used. + */ + hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode; +- hw->mbx_resp.head++; + + /* Update match_id and ensure the value of match_id is not zero */ + hw->mbx_resp.match_id++; +@@ -185,7 +160,6 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + req->match_id = hw->mbx_resp.match_id; + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { +- hw->mbx_resp.head--; + rte_spinlock_unlock(&hw->mbx_resp.lock); + hns3_err(hw, "VF failed(=%d) to send mbx message to PF", + ret); +@@ -254,41 +228,10 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw)); + } + +-/* +- * Case1: receive response after timeout, req_msg_data +- * is 0, not equal resp_msg, do lost-- +- * Case2: receive last response during new send_mbx_msg, +- * req_msg_data is different with resp_msg, let +- * lost--, continue to wait for response. +- */ +-static void +-hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg) +-{ +- struct hns3_mbx_resp_status *resp = &hw->mbx_resp; +- uint32_t tail = resp->tail + 1; +- +- if (tail > resp->head) +- tail = resp->head; +- if (resp->req_msg_data != resp_msg) { +- if (resp->lost) +- resp->lost--; +- hns3_warn(hw, "Received a mismatched response req_msg(%x) " +- "resp_msg(%x) head(%u) tail(%u) lost(%u)", +- resp->req_msg_data, resp_msg, resp->head, tail, +- resp->lost); +- } else if (tail + resp->lost > resp->head) { +- resp->lost--; +- hns3_warn(hw, "Received a new response again resp_msg(%x) " +- "head(%u) tail(%u) lost(%u)", resp_msg, +- resp->head, tail, resp->lost); +- } +- rte_io_wmb(); +- resp->tail = tail; +-} +- + static void + hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) + { ++#define HNS3_MBX_RESP_CODE_OFFSET 16 + struct hns3_mbx_resp_status *resp = &hw->mbx_resp; + uint32_t msg_data; + +@@ -298,12 +241,6 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) + * match_id to its response. So VF could use the match_id + * to match the request. + */ +- if (resp->matching_scheme != +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) { +- resp->matching_scheme = +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID; +- hns3_info(hw, "detect mailbox support match id!"); +- } + if (req->match_id == resp->match_id) { + resp->resp_status = hns3_resp_to_errno(req->msg[3]); + memcpy(resp->additional_info, &req->msg[4], +@@ -319,11 +256,19 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) + * support copy request's match_id to its response. So VF follows the + * original scheme to process. 
+ */ ++ msg_data = (uint32_t)req->msg[1] << HNS3_MBX_RESP_CODE_OFFSET | req->msg[2]; ++ if (resp->req_msg_data != msg_data) { ++ hns3_warn(hw, ++ "received response tag (%u) is mismatched with requested tag (%u)", ++ msg_data, resp->req_msg_data); ++ return; ++ } ++ + resp->resp_status = hns3_resp_to_errno(req->msg[3]); + memcpy(resp->additional_info, &req->msg[4], + HNS3_MBX_MAX_RESP_DATA_SIZE); +- msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2]; +- hns3_update_resp_position(hw, msg_data); ++ rte_io_wmb(); ++ resp->received_match_resp = true; + } + + static void +diff --git a/dpdk/drivers/net/hns3/hns3_mbx.h b/dpdk/drivers/net/hns3/hns3_mbx.h +index c378783c6c..4a328802b9 100644 +--- a/dpdk/drivers/net/hns3/hns3_mbx.h ++++ b/dpdk/drivers/net/hns3/hns3_mbx.h +@@ -93,21 +93,11 @@ enum hns3_mbx_link_fail_subcode { + #define HNS3_MBX_MAX_RESP_DATA_SIZE 8 + #define HNS3_MBX_DEF_TIME_LIMIT_MS 500 + +-enum { +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL = 0, +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID +-}; +- + struct hns3_mbx_resp_status { + rte_spinlock_t lock; /* protects against contending sync cmd resp */ + +- uint8_t matching_scheme; +- + /* The following fields used in the matching scheme for original */ + uint32_t req_msg_data; +- uint32_t head; +- uint32_t tail; +- uint32_t lost; + + /* The following fields used in the matching scheme for match_id */ + uint16_t match_id; diff --git a/dpdk/drivers/net/hns3/hns3_mp.c b/dpdk/drivers/net/hns3/hns3_mp.c index 7184f9ad58..556f1941c6 100644 --- a/dpdk/drivers/net/hns3/hns3_mp.c @@ -20607,7 +29347,7 @@ index 459bbaf773..6b037f81c1 100644 #define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1) /* rl_usec convert to hardware count, as writing each 1 represents 4us */ diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c -index ca5a129234..6126512bd7 100644 +index ca5a129234..eeeca71a5c 100644 --- a/dpdk/drivers/net/hns3/hns3_rss.c +++ b/dpdk/drivers/net/hns3/hns3_rss.c @@ -18,56 +18,11 @@ const uint8_t hns3_hash_key[HNS3_RSS_KEY_SIZE] = { @@ -20974,8 +29714,9 @@ index ca5a129234..6126512bd7 100644 - * Used to set algorithm, key_offset and hash key of rss. + * Used to set algorithm and hash key of RSS. 
*/ - int +-int -hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) ++static int +hns3_rss_set_algo_key(struct hns3_hw *hw, uint8_t hash_algo, + const uint8_t *key, uint8_t key_len) { @@ -21040,7 +29781,7 @@ index ca5a129234..6126512bd7 100644 + return 0; +} + -+int ++static int +hns3_rss_get_algo_key(struct hns3_hw *hw, uint8_t *hash_algo, + uint8_t *key, uint8_t key_len) +{ @@ -21463,7 +30204,7 @@ index ca5a129234..6126512bd7 100644 struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - struct hns3_rss_conf *rss_cfg = &hw->rss_info; -+ uint8_t hash_algo; ++ uint8_t hash_algo = 0; + int ret; rte_spinlock_lock(&hw->lock); @@ -21518,10 +30259,10 @@ index ca5a129234..6126512bd7 100644 hw->rss_ind_tbl_size); + if (ret != 0) + goto out; - ++ + memcpy(rss_cfg->rss_indirection_tbl, indirection_tbl, + sizeof(uint16_t) * hw->rss_ind_tbl_size); -+ + +out: rte_spinlock_unlock(&hw->lock); return ret; @@ -21578,7 +30319,7 @@ index ca5a129234..6126512bd7 100644 +{ + uint8_t rss_key[HNS3_RSS_KEY_SIZE_MAX] = {0}; + bool modify_key, modify_algo; -+ uint8_t hash_algo; ++ uint8_t hash_algo = 0; + int ret; + + modify_key = (key != NULL && key_len > 0); @@ -21690,7 +30431,7 @@ index ca5a129234..6126512bd7 100644 + hw->rss_info.rss_hf = 0; } diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h -index 8e8b056f4e..415430a399 100644 +index 8e8b056f4e..9d182a8025 100644 --- a/dpdk/drivers/net/hns3/hns3_rss.h +++ b/dpdk/drivers/net/hns3/hns3_rss.h @@ -8,27 +8,107 @@ @@ -21838,7 +30579,7 @@ index 8e8b056f4e..415430a399 100644 /* * For IPv6 SCTP packets type, check whether the NIC hardware support * RSS hash using the src/dst port as the input tuple. For Kunpeng920 -@@ -108,7 +186,16 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, +@@ -108,7 +186,12 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, int hns3_rss_reset_indir_table(struct hns3_hw *hw); int hns3_config_rss(struct hns3_adapter *hns); void hns3_rss_uninit(struct hns3_adapter *hns); @@ -21847,17 +30588,13 @@ index 8e8b056f4e..415430a399 100644 -int hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key); +int hns3_set_rss_tuple_field(struct hns3_hw *hw, uint64_t tuple_fields); +int hns3_get_rss_tuple_field(struct hns3_hw *hw, uint64_t *tuple_fields); -+int hns3_rss_set_algo_key(struct hns3_hw *hw, uint8_t hash_algo, -+ const uint8_t *key, uint8_t key_len); -+int hns3_rss_get_algo_key(struct hns3_hw *hw, uint8_t *hash_algo, -+ uint8_t *key, uint8_t key_len); +uint64_t hns3_rss_calc_tuple_filed(uint64_t rss_hf); +int hns3_update_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo, + uint8_t *key, uint8_t key_len); #endif /* HNS3_RSS_H */ diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c -index f1163ce8a9..296aba8b35 100644 +index f1163ce8a9..9d473dbc22 100644 --- a/dpdk/drivers/net/hns3/hns3_rxtx.c +++ b/dpdk/drivers/net/hns3/hns3_rxtx.c @@ -50,6 +50,8 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) @@ -21887,7 +30624,20 @@ index f1163ce8a9..296aba8b35 100644 goto cfg_fake_tx_q_fail; } -@@ -2786,6 +2788,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, +@@ -1787,6 +1789,12 @@ hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, + return -EINVAL; + } + ++ if (conf->rx_free_thresh >= nb_desc) { ++ hns3_err(hw, "rx_free_thresh (%u) must be less than %u", ++ conf->rx_free_thresh, nb_desc); ++ return -EINVAL; ++ } ++ + if (conf->rx_drop_en == 0) + 
hns3_warn(hw, "if no descriptors available, packets are always " + "dropped and rx_drop_en (1) is fixed on"); +@@ -2786,6 +2794,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, { hns3_recv_scattered_pkts, "Scalar Scattered" }, { hns3_recv_pkts_vec, "Vector Neon" }, { hns3_recv_pkts_vec_sve, "Vector Sve" }, @@ -21895,7 +30645,17 @@ index f1163ce8a9..296aba8b35 100644 }; eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; -@@ -4272,24 +4275,31 @@ int +@@ -3115,6 +3124,9 @@ hns3_config_gro(struct hns3_hw *hw, bool en) + struct hns3_cmd_desc desc; + int ret; + ++ if (!hns3_dev_get_support(hw, GRO)) ++ return 0; ++ + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false); + req = (struct hns3_cfg_gro_status_cmd *)desc.data; + +@@ -4272,24 +4284,31 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, struct rte_eth_burst_mode *mode) { @@ -21942,7 +30702,7 @@ index f1163ce8a9..296aba8b35 100644 } static bool -@@ -4303,11 +4313,6 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) +@@ -4303,11 +4322,6 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) static bool hns3_get_tx_prep_needed(struct rte_eth_dev *dev) { @@ -21954,7 +30714,7 @@ index f1163ce8a9..296aba8b35 100644 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \ -@@ -4321,27 +4326,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) +@@ -4321,27 +4335,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO) uint64_t tx_offload = dev->data->dev_conf.txmode.offloads; @@ -21992,7 +30752,7 @@ index f1163ce8a9..296aba8b35 100644 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed) return hns3_xmit_pkts_vec; -@@ -4349,19 +4357,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) +@@ -4349,19 +4366,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) return hns3_xmit_pkts_vec_sve; if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed) return hns3_xmit_pkts_simple; @@ -22013,7 +30773,7 @@ index f1163ce8a9..296aba8b35 100644 return hns3_xmit_pkts; } -@@ -4401,7 +4404,6 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) +@@ -4401,7 +4413,6 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); struct hns3_adapter *hns = eth_dev->data->dev_private; @@ -22021,7 +30781,7 @@ index f1163ce8a9..296aba8b35 100644 if (hns->hw.adapter_state == HNS3_NIC_STARTED && __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) { -@@ -4409,16 +4411,16 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) +@@ -4409,16 +4420,16 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status; eth_dev->tx_pkt_burst = hw->set_link_down ? 
rte_eth_pkt_burst_dummy : @@ -22041,7 +30801,7 @@ index f1163ce8a9..296aba8b35 100644 hns3_eth_dev_fp_ops_config(eth_dev); } -@@ -4469,6 +4471,13 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -4469,6 +4480,13 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -22055,7 +30815,7 @@ index f1163ce8a9..296aba8b35 100644 ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX); if (ret) { hns3_err(hw, "fail to reset Rx queue %u, ret = %d.", -@@ -4477,6 +4486,9 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -4477,6 +4495,9 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) return ret; } @@ -22065,7 +30825,7 @@ index f1163ce8a9..296aba8b35 100644 ret = hns3_init_rxq(hns, rx_queue_id); if (ret) { hns3_err(hw, "fail to init Rx queue %u, ret = %d.", -@@ -4515,6 +4527,13 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -4515,6 +4536,13 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -22079,7 +30839,7 @@ index f1163ce8a9..296aba8b35 100644 hns3_enable_rxq(rxq, false); hns3_rx_queue_release_mbufs(rxq); -@@ -4537,6 +4556,13 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -4537,6 +4565,13 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -22093,7 +30853,7 @@ index f1163ce8a9..296aba8b35 100644 ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX); if (ret) { hns3_err(hw, "fail to reset Tx queue %u, ret = %d.", -@@ -4563,6 +4589,13 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -4563,6 +4598,13 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -22107,7 +30867,7 @@ index f1163ce8a9..296aba8b35 100644 hns3_enable_txq(txq, false); hns3_tx_queue_release_mbufs(txq); /* -@@ -4756,10 +4789,8 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) +@@ -4756,10 +4798,8 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) void hns3_start_tx_datapath(struct rte_eth_dev *dev) { @@ -22120,7 +30880,7 @@ index f1163ce8a9..296aba8b35 100644 hns3_eth_dev_fp_ops_config(dev); if (rte_eal_process_type() == RTE_PROC_SECONDARY) -@@ -4767,3 +4798,31 @@ hns3_start_tx_datapath(struct rte_eth_dev *dev) +@@ -4767,3 +4807,31 @@ hns3_start_tx_datapath(struct rte_eth_dev *dev) hns3_mp_req_start_tx(dev); } @@ -22175,7 +30935,7 @@ index ea1a805491..fa39f6481a 100644 #endif /* HNS3_RXTX_H */ diff --git a/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h b/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h -index 55d9bf817d..a20a6b6acb 100644 +index 55d9bf817d..1048b9db87 100644 --- a/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h +++ b/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h @@ -142,8 +142,8 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, @@ -22189,8 +30949,137 @@ index 55d9bf817d..a20a6b6acb 100644 0xff, 0xff, /* rte_mbuf.vlan_tci init zero */ 8, 9, 10, 11, /* rx.rss_hash to rte_mbuf.hash.rss */ }; +@@ -180,19 +180,12 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, + bd_vld = vset_lane_u16(rxdp[2].rx.bdtype_vld_udp0, bd_vld, 2); + bd_vld = vset_lane_u16(rxdp[3].rx.bdtype_vld_udp0, bd_vld, 3); + +- /* load 2 mbuf pointer */ +- mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]); +- + bd_vld = vshl_n_u16(bd_vld, + HNS3_UINT16_BIT - 1 - HNS3_RXD_VLD_B); + bd_vld = 
vreinterpret_u16_s16( + vshr_n_s16(vreinterpret_s16_u16(bd_vld), + HNS3_UINT16_BIT - 1)); + stat = ~vget_lane_u64(vreinterpret_u64_u16(bd_vld), 0); +- +- /* load 2 mbuf pointer again */ +- mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]); +- + if (likely(stat == 0)) + bd_valid_num = HNS3_DEFAULT_DESCS_PER_LOOP; + else +@@ -200,20 +193,20 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, + if (bd_valid_num == 0) + break; + +- /* use offset to control below data load oper ordering */ +- offset = rxq->offset_table[bd_valid_num]; ++ /* load 4 mbuf pointer */ ++ mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]); ++ mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]); + +- /* store 2 mbuf pointer into rx_pkts */ ++ /* store 4 mbuf pointer into rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1); ++ vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2); + +- /* read first two descs */ ++ /* use offset to control below data load oper ordering */ ++ offset = rxq->offset_table[bd_valid_num]; ++ ++ /* read 4 descs */ + descs[0] = vld2q_u64((uint64_t *)(rxdp + offset)); + descs[1] = vld2q_u64((uint64_t *)(rxdp + offset + 1)); +- +- /* store 2 mbuf pointer into rx_pkts again */ +- vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2); +- +- /* read remains two descs */ + descs[2] = vld2q_u64((uint64_t *)(rxdp + offset + 2)); + descs[3] = vld2q_u64((uint64_t *)(rxdp + offset + 3)); + +@@ -221,56 +214,47 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, + pkt_mbuf1.val[1] = vreinterpretq_u8_u64(descs[0].val[1]); + pkt_mbuf2.val[0] = vreinterpretq_u8_u64(descs[1].val[0]); + pkt_mbuf2.val[1] = vreinterpretq_u8_u64(descs[1].val[1]); ++ pkt_mbuf3.val[0] = vreinterpretq_u8_u64(descs[2].val[0]); ++ pkt_mbuf3.val[1] = vreinterpretq_u8_u64(descs[2].val[1]); ++ pkt_mbuf4.val[0] = vreinterpretq_u8_u64(descs[3].val[0]); ++ pkt_mbuf4.val[1] = vreinterpretq_u8_u64(descs[3].val[1]); + +- /* pkt 1,2 convert format from desc to pktmbuf */ ++ /* 4 packets convert format from desc to pktmbuf */ + pkt_mb1 = vqtbl2q_u8(pkt_mbuf1, shuf_desc_fields_msk); + pkt_mb2 = vqtbl2q_u8(pkt_mbuf2, shuf_desc_fields_msk); ++ pkt_mb3 = vqtbl2q_u8(pkt_mbuf3, shuf_desc_fields_msk); ++ pkt_mb4 = vqtbl2q_u8(pkt_mbuf4, shuf_desc_fields_msk); + +- /* store the first 8 bytes of pkt 1,2 mbuf's rearm_data */ +- *(uint64_t *)&sw_ring[pos + 0].mbuf->rearm_data = +- rxq->mbuf_initializer; +- *(uint64_t *)&sw_ring[pos + 1].mbuf->rearm_data = +- rxq->mbuf_initializer; +- +- /* pkt 1,2 remove crc */ ++ /* 4 packets remove crc */ + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust); + pkt_mb1 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust); + pkt_mb2 = vreinterpretq_u8_u16(tmp); ++ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust); ++ pkt_mb3 = vreinterpretq_u8_u16(tmp); ++ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); ++ pkt_mb4 = vreinterpretq_u8_u16(tmp); + +- pkt_mbuf3.val[0] = vreinterpretq_u8_u64(descs[2].val[0]); +- pkt_mbuf3.val[1] = vreinterpretq_u8_u64(descs[2].val[1]); +- pkt_mbuf4.val[0] = vreinterpretq_u8_u64(descs[3].val[0]); +- pkt_mbuf4.val[1] = vreinterpretq_u8_u64(descs[3].val[1]); +- +- /* pkt 3,4 convert format from desc to pktmbuf */ +- pkt_mb3 = vqtbl2q_u8(pkt_mbuf3, shuf_desc_fields_msk); +- pkt_mb4 = vqtbl2q_u8(pkt_mbuf4, shuf_desc_fields_msk); +- +- /* pkt 1,2 save to rx_pkts mbuf */ ++ /* save packet info to rx_pkts mbuf */ + vst1q_u8((void *)&sw_ring[pos + 0].mbuf->rx_descriptor_fields1, + pkt_mb1); + vst1q_u8((void *)&sw_ring[pos + 
1].mbuf->rx_descriptor_fields1, + pkt_mb2); ++ vst1q_u8((void *)&sw_ring[pos + 2].mbuf->rx_descriptor_fields1, ++ pkt_mb3); ++ vst1q_u8((void *)&sw_ring[pos + 3].mbuf->rx_descriptor_fields1, ++ pkt_mb4); + +- /* pkt 3,4 remove crc */ +- tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust); +- pkt_mb3 = vreinterpretq_u8_u16(tmp); +- tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); +- pkt_mb4 = vreinterpretq_u8_u16(tmp); +- +- /* store the first 8 bytes of pkt 3,4 mbuf's rearm_data */ ++ /* store the first 8 bytes of packets mbuf's rearm_data */ ++ *(uint64_t *)&sw_ring[pos + 0].mbuf->rearm_data = ++ rxq->mbuf_initializer; ++ *(uint64_t *)&sw_ring[pos + 1].mbuf->rearm_data = ++ rxq->mbuf_initializer; + *(uint64_t *)&sw_ring[pos + 2].mbuf->rearm_data = + rxq->mbuf_initializer; + *(uint64_t *)&sw_ring[pos + 3].mbuf->rearm_data = + rxq->mbuf_initializer; + +- /* pkt 3,4 save to rx_pkts mbuf */ +- vst1q_u8((void *)&sw_ring[pos + 2].mbuf->rx_descriptor_fields1, +- pkt_mb3); +- vst1q_u8((void *)&sw_ring[pos + 3].mbuf->rx_descriptor_fields1, +- pkt_mb4); +- + rte_prefetch_non_temporal(rxdp + HNS3_DEFAULT_DESCS_PER_LOOP); + + parse_retcode = hns3_desc_parse_field(rxq, &sw_ring[pos], diff --git a/dpdk/drivers/net/hns3/hns3_stats.c b/dpdk/drivers/net/hns3/hns3_stats.c -index bad65fcbed..c2e692a2c5 100644 +index bad65fcbed..9a1e8935e5 100644 --- a/dpdk/drivers/net/hns3/hns3_stats.c +++ b/dpdk/drivers/net/hns3/hns3_stats.c @@ -317,7 +317,7 @@ hns3_update_mac_stats(struct hns3_hw *hw) @@ -22202,8 +31091,267 @@ index bad65fcbed..c2e692a2c5 100644 int ret; /* The first desc has a 64-bit header, so need to consider it. */ +@@ -771,7 +771,7 @@ hns3_mac_stats_reset(struct hns3_hw *hw) + return 0; + } + +-static int ++static uint16_t + hns3_get_imissed_stats_num(struct hns3_adapter *hns) + { + #define NO_IMISSED_STATS_NUM 0 +@@ -993,7 +993,7 @@ hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats; +- int imissed_stats_num; ++ uint16_t imissed_stats_num; + int cnt = *count; + char *addr; + uint16_t i; +@@ -1170,7 +1170,7 @@ hns3_imissed_stats_name_get(struct rte_eth_dev *dev, + { + struct hns3_adapter *hns = dev->data->dev_private; + uint32_t cnt = *count; +- int imissed_stats_num; ++ uint16_t imissed_stats_num; + uint16_t i; + + imissed_stats_num = hns3_get_imissed_stats_num(hns); +@@ -1539,8 +1539,13 @@ hns3_stats_init(struct hns3_hw *hw) + return ret; + } + +- if (!hns->is_vf) +- hns3_mac_stats_reset(hw); ++ if (!hns->is_vf) { ++ ret = hns3_mac_stats_reset(hw); ++ if (ret) { ++ hns3_err(hw, "reset mac stats failed, ret = %d", ret); ++ return ret; ++ } ++ } + + return hns3_tqp_stats_init(hw); + } +diff --git a/dpdk/drivers/net/hns3/hns3_tm.c b/dpdk/drivers/net/hns3/hns3_tm.c +index e1089b6bd0..d969164014 100644 +--- a/dpdk/drivers/net/hns3/hns3_tm.c ++++ b/dpdk/drivers/net/hns3/hns3_tm.c +@@ -739,7 +739,7 @@ hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + } + + static void +-hns3_tm_nonleaf_level_capsbilities_get(struct rte_eth_dev *dev, ++hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap) + { +@@ -818,7 +818,7 @@ hns3_tm_level_capabilities_get(struct rte_eth_dev *dev, + memset(cap, 0, sizeof(struct rte_tm_level_capabilities)); + + if (level_id != HNS3_TM_NODE_LEVEL_QUEUE) +- hns3_tm_nonleaf_level_capsbilities_get(dev, 
level_id, cap); ++ hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap); + else + hns3_tm_leaf_level_capabilities_get(dev, cap); + +@@ -1081,21 +1081,6 @@ fail_clear: + return -EINVAL; + } + +-static int +-hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev, +- int clear_on_fail, +- struct rte_tm_error *error) +-{ +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- int ret; +- +- rte_spinlock_lock(&hw->lock); +- ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error); +- rte_spinlock_unlock(&hw->lock); +- +- return ret; +-} +- + static int + hns3_tm_node_shaper_do_update(struct hns3_hw *hw, + uint32_t node_id, +@@ -1195,6 +1180,148 @@ hns3_tm_node_shaper_update(struct rte_eth_dev *dev, + return 0; + } + ++static int ++hns3_tm_capabilities_get_wrap(struct rte_eth_dev *dev, ++ struct rte_tm_capabilities *cap, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_capabilities_get(dev, cap, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_shaper_profile_add_wrap(struct rte_eth_dev *dev, ++ uint32_t shaper_profile_id, ++ struct rte_tm_shaper_params *profile, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_shaper_profile_add(dev, shaper_profile_id, profile, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_shaper_profile_del_wrap(struct rte_eth_dev *dev, ++ uint32_t shaper_profile_id, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_shaper_profile_del(dev, shaper_profile_id, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_node_add_wrap(struct rte_eth_dev *dev, uint32_t node_id, ++ uint32_t parent_node_id, uint32_t priority, ++ uint32_t weight, uint32_t level_id, ++ struct rte_tm_node_params *params, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_node_add(dev, node_id, parent_node_id, priority, ++ weight, level_id, params, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_node_delete_wrap(struct rte_eth_dev *dev, ++ uint32_t node_id, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_node_delete(dev, node_id, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_node_type_get_wrap(struct rte_eth_dev *dev, ++ uint32_t node_id, ++ int *is_leaf, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_node_type_get(dev, node_id, is_leaf, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_level_capabilities_get_wrap(struct rte_eth_dev *dev, ++ uint32_t level_id, ++ struct rte_tm_level_capabilities *cap, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_level_capabilities_get(dev, level_id, cap, 
error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_node_capabilities_get_wrap(struct rte_eth_dev *dev, ++ uint32_t node_id, ++ struct rte_tm_node_capabilities *cap, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_node_capabilities_get(dev, node_id, cap, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev, ++ int clear_on_fail, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ + static int + hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev, + uint32_t node_id, +@@ -1213,14 +1340,14 @@ hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev, + } + + static const struct rte_tm_ops hns3_tm_ops = { +- .capabilities_get = hns3_tm_capabilities_get, +- .shaper_profile_add = hns3_tm_shaper_profile_add, +- .shaper_profile_delete = hns3_tm_shaper_profile_del, +- .node_add = hns3_tm_node_add, +- .node_delete = hns3_tm_node_delete, +- .node_type_get = hns3_tm_node_type_get, +- .level_capabilities_get = hns3_tm_level_capabilities_get, +- .node_capabilities_get = hns3_tm_node_capabilities_get, ++ .capabilities_get = hns3_tm_capabilities_get_wrap, ++ .shaper_profile_add = hns3_tm_shaper_profile_add_wrap, ++ .shaper_profile_delete = hns3_tm_shaper_profile_del_wrap, ++ .node_add = hns3_tm_node_add_wrap, ++ .node_delete = hns3_tm_node_delete_wrap, ++ .node_type_get = hns3_tm_node_type_get_wrap, ++ .level_capabilities_get = hns3_tm_level_capabilities_get_wrap, ++ .node_capabilities_get = hns3_tm_node_capabilities_get_wrap, + .hierarchy_commit = hns3_tm_hierarchy_commit_wrap, + .node_shaper_update = hns3_tm_node_shaper_update_wrap, + }; diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c -index 7726a89d99..cb0070f94b 100644 +index 7726a89d99..0ff334745d 100644 --- a/dpdk/drivers/net/i40e/i40e_ethdev.c +++ b/dpdk/drivers/net/i40e/i40e_ethdev.c @@ -387,7 +387,6 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, @@ -22282,7 +31430,31 @@ index 7726a89d99..cb0070f94b 100644 uint32_t rep_cnt = MAX_REPEAT_TIME; struct i40e_link_status link_status; int status; -@@ -6738,7 +6751,6 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) +@@ -5993,14 +6006,16 @@ i40e_vsi_setup(struct i40e_pf *pf, + } + } + +- /* MAC/VLAN configuration */ +- rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); +- filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; ++ if (vsi->type != I40E_VSI_FDIR) { ++ /* MAC/VLAN configuration for non-FDIR VSI*/ ++ rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); ++ filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; + +- ret = i40e_vsi_add_mac(vsi, &filter); +- if (ret != I40E_SUCCESS) { +- PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); +- goto fail_msix_alloc; ++ ret = i40e_vsi_add_mac(vsi, &filter); ++ if (ret != I40E_SUCCESS) { ++ PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); ++ goto fail_msix_alloc; ++ } + } + + /* Get VSI BW information */ +@@ -6738,7 +6753,6 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) if (!ret) rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); @@ -22290,7 +31462,7 @@ index 7726a89d99..cb0070f94b 100644 break; default: 
PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", -@@ -12123,40 +12135,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) +@@ -12123,40 +12137,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) return ret; } @@ -22374,7 +31546,7 @@ index 65a826d51c..67df77890a 100644 if (attr->priority) { rte_flow_error_set(error, EINVAL, diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c -index 788ffb51c2..b4f65b58fa 100644 +index 788ffb51c2..d96bbbb677 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx.c @@ -304,10 +304,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags, @@ -22403,7 +31575,20 @@ index 788ffb51c2..b4f65b58fa 100644 /* Enable checksum offloading */ if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) i40e_txd_enable_checksum(ol_flags, &td_cmd, -@@ -2904,6 +2904,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) +@@ -1918,6 +1918,12 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, + if (use_def_burst_func) + ad->rx_bulk_alloc_allowed = false; + i40e_set_rx_function(dev); ++ ++ if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) { ++ PMD_DRV_LOG(ERR, "Failed vector rx setup."); ++ return -EINVAL; ++ } ++ + return 0; + } else if (ad->rx_vec_allowed && !rte_is_power_of_2(rxq->nb_rx_desc)) { + PMD_DRV_LOG(ERR, "Vector mode is allowed, but descriptor" +@@ -2904,6 +2910,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) rxq->rx_hdr_len = 0; rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); @@ -22481,6 +31666,18 @@ index 60c97d5331..74ff54c653 100644 m = rte_pktmbuf_prefree_seg(txep[0].mbuf); if (likely(m)) { free[0] = m; +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h b/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h +index fe1a6ec75e..8b745630e4 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h +@@ -201,6 +201,7 @@ i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq) + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; ++ rxq->rx_using_sse = 1; + return 0; + } + diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c index 12e6f1cbcb..90e388ae27 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c @@ -22520,10 +31717,19 @@ index 1edebab8dc..aa18650ffa 100644 struct iavf_parser_list dist_parser_list; struct iavf_parser_list ipsec_crypto_parser_list; diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c -index 3196210f2c..51a8f24973 100644 +index 3196210f2c..a12ea39444 100644 --- a/dpdk/drivers/net/iavf/iavf_ethdev.c +++ b/dpdk/drivers/net/iavf/iavf_ethdev.c -@@ -1065,6 +1065,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -131,6 +131,8 @@ static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); + static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); ++static void iavf_dev_interrupt_handler(void *param); ++static void iavf_disable_irq0(struct iavf_hw *hw); + static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops); + static int iavf_set_mc_addr_list(struct rte_eth_dev *dev, +@@ -1065,6 +1067,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -22533,7 +31739,7 @@ index 3196210f2c..51a8f24973 100644 if (adapter->closed) return -1; -@@ -1075,8 +1078,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1075,8 +1080,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) if (adapter->stopped == 
1) return 0; @@ -22542,7 +31748,7 @@ index 3196210f2c..51a8f24973 100644 /* Disable the interrupt for Rx */ rte_intr_efd_disable(intr_handle); /* Rx interrupt vector mapping free */ -@@ -1089,6 +1090,8 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1089,6 +1092,8 @@ iavf_dev_stop(struct rte_eth_dev *dev) iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, false); @@ -22551,7 +31757,48 @@ index 3196210f2c..51a8f24973 100644 adapter->stopped = 1; dev->data->dev_started = 0; -@@ -2607,6 +2610,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -1178,6 +1183,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + .nb_max = IAVF_MAX_RING_DESC, + .nb_min = IAVF_MIN_RING_DESC, + .nb_align = IAVF_ALIGN_RING_DESC, ++ .nb_mtu_seg_max = IAVF_TX_MAX_MTU_SEG, ++ .nb_seg_max = IAVF_MAX_RING_DESC, + }; + + dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE; +@@ -1350,6 +1357,7 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); ++ struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + int err; + + if (adapter->closed) +@@ -1368,6 +1376,23 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + err = iavf_add_del_vlan(adapter, vlan_id, on); + if (err) + return -EIO; ++ ++ /* For i40e kernel driver which only supports vlan(v1) VIRTCHNL OP, ++ * it will set strip on when setting filter on but dpdk side will not ++ * change strip flag. To be consistent with dpdk side, disable strip ++ * again. ++ * ++ * For i40e kernel driver which supports vlan v2, dpdk will invoke vlan v2 ++ * related function, so it won't go through here. 
++ */ ++ if (adapter->hw.mac.type == IAVF_MAC_XL710 || ++ adapter->hw.mac.type == IAVF_MAC_X722_VF) { ++ if (on && !(dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) { ++ err = iavf_disable_vlan_strip(adapter); ++ if (err) ++ return -EIO; ++ } ++ } + return 0; + } + +@@ -2607,6 +2632,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) adapter->dev_data = eth_dev->data; adapter->stopped = 1; @@ -22561,7 +31808,7 @@ index 3196210f2c..51a8f24973 100644 if (iavf_init_vf(eth_dev) != 0) { PMD_INIT_LOG(ERR, "Init vf failed"); return -1; -@@ -2634,8 +2640,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2634,8 +2662,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); @@ -22570,7 +31817,53 @@ index 3196210f2c..51a8f24973 100644 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { /* register callback func to eal lib */ -@@ -2732,6 +2736,18 @@ iavf_dev_close(struct rte_eth_dev *dev) +@@ -2667,18 +2693,19 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) + ret = iavf_security_ctx_create(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance"); +- return ret; ++ goto flow_init_err; + } + + ret = iavf_security_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "failed to initialized ipsec crypto resources"); +- return ret; ++ goto security_init_err; + } + } + + iavf_default_rss_disable(adapter); + ++ iavf_dev_stats_reset(eth_dev); + + /* Start device watchdog */ + iavf_dev_watchdog_enable(adapter); +@@ -2686,7 +2713,23 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) + + return 0; + ++security_init_err: ++ iavf_security_ctx_destroy(adapter); ++ + flow_init_err: ++ iavf_disable_irq0(hw); ++ ++ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { ++ /* disable uio intr before callback unregiser */ ++ rte_intr_disable(pci_dev->intr_handle); ++ ++ /* unregister callback func from eal lib */ ++ rte_intr_callback_unregister(pci_dev->intr_handle, ++ iavf_dev_interrupt_handler, eth_dev); ++ } else { ++ rte_eal_alarm_cancel(iavf_dev_alarm_handler, eth_dev); ++ } ++ + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + +@@ -2732,6 +2775,18 @@ iavf_dev_close(struct rte_eth_dev *dev) if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled) iavf_config_promisc(adapter, false, false); @@ -22621,8 +31914,20 @@ index ae6fb38594..cf4d677101 100644 uint8_t tmp_val = 0; uint8_t tmp_c = 0; int i, j; +diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c +index 868921cac5..26459088af 100644 +--- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c ++++ b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c +@@ -828,6 +828,7 @@ iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter, + /* set request params */ + request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx; + request->ipsec_data.sa_update->esn_hi = sess->esn.hi; ++ request->ipsec_data.sa_update->esn_low = sess->esn.low; + + /* send virtual channel request to add SA to hardware database */ + rc = iavf_ipsec_crypto_request(adapter, diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.c b/dpdk/drivers/net/iavf/iavf_rxtx.c -index cf87a6beda..4a38a7b985 100644 +index cf87a6beda..6a0cf31a4c 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx.c +++ b/dpdk/drivers/net/iavf/iavf_rxtx.c @@ -654,6 +654,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -22647,7 +31952,27 @@ index cf87a6beda..4a38a7b985 100644 eip_typ |= IAVF_TXD_CTX_QW0_L4T_CS_MASK; } -@@ 
-2616,10 +2620,21 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, +@@ -2479,7 +2483,7 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field, + total_length -= m->outer_l3_len + m->outer_l2_len; + } + +-#ifdef RTE_LIBRTE_IAVF_DEBUG_TX ++#ifdef RTE_ETHDEV_DEBUG_TX + if (!m->l4_len || !m->tso_segsz) + PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d", + m->l4_len, m->tso_segsz); +@@ -2593,6 +2597,10 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, + l2tag1 |= m->vlan_tci; + } + ++ if ((m->ol_flags & ++ (IAVF_TX_CKSUM_OFFLOAD_MASK | RTE_MBUF_F_TX_SEC_OFFLOAD)) == 0) ++ goto skip_cksum; ++ + /* Set MACLEN */ + if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK && + !(m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) +@@ -2616,10 +2624,21 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; } @@ -22671,8 +31996,65 @@ index cf87a6beda..4a38a7b985 100644 } /* Enable L4 checksum offloads */ +@@ -2641,6 +2660,7 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, + break; + } + ++skip_cksum: + *qw1 = rte_cpu_to_le_64((((uint64_t)command << + IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) | + (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) & +@@ -2810,7 +2830,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + txe->last_id = desc_idx_last; + desc_idx = txe->next_id; + txe = txn; +- } ++ } + + if (nb_desc_ipsec) { + volatile struct iavf_tx_ipsec_desc *ipsec_desc = +@@ -2823,7 +2843,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + if (txe->mbuf) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; +- } ++ } + + iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen); + +@@ -2956,7 +2976,6 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + struct rte_mbuf *m; + struct iavf_tx_queue *txq = tx_queue; + struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id]; +- uint16_t max_frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + +@@ -2974,7 +2993,8 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + return i; + } + } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) || +- (m->tso_segsz > IAVF_MAX_TSO_MSS)) { ++ (m->tso_segsz > IAVF_MAX_TSO_MSS) || ++ (m->nb_segs > txq->nb_tx_desc)) { + /* MSS outside the range are considered malicious */ + rte_errno = EINVAL; + return i; +@@ -2985,11 +3005,8 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + return i; + } + +- /* check the data_len in mbuf */ +- if (m->data_len < IAVF_TX_MIN_PKT_LEN || +- m->data_len > max_frame_size) { ++ if (m->pkt_len < IAVF_TX_MIN_PKT_LEN) { + rte_errno = EINVAL; +- PMD_DRV_LOG(ERR, "INVALID mbuf: bad data_len=[%hu]", m->data_len); + return i; + } + diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.h b/dpdk/drivers/net/iavf/iavf_rxtx.h -index a6ad88885b..354326c235 100644 +index a6ad88885b..180f7ec108 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx.h +++ b/dpdk/drivers/net/iavf/iavf_rxtx.h @@ -16,6 +16,9 @@ @@ -22703,6 +32085,15 @@ index a6ad88885b..354326c235 100644 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \ +@@ -76,7 +79,7 @@ + RTE_MBUF_F_TX_TUNNEL_MASK | \ + RTE_MBUF_F_TX_OUTER_IP_CKSUM | \ + RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \ +- RTE_ETH_TX_OFFLOAD_SECURITY) 
++ RTE_MBUF_F_TX_SEC_OFFLOAD) + + #define IAVF_TX_OFFLOAD_NOTSUP_MASK \ + (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK) diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c index 862f6eb0c0..b4ebac9d34 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c @@ -23397,7 +32788,7 @@ index 7f42ebabe9..7becf6d187 100644 uint16_t num_vfs; diff --git a/dpdk/drivers/net/ice/ice_dcf_ethdev.c b/dpdk/drivers/net/ice/ice_dcf_ethdev.c -index dcbf2af5b0..7304ea721c 100644 +index dcbf2af5b0..223fa39f33 100644 --- a/dpdk/drivers/net/ice/ice_dcf_ethdev.c +++ b/dpdk/drivers/net/ice/ice_dcf_ethdev.c @@ -115,7 +115,8 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq) @@ -23410,6 +32801,113 @@ index dcbf2af5b0..7304ea721c 100644 max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, dev->data->mtu + ICE_ETH_OVERHEAD); +@@ -670,7 +671,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev) + struct ice_dcf_adapter *dcf_ad = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; + struct ice_adapter *ad = &dcf_ad->parent; +- struct ice_dcf_hw *hw = &dcf_ad->real_hw; + + if (ad->pf.adapter_stopped == 1) { + PMD_DRV_LOG(DEBUG, "Port is already stopped"); +@@ -697,7 +697,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev) + + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + ad->pf.adapter_stopped = 1; +- hw->tm_conf.committed = false; + + return 0; + } +@@ -1598,6 +1597,26 @@ ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter) + } + } + ++int ++ice_dcf_handle_vf_repr_close(struct ice_dcf_adapter *dcf_adapter, ++ uint16_t vf_id) ++{ ++ struct ice_dcf_repr_info *vf_rep_info; ++ ++ if (dcf_adapter->num_reprs >= vf_id) { ++ PMD_DRV_LOG(ERR, "Invalid VF id: %d", vf_id); ++ return -1; ++ } ++ ++ if (!dcf_adapter->repr_infos) ++ return 0; ++ ++ vf_rep_info = &dcf_adapter->repr_infos[vf_id]; ++ vf_rep_info->vf_rep_eth_dev = NULL; ++ ++ return 0; ++} ++ + static int + ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter) + { +@@ -1621,11 +1640,10 @@ ice_dcf_dev_close(struct rte_eth_dev *dev) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + ++ ice_dcf_vf_repr_notify_all(adapter, false); + (void)ice_dcf_dev_stop(dev); + + ice_free_queues(dev); +- +- ice_dcf_free_repr_info(adapter); + ice_dcf_uninit_parent_adapter(dev); + ice_dcf_uninit_hw(dev, &adapter->real_hw); + +@@ -1815,7 +1833,7 @@ ice_dcf_dev_reset(struct rte_eth_dev *dev) + ice_dcf_reset_hw(dev, hw); + } + +- ret = ice_dcf_dev_uninit(dev); ++ ret = ice_dcf_dev_close(dev); + if (ret) + return ret; + +@@ -1917,13 +1935,20 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev) + return -1; + } + ++ ice_dcf_stats_reset(eth_dev); ++ + dcf_config_promisc(adapter, false, false); ++ ice_dcf_vf_repr_notify_all(adapter, true); ++ + return 0; + } + + static int + ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev) + { ++ struct ice_dcf_adapter *adapter = eth_dev->data->dev_private; ++ ++ ice_dcf_free_repr_info(adapter); + ice_dcf_dev_close(eth_dev); + + return 0; +diff --git a/dpdk/drivers/net/ice/ice_dcf_ethdev.h b/dpdk/drivers/net/ice/ice_dcf_ethdev.h +index 4baaec4b8b..6dcbaac5eb 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_ethdev.h ++++ b/dpdk/drivers/net/ice/ice_dcf_ethdev.h +@@ -60,6 +60,7 @@ struct ice_dcf_vf_repr { + struct rte_ether_addr mac_addr; + uint16_t switch_domain_id; + uint16_t vf_id; ++ bool dcf_valid; + + struct ice_dcf_vlan outer_vlan_info; /* DCF always handle outer VLAN */ + }; +@@ -80,6 +81,8 @@ int ice_dcf_vf_repr_init(struct rte_eth_dev 
*vf_rep_eth_dev, void *init_param); + int ice_dcf_vf_repr_uninit(struct rte_eth_dev *vf_rep_eth_dev); + int ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev); + void ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter); ++void ice_dcf_vf_repr_notify_all(struct ice_dcf_adapter *dcf_adapter, bool valid); ++int ice_dcf_handle_vf_repr_close(struct ice_dcf_adapter *dcf_adapter, uint16_t vf_id); + bool ice_dcf_adminq_need_retry(struct ice_adapter *ad); + + #endif /* _ICE_DCF_ETHDEV_H_ */ diff --git a/dpdk/drivers/net/ice/ice_dcf_parent.c b/dpdk/drivers/net/ice/ice_dcf_parent.c index 01e390ddda..0563edb0b2 100644 --- a/dpdk/drivers/net/ice/ice_dcf_parent.c @@ -23434,8 +32932,138 @@ index 01e390ddda..0563edb0b2 100644 return NULL; } +diff --git a/dpdk/drivers/net/ice/ice_dcf_sched.c b/dpdk/drivers/net/ice/ice_dcf_sched.c +index a231c1e60b..b08bc5f1de 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_sched.c ++++ b/dpdk/drivers/net/ice/ice_dcf_sched.c +@@ -237,6 +237,7 @@ ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id, + enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX; + struct ice_dcf_tm_shaper_profile *shaper_profile = NULL; + struct ice_dcf_adapter *adapter = dev->data->dev_private; ++ struct ice_adapter *ad = &adapter->parent; + struct ice_dcf_hw *hw = &adapter->real_hw; + struct ice_dcf_tm_node *parent_node; + struct ice_dcf_tm_node *tm_node; +@@ -246,10 +247,10 @@ ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id, + if (!params || !error) + return -EINVAL; + +- /* if already committed */ +- if (hw->tm_conf.committed) { ++ /* if port is running */ ++ if (!ad->pf.adapter_stopped) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; +- error->message = "already committed"; ++ error->message = "port is running"; + return -EINVAL; + } + +@@ -400,16 +401,17 @@ ice_dcf_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + { + enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX; + struct ice_dcf_adapter *adapter = dev->data->dev_private; ++ struct ice_adapter *ad = &adapter->parent; + struct ice_dcf_hw *hw = &adapter->real_hw; + struct ice_dcf_tm_node *tm_node; + + if (!error) + return -EINVAL; + +- /* if already committed */ +- if (hw->tm_conf.committed) { ++ /* if port is running */ ++ if (!ad->pf.adapter_stopped) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; +- error->message = "already committed"; ++ error->message = "port is running"; + return -EINVAL; + } + +diff --git a/dpdk/drivers/net/ice/ice_dcf_vf_representor.c b/dpdk/drivers/net/ice/ice_dcf_vf_representor.c +index b9fcfc80ad..af281f069a 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_vf_representor.c ++++ b/dpdk/drivers/net/ice/ice_dcf_vf_representor.c +@@ -50,9 +50,28 @@ ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev) + return 0; + } + ++static void ++ice_dcf_vf_repr_notify_one(struct rte_eth_dev *dev, bool valid) ++{ ++ struct ice_dcf_vf_repr *repr = dev->data->dev_private; ++ ++ repr->dcf_valid = valid; ++} ++ + static int + ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev) + { ++ struct ice_dcf_vf_repr *repr = dev->data->dev_private; ++ struct ice_dcf_adapter *dcf_adapter; ++ int err; ++ ++ if (repr->dcf_valid) { ++ dcf_adapter = repr->dcf_eth_dev->data->dev_private; ++ err = ice_dcf_handle_vf_repr_close(dcf_adapter, repr->vf_id); ++ if (err) ++ PMD_DRV_LOG(ERR, "VF representor invalid"); ++ } ++ + return ice_dcf_vf_repr_uninit(dev); + } + +@@ -111,14 +130,15 @@ ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev, + static __rte_always_inline struct 
ice_dcf_hw * + ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr) + { +- struct ice_dcf_adapter *dcf_adapter = +- repr->dcf_eth_dev->data->dev_private; ++ struct ice_dcf_adapter *dcf_adapter; + +- if (!dcf_adapter) { ++ if (!repr->dcf_valid) { + PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n"); + return NULL; + } + ++ dcf_adapter = repr->dcf_eth_dev->data->dev_private; ++ + return &dcf_adapter->real_hw; + } + +@@ -414,6 +434,7 @@ ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param) + repr->dcf_eth_dev = param->dcf_eth_dev; + repr->switch_domain_id = param->switch_domain_id; + repr->vf_id = param->vf_id; ++ repr->dcf_valid = true; + repr->outer_vlan_info.port_vlan_ena = false; + repr->outer_vlan_info.stripping_ena = false; + repr->outer_vlan_info.tpid = RTE_ETHER_TYPE_VLAN; +@@ -488,3 +509,22 @@ ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter) + vf_rep_eth_dev->data->dev_started = 0; + } + } ++ ++void ++ice_dcf_vf_repr_notify_all(struct ice_dcf_adapter *dcf_adapter, bool valid) ++{ ++ uint16_t vf_id; ++ struct rte_eth_dev *vf_rep_eth_dev; ++ ++ if (!dcf_adapter->repr_infos) ++ return; ++ ++ for (vf_id = 0; vf_id < dcf_adapter->real_hw.num_vfs; vf_id++) { ++ vf_rep_eth_dev = dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev; ++ ++ if (!vf_rep_eth_dev) ++ continue; ++ ++ ice_dcf_vf_repr_notify_one(vf_rep_eth_dev, valid); ++ } ++} diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c -index 0bc739daf0..bba2715b1d 100644 +index 0bc739daf0..8ce7d0ebaa 100644 --- a/dpdk/drivers/net/ice/ice_ethdev.c +++ b/dpdk/drivers/net/ice/ice_ethdev.c @@ -2399,6 +2399,17 @@ ice_dev_init(struct rte_eth_dev *dev) @@ -23488,7 +33116,16 @@ index 0bc739daf0..bba2715b1d 100644 ret = ice_aq_set_rss_key(hw, vsi->idx, &key); if (ret) goto out; -@@ -3660,6 +3678,16 @@ ice_dev_start(struct rte_eth_dev *dev) +@@ -3590,6 +3608,8 @@ ice_get_init_link_status(struct rte_eth_dev *dev) + + if (link_status.link_info & ICE_AQ_LINK_UP) + pf->init_link_up = true; ++ else ++ pf->init_link_up = false; + } + + static int +@@ -3660,6 +3680,16 @@ ice_dev_start(struct rte_eth_dev *dev) } } @@ -23505,7 +33142,27 @@ index 0bc739daf0..bba2715b1d 100644 /* program Rx queues' context in hardware*/ for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) { ret = ice_rx_queue_start(dev, nb_rxq); -@@ -5800,11 +5828,6 @@ ice_timesync_enable(struct rte_eth_dev *dev) +@@ -3856,6 +3886,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + .nb_max = ICE_MAX_RING_DESC, + .nb_min = ICE_MIN_RING_DESC, + .nb_align = ICE_ALIGN_RING_DESC, ++ .nb_mtu_seg_max = ICE_TX_MTU_SEG_MAX, ++ .nb_seg_max = ICE_MAX_RING_DESC, + }; + + dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M | +@@ -3926,8 +3958,8 @@ ice_atomic_write_link_status(struct rte_eth_dev *dev, + static int + ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) + { +-#define CHECK_INTERVAL 100 /* 100ms */ +-#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ ++#define CHECK_INTERVAL 50 /* 50ms */ ++#define MAX_REPEAT_TIME 40 /* 2s (40 * 50ms) in total */ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_link_status link_status; + struct rte_eth_link link, old; +@@ -5800,11 +5832,6 @@ ice_timesync_enable(struct rte_eth_dev *dev) return -1; } @@ -23517,7 +33174,7 @@ index 0bc739daf0..bba2715b1d 100644 if (hw->func_caps.ts_func_info.src_tmr_owned) { ret = ice_ptp_init_phc(hw); if (ret) { -@@ -5925,16 +5948,17 @@ ice_timesync_read_time(struct rte_eth_dev *dev, 
struct timespec *ts) +@@ -5925,16 +5952,17 @@ ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); @@ -23540,7 +33197,7 @@ index 0bc739daf0..bba2715b1d 100644 } time = ((uint64_t)hi << 32) | lo; -@@ -5950,6 +5974,7 @@ ice_timesync_disable(struct rte_eth_dev *dev) +@@ -5950,6 +5978,7 @@ ice_timesync_disable(struct rte_eth_dev *dev) struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); @@ -23548,7 +33205,7 @@ index 0bc739daf0..bba2715b1d 100644 uint64_t val; uint8_t lport; -@@ -5957,12 +5982,12 @@ ice_timesync_disable(struct rte_eth_dev *dev) +@@ -5957,12 +5986,12 @@ ice_timesync_disable(struct rte_eth_dev *dev) ice_clear_phy_tstamp(hw, lport, 0); @@ -23626,7 +33283,7 @@ index f35727856e..52646e9408 100644 uint8_t tmp_c = 0; int i, j; diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c -index 0ea0045836..598edb29c9 100644 +index 0ea0045836..9a653cbc4a 100644 --- a/dpdk/drivers/net/ice/ice_rxtx.c +++ b/dpdk/drivers/net/ice/ice_rxtx.c @@ -259,7 +259,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) @@ -23685,6 +33342,15 @@ index 0ea0045836..598edb29c9 100644 rxq->time_high = rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); if (unlikely(is_tsinit)) { +@@ -2143,7 +2149,7 @@ ice_recv_scattered_pkts(void *rx_queue, + } + rxq->hw_time_update = rte_get_timer_cycles() / + (rte_get_timer_hz() / 1000); +- *RTE_MBUF_DYNFIELD(rxm, ++ *RTE_MBUF_DYNFIELD(first_seg, + (ice_timestamp_dynfield_offset), + rte_mbuf_timestamp_t *) = ts_ns; + pkt_flags |= ice_timestamp_dynflag; @@ -2617,7 +2623,8 @@ ice_recv_pkts(void *rx_queue, rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd); pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); @@ -23731,11 +33397,72 @@ index 0ea0045836..598edb29c9 100644 /* Enable checksum offloading */ if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) +@@ -3662,23 +3670,34 @@ ice_check_empty_mbuf(struct rte_mbuf *tx_pkt) + } + + uint16_t +-ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, ++ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) + { + int i, ret; + uint64_t ol_flags; + struct rte_mbuf *m; +- struct ice_tx_queue *txq = tx_queue; +- struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id]; +- uint16_t max_frame_size = dev->data->mtu + ICE_ETH_OVERHEAD; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + +- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && ++ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG) && ++ /** ++ * No TSO case: nb->segs, pkt_len to not exceed ++ * the limites. ++ */ ++ (m->nb_segs > ICE_TX_MTU_SEG_MAX || ++ m->pkt_len > ICE_FRAME_SIZE_MAX)) { ++ rte_errno = EINVAL; ++ return i; ++ } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && ++ /** TSO case: tso_segsz, nb_segs, pkt_len not exceed ++ * the limits. 
++ */ + (m->tso_segsz < ICE_MIN_TSO_MSS || + m->tso_segsz > ICE_MAX_TSO_MSS || ++ m->nb_segs > ++ ((struct ice_tx_queue *)tx_queue)->nb_tx_desc || + m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) { + /** + * MSS outside the range are considered malicious +@@ -3687,11 +3706,8 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + return i; + } + +- /* check the data_len in mbuf */ +- if (m->data_len < ICE_TX_MIN_PKT_LEN || +- m->data_len > max_frame_size) { ++ if (m->pkt_len < ICE_TX_MIN_PKT_LEN) { + rte_errno = EINVAL; +- PMD_DRV_LOG(ERR, "INVALID mbuf: bad data_len=[%hu]", m->data_len); + return i; + } + +@@ -3710,7 +3726,6 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + + if (ice_check_empty_mbuf(m) != 0) { + rte_errno = EINVAL; +- PMD_DRV_LOG(ERR, "INVALID mbuf: last mbuf data_len=[0]"); + return i; + } + } diff --git a/dpdk/drivers/net/ice/ice_rxtx.h b/dpdk/drivers/net/ice/ice_rxtx.h -index 4947d5c25f..268289716e 100644 +index 4947d5c25f..bd2c4abec9 100644 --- a/dpdk/drivers/net/ice/ice_rxtx.h +++ b/dpdk/drivers/net/ice/ice_rxtx.h -@@ -51,6 +51,9 @@ extern int ice_timestamp_dynfield_offset; +@@ -51,8 +51,13 @@ extern int ice_timestamp_dynfield_offset; /* Max header size can be 2K - 64 bytes */ #define ICE_RX_HDR_BUF_SIZE (2048 - 64) @@ -23744,8 +33471,12 @@ index 4947d5c25f..268289716e 100644 + #define ICE_HEADER_SPLIT_ENA BIT(0) ++#define ICE_TX_MTU_SEG_MAX 8 ++ typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq); -@@ -117,6 +120,7 @@ struct ice_rx_queue { + typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq); + typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq, +@@ -117,6 +122,7 @@ struct ice_rx_queue { uint64_t hw_time_update; /* SW time of HW record updating */ struct rte_eth_rxseg_split rxseg[ICE_RX_MAX_NSEG]; uint32_t rxseg_nb; @@ -23753,7 +33484,7 @@ index 4947d5c25f..268289716e 100644 }; struct ice_tx_entry { -@@ -349,26 +353,27 @@ static inline +@@ -349,26 +355,27 @@ static inline uint64_t ice_tstamp_convert_32b_64b(struct ice_hw *hw, struct ice_adapter *ad, uint32_t flag, uint32_t in_timestamp) { @@ -24042,10 +33773,50 @@ index 70a06a3b15..2c15611a23 100644 rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit); } diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c -index 2ef96a984a..e50fc73f43 100644 +index 2ef96a984a..5361867785 100644 --- a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c +++ b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c -@@ -2579,7 +2579,7 @@ ipn3ke_rpst_scan_handle_request(__rte_unused void *param) +@@ -120,6 +120,7 @@ ipn3ke_rpst_dev_start(struct rte_eth_dev *dev) + uint64_t base_mac; + uint32_t val; + char attr_name[IPN3KE_RAWDEV_ATTR_LEN_MAX]; ++ uint16_t i; + + rawdev = hw->rawdev; + +@@ -190,6 +191,11 @@ ipn3ke_rpst_dev_start(struct rte_eth_dev *dev) + + ipn3ke_rpst_link_update(dev, 0); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -198,6 +204,7 @@ ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev) + { + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev); ++ uint16_t i; + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Disable the TX path */ +@@ -207,6 +214,11 @@ ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev) + ipn3ke_xmac_rx_disable(hw, 
rpst->port_id, 0); + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -2579,7 +2591,7 @@ ipn3ke_rpst_scan_handle_request(__rte_unused void *param) } rte_delay_us(50 * MS); @@ -24244,7 +34015,7 @@ index c9d6ca9efe..a3a7c68806 100644 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1); diff --git a/dpdk/drivers/net/mana/gdma.c b/dpdk/drivers/net/mana/gdma.c -index 3d4039014f..65685fe236 100644 +index 3d4039014f..7f66a7a7cf 100644 --- a/dpdk/drivers/net/mana/gdma.c +++ b/dpdk/drivers/net/mana/gdma.c @@ -14,12 +14,12 @@ gdma_get_wqe_pointer(struct mana_gdma_queue *queue) @@ -24342,7 +34113,105 @@ index 3d4039014f..65685fe236 100644 wq_buffer_pointer = gdma_get_wqe_pointer(queue); wq_buffer_pointer += write_dma_client_oob(wq_buffer_pointer, work_req, -@@ -242,15 +235,15 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type, +@@ -173,6 +166,97 @@ gdma_post_work_request(struct mana_gdma_queue *queue, + return 0; + } + ++#ifdef RTE_ARCH_32 ++union gdma_short_doorbell_entry { ++ uint32_t as_uint32; ++ ++ struct { ++ uint32_t tail_ptr_incr : 16; /* Number of CQEs */ ++ uint32_t id : 12; ++ uint32_t reserved : 3; ++ uint32_t arm : 1; ++ } cq; ++ ++ struct { ++ uint32_t tail_ptr_incr : 16; /* In number of bytes */ ++ uint32_t id : 12; ++ uint32_t reserved : 4; ++ } rq; ++ ++ struct { ++ uint32_t tail_ptr_incr : 16; /* In number of bytes */ ++ uint32_t id : 12; ++ uint32_t reserved : 4; ++ } sq; ++ ++ struct { ++ uint32_t tail_ptr_incr : 16; /* Number of EQEs */ ++ uint32_t id : 12; ++ uint32_t reserved : 3; ++ uint32_t arm : 1; ++ } eq; ++}; /* HW DATA */ ++ ++enum { ++ DOORBELL_SHORT_OFFSET_SQ = 0x10, ++ DOORBELL_SHORT_OFFSET_RQ = 0x410, ++ DOORBELL_SHORT_OFFSET_CQ = 0x810, ++ DOORBELL_SHORT_OFFSET_EQ = 0xFF0, ++}; ++ ++/* ++ * Write to hardware doorbell to notify new activity. 
++ */ ++int ++mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type, ++ uint32_t queue_id, uint32_t tail_incr, uint8_t arm) ++{ ++ uint8_t *addr = db_page; ++ union gdma_short_doorbell_entry e = {}; ++ ++ if ((queue_id & ~GDMA_SHORT_DB_QID_MASK) || ++ (tail_incr & ~GDMA_SHORT_DB_INC_MASK)) { ++ DP_LOG(ERR, "%s: queue_id %u or " ++ "tail_incr %u overflowed, queue type %d", ++ __func__, queue_id, tail_incr, queue_type); ++ return -EINVAL; ++ } ++ ++ switch (queue_type) { ++ case GDMA_QUEUE_SEND: ++ e.sq.id = queue_id; ++ e.sq.tail_ptr_incr = tail_incr; ++ addr += DOORBELL_SHORT_OFFSET_SQ; ++ break; ++ ++ case GDMA_QUEUE_RECEIVE: ++ e.rq.id = queue_id; ++ e.rq.tail_ptr_incr = tail_incr; ++ addr += DOORBELL_SHORT_OFFSET_RQ; ++ break; ++ ++ case GDMA_QUEUE_COMPLETION: ++ e.cq.id = queue_id; ++ e.cq.tail_ptr_incr = tail_incr; ++ e.cq.arm = arm; ++ addr += DOORBELL_SHORT_OFFSET_CQ; ++ break; ++ ++ default: ++ DP_LOG(ERR, "Unsupported queue type %d", queue_type); ++ return -1; ++ } ++ ++ /* Ensure all writes are done before ringing doorbell */ ++ rte_wmb(); ++ ++ DP_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u", ++ db_page, addr, queue_id, queue_type, tail_incr, arm); ++ ++ rte_write32(e.as_uint32, addr); ++ return 0; ++} ++#else + union gdma_doorbell_entry { + uint64_t as_uint64; + +@@ -242,62 +326,69 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type, break; default: @@ -24361,7 +34230,9 @@ index 3d4039014f..65685fe236 100644 rte_write64(e.as_uint64, addr); return 0; -@@ -259,45 +252,51 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type, + } ++#endif + /* * Poll completion queue for completions. */ @@ -24442,7 +34313,7 @@ index 3d4039014f..65685fe236 100644 + return num_comp; } diff --git a/dpdk/drivers/net/mana/mana.c b/dpdk/drivers/net/mana/mana.c -index 43221e743e..7630118d4f 100644 +index 43221e743e..896b53ed35 100644 --- a/dpdk/drivers/net/mana/mana.c +++ b/dpdk/drivers/net/mana/mana.c @@ -487,6 +487,15 @@ mana_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -24535,6 +34406,15 @@ index 43221e743e..7630118d4f 100644 /* There is no good way to get stats->imissed, not setting it */ +@@ -1238,7 +1260,7 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr, + /* Create a parent domain with the port number */ + attr.pd = priv->ib_pd; + attr.comp_mask = IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT; +- attr.pd_context = (void *)(uint64_t)port; ++ attr.pd_context = (void *)(uintptr_t)port; + priv->ib_parent_pd = ibv_alloc_parent_domain(ctx, &attr); + if (!priv->ib_parent_pd) { + DRV_LOG(ERR, "ibv_alloc_parent_domain failed port %d", port); @@ -1321,6 +1343,7 @@ failed: /* * Goes through the IB device list to look for the IB port matching the @@ -24634,10 +34514,32 @@ index 43221e743e..7630118d4f 100644 rte_spinlock_unlock(&mana_shared_data_lock); diff --git a/dpdk/drivers/net/mana/mana.h b/dpdk/drivers/net/mana/mana.h -index 4a05238a96..7dfacd57f3 100644 +index 4a05238a96..f70a3e0b3d 100644 --- a/dpdk/drivers/net/mana/mana.h +++ b/dpdk/drivers/net/mana/mana.h -@@ -142,19 +142,6 @@ struct gdma_header { +@@ -50,6 +50,21 @@ struct mana_shared_data { + #define MAX_TX_WQE_SIZE 512 + #define MAX_RX_WQE_SIZE 256 + ++/* For 32 bit only */ ++#ifdef RTE_ARCH_32 ++#define GDMA_SHORT_DB_INC_MASK 0xffff ++#define GDMA_SHORT_DB_QID_MASK 0xfff ++ ++#define GDMA_SHORT_DB_MAX_WQE (0x10000 / GDMA_WQE_ALIGNMENT_UNIT_SIZE) ++ ++#define TX_WQE_SHORT_DB_THRESHOLD \ ++ (GDMA_SHORT_DB_MAX_WQE - \ ++ 
(MAX_TX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE)) ++#define RX_WQE_SHORT_DB_THRESHOLD \ ++ (GDMA_SHORT_DB_MAX_WQE - \ ++ (MAX_RX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE)) ++#endif ++ + /* Values from the GDMA specification document, WQE format description */ + #define INLINE_OOB_SMALL_SIZE_IN_BYTES 8 + #define INLINE_OOB_LARGE_SIZE_IN_BYTES 24 +@@ -142,19 +157,6 @@ struct gdma_header { #define COMPLETION_QUEUE_OWNER_MASK \ ((1 << (COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE)) - 1) @@ -24657,7 +34559,7 @@ index 4a05238a96..7dfacd57f3 100644 struct gdma_hardware_completion_entry { char dma_client_data[GDMA_COMP_DATA_SIZE]; union { -@@ -391,6 +378,11 @@ struct mana_gdma_queue { +@@ -391,6 +393,11 @@ struct mana_gdma_queue { #define MANA_MR_BTREE_PER_QUEUE_N 64 @@ -24669,7 +34571,7 @@ index 4a05238a96..7dfacd57f3 100644 struct mana_txq { struct mana_priv *priv; uint32_t num_desc; -@@ -399,6 +391,7 @@ struct mana_txq { +@@ -399,6 +406,7 @@ struct mana_txq { struct mana_gdma_queue gdma_sq; struct mana_gdma_queue gdma_cq; @@ -24677,29 +34579,43 @@ index 4a05238a96..7dfacd57f3 100644 uint32_t tx_vp_offset; -@@ -433,6 +426,7 @@ struct mana_rxq { +@@ -431,8 +439,14 @@ struct mana_rxq { + */ + uint32_t desc_ring_head, desc_ring_tail; ++#ifdef RTE_ARCH_32 ++ /* For storing wqe increment count btw each short doorbell ring */ ++ uint32_t wqe_cnt_to_short_db; ++#endif ++ struct mana_gdma_queue gdma_rq; struct mana_gdma_queue gdma_cq; + struct gdma_comp *gdma_comp_buf; struct mana_stats stats; struct mana_mr_btree mr_btree; -@@ -447,6 +441,9 @@ extern int mana_logtype_init; +@@ -447,19 +461,28 @@ extern int mana_logtype_init; rte_log(RTE_LOG_ ## level, mana_logtype_driver, "%s(): " fmt "\n", \ __func__, ## args) +#define DP_LOG(level, fmt, args...) \ -+ RTE_LOG_DP(level, PMD, fmt, ## args) ++ RTE_LOG_DP(level, PMD, fmt "\n", ## args) + #define PMD_INIT_LOG(level, fmt, args...) 
\ rte_log(RTE_LOG_ ## level, mana_logtype_init, "%s(): " fmt "\n",\ __func__, ## args) -@@ -455,11 +452,11 @@ extern int mana_logtype_init; + #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + ++#ifdef RTE_ARCH_32 ++int mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type, ++ uint32_t queue_id, uint32_t tail_incr, ++ uint8_t arm); ++#else int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type, uint32_t queue_id, uint32_t tail, uint8_t arm); -int mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm); ++#endif +int mana_rq_ring_doorbell(struct mana_rxq *rxq); int gdma_post_work_request(struct mana_gdma_queue *queue, @@ -24709,7 +34625,7 @@ index 4a05238a96..7dfacd57f3 100644 uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue); uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **rx_pkts, -@@ -473,8 +470,9 @@ uint16_t mana_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, +@@ -473,8 +496,9 @@ uint16_t mana_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t mana_tx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); @@ -24721,6 +34637,22 @@ index 4a05238a96..7dfacd57f3 100644 int mana_start_rx_queues(struct rte_eth_dev *dev); int mana_start_tx_queues(struct rte_eth_dev *dev); +diff --git a/dpdk/drivers/net/mana/meson.build b/dpdk/drivers/net/mana/meson.build +index 493f0d26d4..2d72eca5a8 100644 +--- a/dpdk/drivers/net/mana/meson.build ++++ b/dpdk/drivers/net/mana/meson.build +@@ -1,9 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2022 Microsoft Corporation + +-if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64') ++if not is_linux or not dpdk_conf.has('RTE_ARCH_X86') + build = false +- reason = 'only supported on x86_64 Linux' ++ reason = 'only supported on x86 Linux' + subdir_done() + endif + diff --git a/dpdk/drivers/net/mana/mp.c b/dpdk/drivers/net/mana/mp.c index 92432c431d..738487f65a 100644 --- a/dpdk/drivers/net/mana/mp.c @@ -24737,7 +34669,7 @@ index 92432c431d..738487f65a 100644 mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); if (!mr) { diff --git a/dpdk/drivers/net/mana/mr.c b/dpdk/drivers/net/mana/mr.c -index 22df0917bb..fec0dc961c 100644 +index 22df0917bb..b8e6ea0bbf 100644 --- a/dpdk/drivers/net/mana/mr.c +++ b/dpdk/drivers/net/mana/mr.c @@ -47,23 +47,23 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, @@ -24755,7 +34687,7 @@ index 22df0917bb..fec0dc961c 100644 - "registering memory chunk start 0x%" PRIx64 " len %u", - ranges[i].start, ranges[i].len); + DP_LOG(DEBUG, -+ "registering memory chunk start 0x%" PRIx64 " len %u", ++ "registering memory chunk start 0x%" PRIxPTR " len %u", + ranges[i].start, ranges[i].len); if (rte_eal_process_type() == RTE_PROC_SECONDARY) { @@ -24767,7 +34699,7 @@ index 22df0917bb..fec0dc961c 100644 - "MR failed start 0x%" PRIx64 " len %u", - ranges[i].start, ranges[i].len); + DP_LOG(ERR, -+ "MR failed start 0x%" PRIx64 " len %u", ++ "MR failed start 0x%" PRIxPTR " len %u", + ranges[i].start, ranges[i].len); return ret; } @@ -24778,7 +34710,7 @@ index 22df0917bb..fec0dc961c 100644 if (ibv_mr) { - DRV_LOG(DEBUG, "MR lkey %u addr %p len %" PRIu64, - ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); -+ DP_LOG(DEBUG, "MR lkey %u addr %p len %" PRIu64, ++ DP_LOG(DEBUG, "MR lkey %u addr %p len %zu", + ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); @@ -24803,7 +34735,7 @@ index 22df0917bb..fec0dc961c 100644 } else { - DRV_LOG(ERR, "MR failed at 0x%" PRIx64 " len %u", - 
ranges[i].start, ranges[i].len); -+ DP_LOG(ERR, "MR failed at 0x%" PRIx64 " len %u", ++ DP_LOG(ERR, "MR failed at 0x%" PRIxPTR " len %u", + ranges[i].start, ranges[i].len); return -errno; } @@ -24834,7 +34766,7 @@ index 22df0917bb..fec0dc961c 100644 - DRV_LOG(DEBUG, - "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64, - mr->lkey, mr->addr, mr->len); -+ DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64, ++ DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIxPTR " len %zu", + mr->lkey, mr->addr, mr->len); return mr; } @@ -24852,7 +34784,7 @@ index 22df0917bb..fec0dc961c 100644 - "Added local MR key %u addr 0x%" PRIx64 " len %" PRIu64, - mr->lkey, mr->addr, mr->len); + DP_LOG(DEBUG, -+ "Added local MR key %u addr 0x%" PRIx64 " len %" PRIu64, ++ "Added local MR key %u addr 0x%" PRIxPTR " len %zu", + mr->lkey, mr->addr, mr->len); return mr; } @@ -24894,7 +34826,7 @@ index 22df0917bb..fec0dc961c 100644 - "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found", - addr, len, *idx, addr + len); + DP_LOG(DEBUG, -+ "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found", ++ "addr 0x%" PRIxPTR " len %zu idx %u sum 0x%" PRIxPTR " not found", + addr, len, *idx, addr + len); return NULL; @@ -24905,7 +34837,7 @@ index 22df0917bb..fec0dc961c 100644 if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) { - DRV_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree", - entry->addr, entry->len); -+ DP_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree", ++ DP_LOG(DEBUG, "Addr 0x%" PRIxPTR " len %zu exists in btree", + entry->addr, entry->len); return 0; } @@ -24928,13 +34860,13 @@ index 22df0917bb..fec0dc961c 100644 - "Inserted MR b-tree table %p idx %d addr 0x%" PRIx64 " len %zu", - table, idx, entry->addr, entry->len); + DP_LOG(DEBUG, -+ "Inserted MR b-tree table %p idx %d addr 0x%" PRIx64 " len %zu", ++ "Inserted MR b-tree table %p idx %d addr 0x%" PRIxPTR " len %zu", + table, idx, entry->addr, entry->len); return 0; } diff --git a/dpdk/drivers/net/mana/rx.c b/dpdk/drivers/net/mana/rx.c -index 55247889c1..fdb56ce05d 100644 +index 55247889c1..371510b473 100644 --- a/dpdk/drivers/net/mana/rx.c +++ b/dpdk/drivers/net/mana/rx.c @@ -22,7 +22,7 @@ static uint8_t mana_rss_hash_key_default[TOEPLITZ_HASH_KEY_SIZE_IN_BYTES] = { @@ -24946,18 +34878,26 @@ index 55247889c1..fdb56ce05d 100644 { struct mana_priv *priv = rxq->priv; int ret; -@@ -36,13 +36,16 @@ mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm) +@@ -36,13 +36,24 @@ mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm) db_page = process_priv->db_page; } + /* Hardware Spec specifies that software client should set 0 for + * wqe_cnt for Receive Queues. 
+ */ ++#ifdef RTE_ARCH_32 ++ ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_RECEIVE, ++ rxq->gdma_rq.id, ++ rxq->wqe_cnt_to_short_db * ++ GDMA_WQE_ALIGNMENT_UNIT_SIZE, ++ 0); ++#else ret = mana_ring_doorbell(db_page, GDMA_QUEUE_RECEIVE, rxq->gdma_rq.id, rxq->gdma_rq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE, - arm); + 0); ++#endif if (ret) - DRV_LOG(ERR, "failed to ring RX doorbell ret %d", ret); @@ -24965,7 +34905,7 @@ index 55247889c1..fdb56ce05d 100644 return ret; } -@@ -52,8 +55,8 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) +@@ -52,8 +63,8 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) { struct rte_mbuf *mbuf = NULL; struct gdma_sgl_element sgl[1]; @@ -24976,7 +34916,7 @@ index 55247889c1..fdb56ce05d 100644 struct mana_priv *priv = rxq->priv; int ret; struct mana_mr_cache *mr; -@@ -66,13 +69,12 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) +@@ -66,13 +77,12 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) mr = mana_find_pmd_mr(&rxq->mr_btree, priv, mbuf); if (!mr) { @@ -24991,7 +34931,7 @@ index 55247889c1..fdb56ce05d 100644 sgl[0].address = rte_cpu_to_le_64(rte_pktmbuf_mtod(mbuf, uint64_t)); sgl[0].memory_key = mr->lkey; -@@ -87,17 +89,17 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) +@@ -87,17 +97,20 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) request.flags = 0; request.client_data_unit = NOT_USING_CLIENT_DATA_UNIT; @@ -25005,6 +34945,9 @@ index 55247889c1..fdb56ce05d 100644 desc->pkt = mbuf; - desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu; + desc->wqe_size_in_bu = wqe_size_in_bu; ++#ifdef RTE_ARCH_32 ++ rxq->wqe_cnt_to_short_db += wqe_size_in_bu; ++#endif rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc; } else { - DRV_LOG(ERR, "failed to post recv ret %d", ret); @@ -25012,7 +34955,13 @@ index 55247889c1..fdb56ce05d 100644 return ret; } -@@ -116,12 +118,12 @@ mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq) +@@ -113,15 +126,25 @@ mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq) + int ret; + uint32_t i; + ++#ifdef RTE_ARCH_32 ++ rxq->wqe_cnt_to_short_db = 0; ++#endif for (i = 0; i < rxq->num_desc; i++) { ret = mana_alloc_and_post_rx_wqe(rxq); if (ret) { @@ -25020,6 +34969,13 @@ index 55247889c1..fdb56ce05d 100644 + DP_LOG(ERR, "failed to post RX ret = %d", ret); return ret; } ++ ++#ifdef RTE_ARCH_32 ++ if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) { ++ mana_rq_ring_doorbell(rxq); ++ rxq->wqe_cnt_to_short_db = 0; ++ } ++#endif } - mana_rq_ring_doorbell(rxq, rxq->num_desc); @@ -25027,7 +34983,7 @@ index 55247889c1..fdb56ce05d 100644 return ret; } -@@ -381,27 +383,20 @@ uint16_t +@@ -381,27 +404,24 @@ uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) { uint16_t pkt_received = 0; @@ -25039,13 +34995,16 @@ index 55247889c1..fdb56ce05d 100644 struct rte_mbuf *mbuf; int ret; + uint32_t num_pkts; ++#ifdef RTE_ARCH_32 ++ rxq->wqe_cnt_to_short_db = 0; ++#endif - while (pkt_received < pkts_n && - gdma_poll_completion_queue(&rxq->gdma_cq, &comp) == 1) { - struct mana_rxq_desc *desc; - struct mana_rx_comp_oob *oob = - (struct mana_rx_comp_oob *)&comp.completion_data[0]; -- + - if (comp.work_queue_number != rxq->gdma_rq.id) { - DRV_LOG(ERR, "rxq comp id mismatch wqid=0x%x rcid=0x%x", - comp.work_queue_number, rxq->gdma_rq.id); @@ -25063,7 +35022,7 @@ index 55247889c1..fdb56ce05d 100644 rxq->gdma_rq.tail += desc->wqe_size_in_bu; mbuf = desc->pkt; -@@ -411,22 +406,22 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -411,22 +431,22 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf 
**pkts, uint16_t pkts_n) break; case CQE_RX_TRUNCATED: @@ -25091,7 +35050,7 @@ index 55247889c1..fdb56ce05d 100644 mbuf->data_off = RTE_PKTMBUF_HEADROOM; mbuf->nb_segs = 1; -@@ -470,7 +465,7 @@ drop: +@@ -470,19 +490,38 @@ drop: /* Post another request */ ret = mana_alloc_and_post_rx_wqe(rxq); if (ret) { @@ -25100,7 +35059,17 @@ index 55247889c1..fdb56ce05d 100644 break; } -@@ -478,7 +473,7 @@ drop: + wqe_posted++; ++ ++#ifdef RTE_ARCH_32 ++ /* Ring short doorbell if approaching the wqe increment ++ * limit. ++ */ ++ if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) { ++ mana_rq_ring_doorbell(rxq); ++ rxq->wqe_cnt_to_short_db = 0; ++ } ++#endif } if (wqe_posted) @@ -25109,7 +35078,20 @@ index 55247889c1..fdb56ce05d 100644 return pkt_received; } -@@ -490,8 +485,8 @@ mana_arm_cq(struct mana_rxq *rxq, uint8_t arm) + ++#ifdef RTE_ARCH_32 ++static int ++mana_arm_cq(struct mana_rxq *rxq __rte_unused, uint8_t arm __rte_unused) ++{ ++ DP_LOG(ERR, "Do not support in 32 bit"); ++ ++ return -ENODEV; ++} ++#else + static int + mana_arm_cq(struct mana_rxq *rxq, uint8_t arm) + { +@@ -490,12 +529,13 @@ mana_arm_cq(struct mana_rxq *rxq, uint8_t arm) uint32_t head = rxq->gdma_cq.head % (rxq->gdma_cq.count << COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE); @@ -25120,7 +35102,12 @@ index 55247889c1..fdb56ce05d 100644 return mana_ring_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION, rxq->gdma_cq.id, head, arm); -@@ -521,8 +516,8 @@ mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) + } ++#endif + + int + mana_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -521,8 +561,8 @@ mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (ret) { if (ret != EAGAIN) @@ -25132,10 +35119,10 @@ index 55247889c1..fdb56ce05d 100644 ibv_ack_cq_events(rxq->cq, 1); } diff --git a/dpdk/drivers/net/mana/tx.c b/dpdk/drivers/net/mana/tx.c -index 300bf27cc1..39cc59550e 100644 +index 300bf27cc1..3e255157f9 100644 --- a/dpdk/drivers/net/mana/tx.c +++ b/dpdk/drivers/net/mana/tx.c -@@ -170,32 +170,35 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -170,32 +170,38 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct mana_txq *txq = dpdk_txq; struct mana_priv *priv = txq->priv; @@ -25144,6 +35131,9 @@ index 300bf27cc1..39cc59550e 100644 void *db_page; uint16_t pkt_sent = 0; + uint32_t num_comp; ++#ifdef RTE_ARCH_32 ++ uint32_t wqe_count = 0; ++#endif /* Process send completions from GDMA */ - while (gdma_poll_completion_queue(&txq->gdma_cq, &comp) == 1) { @@ -25181,7 +35171,7 @@ index 300bf27cc1..39cc59550e 100644 rte_pktmbuf_free(desc->pkt); } -@@ -208,14 +211,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -208,14 +214,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) for (uint16_t pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) { struct rte_mbuf *m_pkt = tx_pkts[pkt_idx]; struct rte_mbuf *m_seg = m_pkt; @@ -25200,7 +35190,7 @@ index 300bf27cc1..39cc59550e 100644 continue; } -@@ -257,12 +260,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -257,12 +263,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) tcp_hdr->cksum = rte_ipv6_phdr_cksum(ip_hdr, m_pkt->ol_flags); } else { @@ -25216,7 +35206,7 @@ index 300bf27cc1..39cc59550e 100644 } if ((m_pkt->ol_flags & RTE_MBUF_F_TX_L4_MASK) == -@@ -297,10 +302,12 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -297,10 
+305,12 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) m_pkt->ol_flags); } else { @@ -25230,7 +35220,7 @@ index 300bf27cc1..39cc59550e 100644 } tx_oob.short_oob.suppress_tx_CQE_generation = 0; -@@ -310,20 +317,20 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -310,20 +320,20 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) get_vsq_frame_num(txq->gdma_sq.id); tx_oob.short_oob.short_vport_offset = txq->tx_vp_offset; @@ -25263,7 +35253,7 @@ index 300bf27cc1..39cc59550e 100644 /* Create SGL for packet data buffers */ for (seg_idx = 0; seg_idx < m_pkt->nb_segs; seg_idx++) { -@@ -331,8 +338,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -331,8 +341,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) mana_find_pmd_mr(&txq->mr_btree, priv, m_seg); if (!mr) { @@ -25274,7 +35264,7 @@ index 300bf27cc1..39cc59550e 100644 break; } -@@ -342,11 +349,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -342,11 +352,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) sgl.gdma_sgl[seg_idx].size = m_seg->data_len; sgl.gdma_sgl[seg_idx].memory_key = mr->lkey; @@ -25291,7 +35281,7 @@ index 300bf27cc1..39cc59550e 100644 m_seg = m_seg->next; } -@@ -355,11 +362,10 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -355,11 +365,10 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) if (seg_idx != m_pkt->nb_segs) continue; @@ -25305,7 +35295,7 @@ index 300bf27cc1..39cc59550e 100644 work_req.sgl = sgl.gdma_sgl; work_req.num_sgl_elements = m_pkt->nb_segs; -@@ -370,24 +376,24 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -370,24 +379,38 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT; ret = gdma_post_work_request(&txq->gdma_sq, &work_req, @@ -25328,6 +35318,20 @@ index 300bf27cc1..39cc59550e 100644 - nb_pkts, pkt_idx); + DP_LOG(DEBUG, "nb_pkts %u pkt[%d] sent", + nb_pkts, pkt_idx); ++#ifdef RTE_ARCH_32 ++ wqe_count += wqe_size_in_bu; ++ if (wqe_count > TX_WQE_SHORT_DB_THRESHOLD) { ++ /* wqe_count approaching to short doorbell ++ * increment limit. Stop processing further ++ * more packets and just ring short ++ * doorbell. 
++ */ ++ DP_LOG(DEBUG, "wqe_count %u reaching limit, " ++ "pkt_sent %d", ++ wqe_count, pkt_sent); ++ break; ++ } ++#endif } else { - DRV_LOG(INFO, "pkt[%d] failed to post send ret %d", - pkt_idx, ret); @@ -25336,15 +35340,70 @@ index 300bf27cc1..39cc59550e 100644 break; } } -@@ -409,7 +415,7 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -403,13 +426,21 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + } + + if (pkt_sent) { ++#ifdef RTE_ARCH_32 ++ ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_SEND, ++ txq->gdma_sq.id, ++ wqe_count * ++ GDMA_WQE_ALIGNMENT_UNIT_SIZE, ++ 0); ++#else + ret = mana_ring_doorbell(db_page, GDMA_QUEUE_SEND, + txq->gdma_sq.id, + txq->gdma_sq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE, 0); ++#endif if (ret) - DRV_LOG(ERR, "mana_ring_doorbell failed ret %d", ret); + DP_LOG(ERR, "mana_ring_doorbell failed ret %d", ret); } return pkt_sent; +diff --git a/dpdk/drivers/net/memif/rte_eth_memif.c b/dpdk/drivers/net/memif/rte_eth_memif.c +index 1b1c1a652b..3b83a5a8bb 100644 +--- a/dpdk/drivers/net/memif/rte_eth_memif.c ++++ b/dpdk/drivers/net/memif/rte_eth_memif.c +@@ -1240,6 +1240,7 @@ memif_dev_start(struct rte_eth_dev *dev) + { + struct pmd_internals *pmd = dev->data->dev_private; + int ret = 0; ++ uint16_t i; + + switch (pmd->role) { + case MEMIF_ROLE_CLIENT: +@@ -1254,13 +1255,28 @@ memif_dev_start(struct rte_eth_dev *dev) + break; + } + ++ if (ret == 0) { ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ } ++ + return ret; + } + + static int + memif_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + memif_disconnect(dev); ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/meson.build b/dpdk/drivers/net/meson.build index 6470bf3636..f83a6de117 100644 --- a/dpdk/drivers/net/meson.build @@ -25357,6 +35416,51 @@ index 6470bf3636..f83a6de117 100644 'memif', 'mlx4', 'mlx5', +diff --git a/dpdk/drivers/net/mlx4/mlx4.c b/dpdk/drivers/net/mlx4/mlx4.c +index a54016f4a2..1389b606cc 100644 +--- a/dpdk/drivers/net/mlx4/mlx4.c ++++ b/dpdk/drivers/net/mlx4/mlx4.c +@@ -292,6 +292,7 @@ mlx4_dev_start(struct rte_eth_dev *dev) + { + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow_error error; ++ uint16_t i; + int ret; + + if (priv->started) +@@ -327,6 +328,12 @@ mlx4_dev_start(struct rte_eth_dev *dev) + dev->rx_pkt_burst = mlx4_rx_burst; + /* Enable datapath on secondary process. 
*/ + mlx4_mp_req_start_rxtx(dev); ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + err: + mlx4_dev_stop(dev); +@@ -345,6 +352,7 @@ static int + mlx4_dev_stop(struct rte_eth_dev *dev) + { + struct mlx4_priv *priv = dev->data->dev_private; ++ uint16_t i; + + if (!priv->started) + return 0; +@@ -359,6 +367,11 @@ mlx4_dev_stop(struct rte_eth_dev *dev) + mlx4_rxq_intr_disable(priv); + mlx4_rss_deinit(priv); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c index 721376b8da..acad42e12e 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c @@ -25396,7 +35500,7 @@ index 721376b8da..acad42e12e 100644 if (!devx_obj->obj) { DR_LOG(ERR, "Failed to create header_modify_pattern"); diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c -index 6b98eb8c96..e61ca7b9b9 100644 +index 6b98eb8c96..6fc5d70f67 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c +++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c @@ -123,6 +123,7 @@ struct mlx5dr_definer_conv_data { @@ -25423,6 +35527,51 @@ index 6b98eb8c96..e61ca7b9b9 100644 } if (m->next_proto_id) { +@@ -1322,7 +1328,6 @@ mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd, + { + const struct rte_flow_item_integrity *m = item->mask; + struct mlx5dr_definer_fc *fc; +- bool inner = cd->tunnel; + + if (!m) + return 0; +@@ -1333,7 +1338,7 @@ mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd, + } + + if (m->l3_ok || m->ipv4_csum_ok || m->l4_ok || m->l4_csum_ok) { +- fc = &cd->fc[DR_CALC_FNAME(INTEGRITY, inner)]; ++ fc = &cd->fc[DR_CALC_FNAME(INTEGRITY, m->level)]; + fc->item_idx = item_idx; + fc->tag_set = &mlx5dr_definer_integrity_set; + DR_CALC_SET_HDR(fc, oks1, oks1_bits); +@@ -1563,8 +1568,7 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx, + break; + case RTE_FLOW_ITEM_TYPE_INTEGRITY: + ret = mlx5dr_definer_conv_item_integrity(&cd, items, i); +- item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_INTEGRITY : +- MLX5_FLOW_ITEM_OUTER_INTEGRITY; ++ item_flags |= MLX5_FLOW_ITEM_INTEGRITY; + break; + case RTE_FLOW_ITEM_TYPE_CONNTRACK: + ret = mlx5dr_definer_conv_item_conntrack(&cd, items, i); +@@ -1629,11 +1633,15 @@ mlx5dr_definer_find_byte_in_tag(struct mlx5dr_definer *definer, + uint32_t *tag_byte_off) + { + uint8_t byte_offset; +- int i; ++ int i, dw_to_scan; ++ ++ /* Avoid accessing unused DW selectors */ ++ dw_to_scan = mlx5dr_definer_is_jumbo(definer) ? 
++ DW_SELECTORS : DW_SELECTORS_MATCH; + + /* Add offset since each DW covers multiple BYTEs */ + byte_offset = hl_byte_off % DW_SIZE; +- for (i = 0; i < DW_SELECTORS; i++) { ++ for (i = 0; i < dw_to_scan; i++) { + if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) { + *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1); + return 0; diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h index d52c6b0627..5b38a54e6b 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h @@ -25477,7 +35626,7 @@ index 5c8bbe6fc6..a8aba31cbe 100644 return 0; diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c -index 72268c0c8a..639e629fe4 100644 +index 72268c0c8a..dd5a0c546d 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c @@ -28,6 +28,7 @@ @@ -25569,7 +35718,7 @@ index 72268c0c8a..639e629fe4 100644 if (!if_indextoname(ifindex, ifname)) { rte_errno = errno; -@@ -1062,8 +1079,21 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) +@@ -1062,8 +1079,22 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) file = fopen(phys_port_name, "rb"); if (file != NULL) { @@ -25578,6 +35727,7 @@ index 72268c0c8a..639e629fe4 100644 + + line_size = getline(&port_name, &port_name_size, file); + if (line_size < 0) { ++ free(port_name); + fclose(file); + rte_errno = errno; + return -rte_errno; @@ -25592,7 +35742,7 @@ index 72268c0c8a..639e629fe4 100644 fclose(file); } file = fopen(phys_switch_id, "rb"); -@@ -1776,3 +1806,70 @@ exit: +@@ -1776,3 +1807,70 @@ exit: mlx5_free(sset_info); return ret; } @@ -25664,10 +35814,44 @@ index 72268c0c8a..639e629fe4 100644 +} + diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c -index 3c9a823edf..b139bb75b9 100644 +index 3c9a823edf..2767b11708 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c -@@ -51,7 +51,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item, +@@ -8,6 +8,10 @@ + + /* Key of thread specific flow workspace data. */ + static rte_thread_key key_workspace; ++/* Flow workspace global list head for garbage collector. */ ++static struct mlx5_flow_workspace *gc_head; ++/* Spinlock for operating flow workspace list. 
*/ ++static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER; + + int + mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item, +@@ -48,10 +52,30 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item, + return 0; + } + ++void ++mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws) ++{ ++ rte_spinlock_lock(&mlx5_flow_workspace_lock); ++ ws->gc = gc_head; ++ gc_head = ws; ++ rte_spinlock_unlock(&mlx5_flow_workspace_lock); ++} ++ ++static void ++mlx5_flow_os_workspace_gc_release(void) ++{ ++ while (gc_head) { ++ struct mlx5_flow_workspace *wks = gc_head; ++ ++ gc_head = wks->gc; ++ flow_release_workspace(wks); ++ } ++} ++ int mlx5_flow_os_init_workspace_once(void) { @@ -25676,10 +35860,55 @@ index 3c9a823edf..b139bb75b9 100644 DRV_LOG(ERR, "Can't create flow workspace data thread key."); rte_errno = ENOMEM; return -rte_errno; +@@ -75,4 +99,5 @@ void + mlx5_flow_os_release_workspace(void) + { + rte_thread_key_delete(key_workspace); ++ mlx5_flow_os_workspace_gc_release(); + } +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.h b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.h +index ed71289322..3f7a94c9ee 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.h ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.h +@@ -526,4 +526,12 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item, + uint8_t target_protocol, + struct rte_flow_error *error); + ++/** ++ * Add per thread workspace to the global list for garbage collection. ++ * ++ * @param[in] ws ++ * Pointer to the flow workspace. ++ */ ++void mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws); ++ + #endif /* RTE_PMD_MLX5_FLOW_OS_H_ */ diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c -index a71474c90a..6fdade7dab 100644 +index a71474c90a..28bf7211e4 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c +@@ -474,6 +474,10 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) + err = mlx5_alloc_table_hash_list(priv); + if (err) + goto error; ++ sh->default_miss_action = ++ mlx5_glue->dr_create_flow_action_default_miss(); ++ if (!sh->default_miss_action) ++ DRV_LOG(WARNING, "Default miss action is not supported."); + if (priv->sh->config.dv_flow_en == 2) + return 0; + /* The resources below are only valid with DV support. 
*/ +@@ -597,10 +601,6 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) + + __mlx5_discovery_misc5_cap(priv); + #endif /* HAVE_MLX5DV_DR */ +- sh->default_miss_action = +- mlx5_glue->dr_create_flow_action_default_miss(); +- if (!sh->default_miss_action) +- DRV_LOG(WARNING, "Default miss action is not supported."); + LIST_INIT(&sh->shared_rxqs); + return 0; + error: @@ -873,10 +873,10 @@ mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused) */ if (!priv->sh->drop_action_check_flag) { @@ -25693,7 +35922,7 @@ index a71474c90a..6fdade7dab 100644 priv->root_drop_action = priv->sh->dr_drop_action; else priv->root_drop_action = priv->drop_queue.hrxq->action; -@@ -1613,6 +1613,22 @@ err_secondary: +@@ -1613,6 +1613,23 @@ err_secondary: err = EINVAL; goto error; } @@ -25713,14 +35942,44 @@ index a71474c90a..6fdade7dab 100644 + "matching is disabled", + eth_dev->data->port_id); + } ++ eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; return eth_dev; #else DRV_LOG(ERR, "DV support is missing for HWS."); diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c -index e55be8720e..8250c94803 100644 +index e55be8720e..1a5f95b22b 100644 --- a/dpdk/drivers/net/mlx5/mlx5.c +++ b/dpdk/drivers/net/mlx5/mlx5.c -@@ -1401,7 +1401,8 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh, +@@ -241,7 +241,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { + .type = "mlx5_port_id_ipool", + }, + [MLX5_IPOOL_JUMP] = { +- .size = sizeof(struct mlx5_flow_tbl_data_entry), ++ /* ++ * MLX5_IPOOL_JUMP ipool entry size depends on selected flow engine. ++ * When HW steering is enabled mlx5_flow_group struct is used. ++ * Otherwise mlx5_flow_tbl_data_entry struct is used. ++ */ ++ .size = 0, + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, +@@ -902,6 +907,14 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh) + sizeof(struct mlx5_flow_handle) : + MLX5_FLOW_HANDLE_VERBS_SIZE; + break; ++#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) ++ /* Set MLX5_IPOOL_JUMP ipool entry size depending on selected flow engine. */ ++ case MLX5_IPOOL_JUMP: ++ cfg.size = sh->config.dv_flow_en == 2 ? ++ sizeof(struct mlx5_flow_group) : ++ sizeof(struct mlx5_flow_tbl_data_entry); ++ break; ++#endif + } + if (sh->config.reclaim_mode) { + cfg.release_mem_en = 1; +@@ -1401,7 +1414,8 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh, rte_errno = ENODEV; return -rte_errno; } @@ -25730,15 +35989,17 @@ index e55be8720e..8250c94803 100644 DRV_LOG(WARNING, "\"tx_skew\" doesn't affect without \"tx_pp\"."); } -@@ -1729,6 +1730,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) - if (LIST_EMPTY(&mlx5_dev_ctx_list)) { - mlx5_os_net_cleanup(); - mlx5_flow_os_release_workspace(); -+ mlx5_flow_workspace_gc_release(); - } +@@ -1598,6 +1612,9 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, + /* Add context to the global device list. */ + LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next); + rte_spinlock_init(&sh->geneve_tlv_opt_sl); ++ /* Init counter pool list header and lock. 
*/ ++ LIST_INIT(&sh->hws_cpool_list); ++ rte_spinlock_init(&sh->cpool_lock); + exit: pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); - if (sh->flex_parsers_dv) { -@@ -1976,8 +1978,12 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) + return sh; +@@ -1976,8 +1993,12 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) void mlx5_proc_priv_uninit(struct rte_eth_dev *dev) { @@ -25752,18 +36013,27 @@ index e55be8720e..8250c94803 100644 mlx5_free(dev->process_private); dev->process_private = NULL; } -@@ -2473,6 +2479,7 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist, +@@ -2473,6 +2494,7 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist, config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM; -+ config->mprq.log_stride_size = MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE; ++ config->mprq.log_stride_size = MLX5_ARG_UNSET; config->log_hp_size = MLX5_ARG_UNSET; config->std_delay_drop = 0; config->hp_delay_drop = 0; diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h -index 31982002ee..5f8361c52b 100644 +index 31982002ee..96a269ccd0 100644 --- a/dpdk/drivers/net/mlx5/mlx5.h +++ b/dpdk/drivers/net/mlx5/mlx5.h +@@ -1245,7 +1245,7 @@ struct mlx5_aso_ct_action { + /* General action object for reply dir. */ + void *dr_action_rply; + uint32_t refcnt; /* Action used count in device flows. */ +- uint16_t offset; /* Offset of ASO CT in DevX objects bulk. */ ++ uint32_t offset; /* Offset of ASO CT in DevX objects bulk. */ + uint16_t peer; /* The only peer port index could also use this CT. */ + enum mlx5_aso_ct_state state; /* ASO CT state. */ + bool is_original; /* The direction of the DR action to be used. */ @@ -1367,7 +1367,7 @@ struct mlx5_dev_ctx_shared { uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */ uint32_t tunnel_header_2_3:1; /* tunnel_header_2_3 is supported. */ @@ -25773,7 +36043,16 @@ index 31982002ee..5f8361c52b 100644 uint32_t drop_action_check_flag:1; /* Check Flag for drop action. */ uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */ uint32_t metadata_regc_check_flag:1; /* Check Flag for metadata REGC. */ -@@ -1463,6 +1463,8 @@ struct mlx5_dev_ctx_shared { +@@ -1455,6 +1455,8 @@ struct mlx5_dev_ctx_shared { + uint32_t host_shaper_rate:8; + uint32_t lwm_triggered:1; + struct mlx5_hws_cnt_svc_mng *cnt_svc; ++ rte_spinlock_t cpool_lock; ++ LIST_HEAD(hws_cpool_list, mlx5_hws_cnt_pool) hws_cpool_list; /* Count pool list. */ + struct mlx5_dev_shared_port port[]; /* per device port data array. */ + }; + +@@ -1463,6 +1465,8 @@ struct mlx5_dev_ctx_shared { * Caution, secondary process may rebuild the struct during port start. */ struct mlx5_proc_priv { @@ -25782,7 +36061,58 @@ index 31982002ee..5f8361c52b 100644 size_t uar_table_sz; /* Size of UAR register table. */ struct mlx5_uar_data uar_table[]; -@@ -1663,6 +1665,7 @@ struct mlx5_priv { +@@ -1635,10 +1639,50 @@ struct mlx5_obj_ops { + + #define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields) + ++enum mlx5_hw_ctrl_flow_type { ++ MLX5_HW_CTRL_FLOW_TYPE_GENERAL, ++ MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT, ++ MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS, ++ MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP, ++ MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY, ++ MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH, ++ MLX5_HW_CTRL_FLOW_TYPE_LACP_RX, ++ MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, ++}; ++ ++/** Additional info about control flow rule. 
*/ ++struct mlx5_hw_ctrl_flow_info { ++ /** Determines the kind of control flow rule. */ ++ enum mlx5_hw_ctrl_flow_type type; ++ union { ++ /** ++ * If control flow is a SQ miss flow (root or not), ++ * then fields contains matching SQ number. ++ */ ++ uint32_t esw_mgr_sq; ++ /** ++ * If control flow is a Tx representor matching, ++ * then fields contains matching SQ number. ++ */ ++ uint32_t tx_repr_sq; ++ }; ++}; ++ ++/** Entry for tracking control flow rules in HWS. */ + struct mlx5_hw_ctrl_flow { + LIST_ENTRY(mlx5_hw_ctrl_flow) next; ++ /** ++ * Owner device is a port on behalf of which flow rule was created. ++ * ++ * It's different from the port which really created the flow rule ++ * if and only if flow rule is created on transfer proxy port ++ * on behalf of representor port. ++ */ + struct rte_eth_dev *owner_dev; ++ /** Pointer to flow rule handle. */ + struct rte_flow *flow; ++ /** Additional information about the control flow rule. */ ++ struct mlx5_hw_ctrl_flow_info info; + }; + + struct mlx5_flow_hw_ctrl_rx; +@@ -1663,6 +1707,7 @@ struct mlx5_priv { unsigned int mtr_en:1; /* Whether support meter. */ unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */ unsigned int lb_used:1; /* Loopback queue is referred to. */ @@ -25790,7 +36120,29 @@ index 31982002ee..5f8361c52b 100644 uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */ uint16_t domain_id; /* Switch domain identifier. */ uint16_t vport_id; /* Associated VF vport index (if any). */ -@@ -2163,6 +2166,8 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev, +@@ -1685,10 +1730,12 @@ struct mlx5_priv { + void *root_drop_action; /* Pointer to root drop action. */ + rte_spinlock_t hw_ctrl_lock; + LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows; ++ LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows; + struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; + struct rte_flow_template_table *hw_esw_sq_miss_tbl; + struct rte_flow_template_table *hw_esw_zero_tbl; + struct rte_flow_template_table *hw_tx_meta_cpy_tbl; ++ struct rte_flow_template_table *hw_lacp_rx_tbl; + struct rte_flow_pattern_template *hw_tx_repr_tagging_pt; + struct rte_flow_actions_template *hw_tx_repr_tagging_at; + struct rte_flow_template_table *hw_tx_repr_tagging_tbl; +@@ -1768,6 +1815,8 @@ struct mlx5_priv { + struct mlx5dr_action *hw_drop[2]; + /* HW steering global tag action. */ + struct mlx5dr_action *hw_tag[2]; ++ /* HW steering global default miss action. */ ++ struct mlx5dr_action *hw_def_miss; + /* HW steering create ongoing rte flow table list header. */ + LIST_HEAD(flow_hw_tbl_ongo, rte_flow_template_table) flow_hw_tbl_ongo; + struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. 
*/ +@@ -2163,6 +2212,8 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned int n, unsigned int n_used); void mlx5_txpp_interrupt_handler(void *cb_arg); @@ -25815,7 +36167,7 @@ index 02deaac612..7e0ec91328 100644 mlx5_devx_tir_destroy(hrxq); if (hrxq->ind_table->ind_table != NULL) diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c -index a0cf677fb0..942dccf518 100644 +index a0cf677fb0..01b463adec 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow.c @@ -364,7 +364,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[], @@ -25876,7 +36228,32 @@ index a0cf677fb0..942dccf518 100644 return 0; } -@@ -6125,13 +6138,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5878,6 +5891,7 @@ flow_check_match_action(const struct rte_flow_action actions[], + { + const struct rte_flow_action_sample *sample; + const struct rte_flow_action_raw_decap *decap; ++ const struct rte_flow_action *action_cur = NULL; + int actions_n = 0; + uint32_t ratio = 0; + int sub_type = 0; +@@ -5938,12 +5952,12 @@ flow_check_match_action(const struct rte_flow_action actions[], + break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + decap = actions->conf; +- while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID) ++ action_cur = actions; ++ while ((++action_cur)->type == RTE_FLOW_ACTION_TYPE_VOID) + ; +- actions_n++; +- if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { ++ if (action_cur->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { + const struct rte_flow_action_raw_encap *encap = +- actions->conf; ++ action_cur->conf; + if (decap->size <= + MLX5_ENCAPSULATION_DECISION_SIZE && + encap->size > +@@ -6125,13 +6139,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, /* Prepare the prefix tag action. */ append_index++; set_tag = (void *)(actions_pre + actions_n + append_index); @@ -25893,7 +36270,7 @@ index a0cf677fb0..942dccf518 100644 if (ret < 0) return ret; mlx5_ipool_malloc(priv->sh->ipool -@@ -6918,36 +6932,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow) +@@ -6918,36 +6933,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow) return tunnel; } @@ -25930,7 +36307,7 @@ index a0cf677fb0..942dccf518 100644 /** * Create a flow and add it to @p list. * -@@ -7066,8 +7050,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, +@@ -7066,8 +7051,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, if (attr->ingress) rss = flow_get_rss_action(dev, p_actions_rx); if (rss) { @@ -25940,7 +36317,7 @@ index a0cf677fb0..942dccf518 100644 /* * The following information is required by * mlx5_flow_hashfields_adjust() in advance. -@@ -7555,12 +7538,34 @@ flow_release_workspace(void *data) +@@ -7555,7 +7539,6 @@ flow_release_workspace(void *data) while (wks) { next = wks->next; @@ -25948,35 +36325,7 @@ index a0cf677fb0..942dccf518 100644 free(wks); wks = next; } - } - -+static struct mlx5_flow_workspace *gc_head; -+static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER; -+ -+static void -+mlx5_flow_workspace_gc_add(struct mlx5_flow_workspace *ws) -+{ -+ rte_spinlock_lock(&mlx5_flow_workspace_lock); -+ ws->gc = gc_head; -+ gc_head = ws; -+ rte_spinlock_unlock(&mlx5_flow_workspace_lock); -+} -+ -+void -+mlx5_flow_workspace_gc_release(void) -+{ -+ while (gc_head) { -+ struct mlx5_flow_workspace *wks = gc_head; -+ -+ gc_head = wks->gc; -+ flow_release_workspace(wks); -+ } -+} -+ - /** - * Get thread specific current flow workspace. 
- * -@@ -7586,23 +7591,17 @@ mlx5_flow_get_thread_workspace(void) +@@ -7586,23 +7569,17 @@ mlx5_flow_get_thread_workspace(void) static struct mlx5_flow_workspace* flow_alloc_thread_workspace(void) { @@ -26006,15 +36355,15 @@ index a0cf677fb0..942dccf518 100644 } /** -@@ -7623,6 +7622,7 @@ mlx5_flow_push_thread_workspace(void) +@@ -7623,6 +7600,7 @@ mlx5_flow_push_thread_workspace(void) data = flow_alloc_thread_workspace(); if (!data) return NULL; -+ mlx5_flow_workspace_gc_add(data); ++ mlx5_flow_os_workspace_gc_add(data); } else if (!curr->inuse) { data = curr; } else if (curr->next) { -@@ -7971,6 +7971,10 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, +@@ -7971,6 +7949,10 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, "port must be stopped first"); return -rte_errno; } @@ -26025,7 +36374,7 @@ index a0cf677fb0..942dccf518 100644 priv->isolated = !!enable; if (enable) dev->dev_ops = &mlx5_dev_ops_isolate; -@@ -9758,23 +9762,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, +@@ -9758,23 +9740,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, } i = lcore_index; @@ -26090,7 +36439,7 @@ index a0cf677fb0..942dccf518 100644 } } -@@ -10104,9 +10132,19 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, +@@ -10104,9 +10110,19 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, const struct mlx5_flow_driver_ops *fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr)); int ret; @@ -26112,7 +36461,7 @@ index a0cf677fb0..942dccf518 100644 if (ret) return ret; return flow_drv_action_update(dev, handle, update, fops, -@@ -10841,7 +10879,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, +@@ -10841,7 +10857,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, if (!is_tunnel_offload_active(dev)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, @@ -26122,7 +36471,7 @@ index a0cf677fb0..942dccf518 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h -index 1f57ecd6e1..9724b88996 100644 +index 1f57ecd6e1..eb87f84166 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.h +++ b/dpdk/drivers/net/mlx5/mlx5_flow.h @@ -1437,10 +1437,10 @@ struct mlx5_flow_workspace { @@ -26137,16 +36486,15 @@ index 1f57ecd6e1..9724b88996 100644 uint32_t flow_idx; /* Intermediate device flow index. */ struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. 
*/ struct mlx5_flow_meter_policy *policy; -@@ -1926,6 +1926,8 @@ struct mlx5_flow_driver_ops { +@@ -1926,6 +1926,7 @@ struct mlx5_flow_driver_ops { struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void); void mlx5_flow_pop_thread_workspace(void); struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void); -+void mlx5_flow_workspace_gc_release(void); + __extension__ struct flow_grp_info { uint64_t external:1; -@@ -2226,7 +2228,8 @@ int mlx5_validate_action_rss(struct rte_eth_dev *dev, +@@ -2226,7 +2227,8 @@ int mlx5_validate_action_rss(struct rte_eth_dev *dev, int mlx5_flow_validate_action_count(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error); @@ -26156,8 +36504,38 @@ index 1f57ecd6e1..9724b88996 100644 const struct rte_flow_attr *attr, struct rte_flow_error *error); int mlx5_flow_validate_action_flag(uint64_t action_flags, +@@ -2579,10 +2581,13 @@ int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev, + int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev); + + int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, +- uint32_t sqn); ++ uint32_t sqn, bool external); ++int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, ++ uint32_t sqn); + int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev); + int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev); +-int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn); ++int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external); ++int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev); + int mlx5_flow_actions_validate(struct rte_eth_dev *dev, + const struct rte_flow_actions_template_attr *attr, + const struct rte_flow_action actions[], +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c +index 29bd7ce9e8..8441be3dea 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c +@@ -932,7 +932,8 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) + rte_spinlock_lock(&sq->sqsl); + max = (uint16_t)(sq->head - sq->tail); + if (unlikely(!max)) { +- rte_spinlock_unlock(&sq->sqsl); ++ if (need_lock) ++ rte_spinlock_unlock(&sq->sqsl); + return; + } + do { diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -index 62c38b87a1..22058ed980 100644 +index 62c38b87a1..5c2af50fe5 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c @@ -2129,6 +2129,8 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, @@ -26190,7 +36568,235 @@ index 62c38b87a1..22058ed980 100644 return 0; } -@@ -7051,9 +7060,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -4445,6 +4454,7 @@ flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) + { + struct rte_ether_hdr *eth = NULL; + struct rte_vlan_hdr *vlan = NULL; ++ struct rte_ipv4_hdr *ipv4 = NULL; + struct rte_ipv6_hdr *ipv6 = NULL; + struct rte_udp_hdr *udp = NULL; + char *next_hdr; +@@ -4461,24 +4471,27 @@ flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) + next_hdr += sizeof(struct rte_vlan_hdr); + } + +- /* HW calculates IPv4 csum. no need to proceed */ +- if (proto == RTE_ETHER_TYPE_IPV4) +- return 0; +- + /* non IPv4/IPv6 header. 
not supported */ +- if (proto != RTE_ETHER_TYPE_IPV6) { ++ if (proto != RTE_ETHER_TYPE_IPV4 && proto != RTE_ETHER_TYPE_IPV6) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "Cannot offload non IPv4/IPv6"); + } + +- ipv6 = (struct rte_ipv6_hdr *)next_hdr; +- +- /* ignore non UDP */ +- if (ipv6->proto != IPPROTO_UDP) +- return 0; ++ if (proto == RTE_ETHER_TYPE_IPV4) { ++ ipv4 = (struct rte_ipv4_hdr *)next_hdr; ++ /* ignore non UDP */ ++ if (ipv4->next_proto_id != IPPROTO_UDP) ++ return 0; ++ udp = (struct rte_udp_hdr *)(ipv4 + 1); ++ } else { ++ ipv6 = (struct rte_ipv6_hdr *)next_hdr; ++ /* ignore non UDP */ ++ if (ipv6->proto != IPPROTO_UDP) ++ return 0; ++ udp = (struct rte_udp_hdr *)(ipv6 + 1); ++ } + +- udp = (struct rte_udp_hdr *)(ipv6 + 1); + udp->dgram_cksum = 0; + + return 0; +@@ -5750,6 +5763,7 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) + */ + static int + flow_dv_validate_action_sample(uint64_t *action_flags, ++ uint64_t *sub_action_flags, + const struct rte_flow_action *action, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, +@@ -5758,14 +5772,15 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + const struct rte_flow_action_rss **sample_rss, + const struct rte_flow_action_count **count, + int *fdb_mirror, ++ uint16_t *sample_port_id, + bool root, + struct rte_flow_error *error) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_sh_config *dev_conf = &priv->sh->config; + const struct rte_flow_action_sample *sample = action->conf; ++ const struct rte_flow_action_port_id *port = NULL; + const struct rte_flow_action *act; +- uint64_t sub_action_flags = 0; + uint16_t queue_index = 0xFFFF; + int actions_n = 0; + int ret; +@@ -5812,20 +5827,20 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_validate_action_queue(act, +- sub_action_flags, ++ *sub_action_flags, + dev, + attr, error); + if (ret < 0) + return ret; + queue_index = ((const struct rte_flow_action_queue *) + (act->conf))->index; +- sub_action_flags |= MLX5_FLOW_ACTION_QUEUE; ++ *sub_action_flags |= MLX5_FLOW_ACTION_QUEUE; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + *sample_rss = act->conf; + ret = mlx5_flow_validate_action_rss(act, +- sub_action_flags, ++ *sub_action_flags, + dev, attr, + item_flags, + error); +@@ -5841,48 +5856,57 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + "or level in the same flow"); + if (*sample_rss != NULL && (*sample_rss)->queue_num) + queue_index = (*sample_rss)->queue[0]; +- sub_action_flags |= MLX5_FLOW_ACTION_RSS; ++ *sub_action_flags |= MLX5_FLOW_ACTION_RSS; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + ret = flow_dv_validate_action_mark(dev, act, +- sub_action_flags, ++ *sub_action_flags, + attr, error); + if (ret < 0) + return ret; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) +- sub_action_flags |= MLX5_FLOW_ACTION_MARK | ++ *sub_action_flags |= MLX5_FLOW_ACTION_MARK | + MLX5_FLOW_ACTION_MARK_EXT; + else +- sub_action_flags |= MLX5_FLOW_ACTION_MARK; ++ *sub_action_flags |= MLX5_FLOW_ACTION_MARK; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow_dv_validate_action_count +- (dev, false, *action_flags | sub_action_flags, ++ (dev, false, *action_flags | *sub_action_flags, + root, error); + if (ret < 0) + return ret; + *count = act->conf; +- sub_action_flags |= MLX5_FLOW_ACTION_COUNT; ++ *sub_action_flags |= 
MLX5_FLOW_ACTION_COUNT; + *action_flags |= MLX5_FLOW_ACTION_COUNT; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + ret = flow_dv_validate_action_port_id(dev, +- sub_action_flags, ++ *sub_action_flags, + act, + attr, + error); + if (ret) + return ret; +- sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID; ++ if (act->type == RTE_FLOW_ACTION_TYPE_PORT_ID) { ++ port = (const struct rte_flow_action_port_id *) ++ act->conf; ++ *sample_port_id = port->original ? ++ dev->data->port_id : port->id; ++ } else { ++ *sample_port_id = ((const struct rte_flow_action_ethdev *) ++ act->conf)->port_id; ++ } ++ *sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + ret = flow_dv_validate_action_raw_encap_decap +- (dev, NULL, act->conf, attr, &sub_action_flags, ++ (dev, NULL, act->conf, attr, sub_action_flags, + &actions_n, action, item_flags, error); + if (ret < 0) + return ret; +@@ -5891,12 +5915,12 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + ret = flow_dv_validate_action_l2_encap(dev, +- sub_action_flags, ++ *sub_action_flags, + act, attr, + error); + if (ret < 0) + return ret; +- sub_action_flags |= MLX5_FLOW_ACTION_ENCAP; ++ *sub_action_flags |= MLX5_FLOW_ACTION_ENCAP; + ++actions_n; + break; + default: +@@ -5908,7 +5932,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + } + } + if (attr->ingress) { +- if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE | ++ if (!(*sub_action_flags & (MLX5_FLOW_ACTION_QUEUE | + MLX5_FLOW_ACTION_RSS))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, +@@ -5930,17 +5954,17 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + "E-Switch doesn't support " + "any optional action " + "for sampling"); +- if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE) ++ if (*sub_action_flags & MLX5_FLOW_ACTION_QUEUE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action QUEUE"); +- if (sub_action_flags & MLX5_FLOW_ACTION_RSS) ++ if (*sub_action_flags & MLX5_FLOW_ACTION_RSS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action QUEUE"); +- if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID)) ++ if (!(*sub_action_flags & MLX5_FLOW_ACTION_PORT_ID)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, +@@ -5949,16 +5973,16 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + *fdb_mirror = 1; + } + /* Continue validation for Xcap actions.*/ +- if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) && ++ if ((*sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) && + (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index))) { +- if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) == ++ if ((*sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) == + MLX5_FLOW_XCAP_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap and decap " + "combination aren't " + "supported"); +- if (attr->ingress && (sub_action_flags & MLX5_FLOW_ACTION_ENCAP)) ++ if (attr->ingress && (*sub_action_flags & MLX5_FLOW_ACTION_ENCAP)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap is not supported" +@@ -7051,11 +7075,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, bool def_policy = false; bool shared_count = false; uint16_t udp_dport = 0; @@ -26198,11 
+36804,18 @@ index 62c38b87a1..22058ed980 100644 + uint32_t tag_id = 0, tag_bitmap = 0; const struct rte_flow_action_age *non_shared_age = NULL; const struct rte_flow_action_count *count = NULL; ++ const struct rte_flow_action_port_id *port = NULL; + const struct mlx5_rte_flow_item_tag *mlx5_tag; struct mlx5_priv *act_priv = NULL; int aso_after_sample = 0; ++ struct mlx5_priv *port_priv = NULL; ++ uint64_t sub_action_flags = 0; ++ uint16_t sample_port_id = 0; ++ uint16_t port_id = 0; -@@ -7371,7 +7381,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + if (items == NULL) + return -1; +@@ -7371,7 +7401,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, last_item = MLX5_FLOW_LAYER_ICMP6; break; case RTE_FLOW_ITEM_TYPE_TAG: @@ -26211,7 +36824,7 @@ index 62c38b87a1..22058ed980 100644 attr, error); if (ret < 0) return ret; -@@ -7381,6 +7391,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7381,6 +7411,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, last_item = MLX5_FLOW_ITEM_SQ; break; case MLX5_RTE_FLOW_ITEM_TYPE_TAG: @@ -26225,7 +36838,22 @@ index 62c38b87a1..22058ed980 100644 break; case RTE_FLOW_ITEM_TYPE_GTP: ret = flow_dv_validate_item_gtp(dev, items, item_flags, -@@ -7562,7 +7579,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7486,6 +7523,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + error); + if (ret) + return ret; ++ if (type == RTE_FLOW_ACTION_TYPE_PORT_ID) { ++ port = (const struct rte_flow_action_port_id *) ++ actions->conf; ++ port_id = port->original ? dev->data->port_id : port->id; ++ } else { ++ port_id = ((const struct rte_flow_action_ethdev *) ++ actions->conf)->port_id; ++ } + action_flags |= MLX5_FLOW_ACTION_PORT_ID; + ++actions_n; + break; +@@ -7562,7 +7607,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, rw_act_num += MLX5_ACT_NUM_SET_TAG; break; case RTE_FLOW_ACTION_TYPE_DROP: @@ -26234,7 +36862,51 @@ index 62c38b87a1..22058ed980 100644 attr, error); if (ret < 0) return ret; -@@ -9223,12 +9240,10 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, +@@ -7985,11 +8030,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + break; + case RTE_FLOW_ACTION_TYPE_SAMPLE: + ret = flow_dv_validate_action_sample(&action_flags, ++ &sub_action_flags, + actions, dev, + attr, item_flags, + rss, &sample_rss, + &sample_count, + &fdb_mirror, ++ &sample_port_id, + is_root, + error); + if (ret < 0) +@@ -8301,6 +8348,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "sample before ASO action is not supported"); ++ if (sub_action_flags & MLX5_FLOW_ACTION_PORT_ID) { ++ port_priv = mlx5_port_to_eswitch_info(sample_port_id, false); ++ if (flow_source_vport_representor(priv, port_priv)) { ++ if (sub_action_flags & MLX5_FLOW_ACTION_ENCAP) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "mirror to rep port with encap is not supported"); ++ } else { ++ if (!(sub_action_flags & MLX5_FLOW_ACTION_ENCAP) && ++ (action_flags & MLX5_FLOW_ACTION_JUMP)) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "mirror to wire port without encap is not supported"); ++ } ++ } ++ if ((action_flags & MLX5_FLOW_ACTION_PORT_ID) && ++ (action_flags & 
MLX5_FLOW_ACTION_ENCAP)) { ++ port_priv = mlx5_port_to_eswitch_info(port_id, false); ++ if (flow_source_vport_representor(priv, port_priv)) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "mirror to rep port with encap is not supported"); ++ } + } + /* + * Validation the NIC Egress flow on representor, except implicit +@@ -9223,12 +9293,10 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, { const struct rte_flow_item_vxlan *vxlan_m; const struct rte_flow_item_vxlan *vxlan_v; @@ -26247,7 +36919,7 @@ index 62c38b87a1..22058ed980 100644 char *vni_v; uint16_t dport; int size; -@@ -9280,24 +9295,11 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, +@@ -9280,24 +9348,11 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, vni_v[i] = vxlan_m->vni[i] & vxlan_v->vni[i]; return; } @@ -26274,7 +36946,7 @@ index 62c38b87a1..22058ed980 100644 } /** -@@ -13717,7 +13719,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev, +@@ -13717,7 +13772,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev, * is the suffix flow. */ dev_flow->handle->layers |= wks.item_flags; @@ -26288,7 +36960,7 @@ index 62c38b87a1..22058ed980 100644 return 0; } -@@ -14820,7 +14827,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, +@@ -14820,7 +14880,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, } dv->actions[n++] = priv->sh->default_miss_action; } @@ -26297,7 +36969,7 @@ index 62c38b87a1..22058ed980 100644 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask); err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, (void *)&dv->value, n, -@@ -17020,7 +17027,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) +@@ -17020,7 +17080,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) static int __flow_dv_create_policy_flow(struct rte_eth_dev *dev, uint32_t color_reg_c_idx, @@ -26306,7 +36978,7 @@ index 62c38b87a1..22058ed980 100644 int actions_n, void *actions, bool match_src_port, const struct rte_flow_item *item, void **rule, const struct rte_flow_attr *attr) -@@ -17050,9 +17057,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, +@@ -17050,9 +17110,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, } flow_dv_match_meta_reg(value.buf, (enum modify_reg)color_reg_c_idx, rte_col_2_mlx5_col(color), UINT32_MAX); @@ -26318,7 +36990,7 @@ index 62c38b87a1..22058ed980 100644 actions_n, actions, rule); if (ret) { DRV_LOG(ERR, "Failed to create meter policy%d flow.", color); -@@ -17206,7 +17213,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, +@@ -17206,7 +17266,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, /* Create flow, matching color. 
*/ if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)i, @@ -26327,7 +36999,7 @@ index 62c38b87a1..22058ed980 100644 acts[i].actions_n, acts[i].dv_actions, svport_match, NULL, &color_rule->rule, &attr)) { -@@ -17674,7 +17681,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, +@@ -17674,7 +17734,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, actions[i++] = priv->sh->dr_drop_action; flow_dv_match_meta_reg_all(matcher_para.buf, value.buf, (enum modify_reg)mtr_id_reg_c, 0, 0); @@ -26336,7 +37008,7 @@ index 62c38b87a1..22058ed980 100644 __flow_dv_adjust_buf_size(&value.size, misc_mask); ret = mlx5_flow_os_create_flow (mtrmng->def_matcher[domain]->matcher_object, -@@ -17719,7 +17726,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, +@@ -17719,7 +17779,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, fm->drop_cnt, NULL); actions[i++] = cnt->action; actions[i++] = priv->sh->dr_drop_action; @@ -26345,7 +37017,7 @@ index 62c38b87a1..22058ed980 100644 __flow_dv_adjust_buf_size(&value.size, misc_mask); ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object, (void *)&value, i, actions, -@@ -18199,7 +18206,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, +@@ -18199,7 +18259,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, goto err_exit; } if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)j, @@ -26354,7 +37026,7 @@ index 62c38b87a1..22058ed980 100644 acts.actions_n, acts.dv_actions, true, item, &color_rule->rule, &attr)) { rte_spinlock_unlock(&mtr_policy->sl); -@@ -18909,7 +18916,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, +@@ -18909,7 +18969,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_DROP: ret = mlx5_flow_validate_action_drop @@ -26363,7 +37035,7 @@ index 62c38b87a1..22058ed980 100644 if (ret < 0) return -rte_mtr_error_set(error, ENOTSUP, -@@ -19243,7 +19250,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, +@@ -19243,7 +19303,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, break; } /* Try to apply the flow to HW. */ @@ -26373,7 +37045,7 @@ index 62c38b87a1..22058ed980 100644 err = mlx5_flow_os_create_flow (flow.handle->dvh.matcher->matcher_object, diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c -index a3c8056515..102f67a925 100644 +index a3c8056515..6b889e9f81 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c @@ -1243,6 +1243,8 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, @@ -26385,7 +37057,33 @@ index a3c8056515..102f67a925 100644 aso_mtr = mlx5_ipool_malloc(priv->hws_mpool->idx_pool, &mtr_id); if (!aso_mtr) return NULL; -@@ -3252,14 +3254,18 @@ flow_hw_translate_group(struct rte_eth_dev *dev, +@@ -1361,7 +1363,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, + else + type = MLX5DR_TABLE_TYPE_NIC_RX; + for (; !actions_end; actions++, masks++) { +- switch (actions->type) { ++ switch ((int)actions->type) { + case RTE_FLOW_ACTION_TYPE_INDIRECT: + action_pos = at->actions_off[actions - at->actions]; + if (!attr->group) { +@@ -1665,6 +1667,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, + action_pos)) + goto err; + break; ++ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: ++ /* Internal, can be skipped. 
*/ ++ if (!!attr->group) { ++ DRV_LOG(ERR, "DEFAULT MISS action is only" ++ " supported in root table."); ++ goto err; ++ } ++ action_pos = at->actions_off[actions - at->actions]; ++ acts->rule_acts[action_pos].action = priv->hw_def_miss; ++ break; + case RTE_FLOW_ACTION_TYPE_END: + actions_end = true; + break; +@@ -3252,14 +3264,18 @@ flow_hw_translate_group(struct rte_eth_dev *dev, "group index not supported"); *table_group = group + 1; } else if (config->dv_esw_en && @@ -26409,7 +37107,95 @@ index a3c8056515..102f67a925 100644 */ if (group > MLX5_HW_MAX_EGRESS_GROUP) return rte_flow_error_set(error, EINVAL, -@@ -4534,6 +4540,9 @@ error: +@@ -3863,6 +3879,34 @@ flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev, + #undef X_FIELD + } + ++static int ++flow_hw_validate_action_default_miss(struct rte_eth_dev *dev, ++ const struct rte_flow_actions_template_attr *attr, ++ uint64_t action_flags, ++ struct rte_flow_error *error) ++{ ++ /* ++ * The private DEFAULT_MISS action is used internally for LACP in control ++ * flows. So this validation can be ignored. It can be kept right now since ++ * the validation will be done only once. ++ */ ++ struct mlx5_priv *priv = dev->data->dev_private; ++ ++ if (!attr->ingress || attr->egress || attr->transfer) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "DEFAULT MISS is only supported in ingress."); ++ if (!priv->hw_def_miss) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "DEFAULT MISS action does not exist."); ++ if (action_flags & MLX5_FLOW_FATE_ACTIONS) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "DEFAULT MISS should be the only termination."); ++ return 0; ++} ++ + static int + mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, + const struct rte_flow_actions_template_attr *attr, +@@ -3896,7 +3940,7 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "mask type does not match action type"); +- switch (action->type) { ++ switch ((int)action->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_INDIRECT: +@@ -4022,6 +4066,13 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, + case RTE_FLOW_ACTION_TYPE_END: + actions_end = true; + break; ++ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: ++ ret = flow_hw_validate_action_default_miss(dev, attr, ++ action_flags, error); ++ if (ret < 0) ++ return ret; ++ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; ++ break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, +@@ -4041,8 +4092,7 @@ flow_hw_actions_validate(struct rte_eth_dev *dev, + const struct rte_flow_action masks[], + struct rte_flow_error *error) + { +- return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL, +- error); ++ return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL, error); + } + + +@@ -4143,7 +4193,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) + + if (curr_off >= MLX5_HW_MAX_ACTS) + goto err_actions_num; +- switch (at->actions[i].type) { ++ switch ((int)at->actions[i].type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_INDIRECT: +@@ -4221,6 +4271,10 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) + } + at->actions_off[i] = cnt_off; + break; ++ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: ++ at->actions_off[i] = curr_off; ++ action_types[curr_off++] = MLX5DR_ACTION_TYP_MISS; ++ break; + 
default: + type = mlx5_hw_dr_action_types[at->actions[i].type]; + at->actions_off[i] = curr_off; +@@ -4534,6 +4588,9 @@ error: mlx5dr_action_template_destroy(at->tmpl); mlx5_free(at); } @@ -26419,7 +37205,7 @@ index a3c8056515..102f67a925 100644 return NULL; } -@@ -4614,8 +4623,9 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, +@@ -4614,8 +4671,9 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -26430,7 +37216,7 @@ index a3c8056515..102f67a925 100644 if (!attr->ingress && !attr->egress && !attr->transfer) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL, -@@ -4657,16 +4667,26 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, +@@ -4657,16 +4715,26 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, switch (type) { case RTE_FLOW_ITEM_TYPE_TAG: { @@ -26460,7 +37246,7 @@ index a3c8056515..102f67a925 100644 break; } case MLX5_RTE_FLOW_ITEM_TYPE_TAG: -@@ -4680,6 +4700,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, +@@ -4680,6 +4748,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Unsupported internal tag index"); @@ -26473,7 +37259,7 @@ index a3c8056515..102f67a925 100644 break; } case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: -@@ -4790,7 +4816,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev, +@@ -4790,7 +4864,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it; struct rte_flow_item *copied_items = NULL; const struct rte_flow_item *tmpl_items; @@ -26482,7 +37268,7 @@ index a3c8056515..102f67a925 100644 struct rte_flow_item port = { .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT, .mask = &rte_flow_item_ethdev_mask, -@@ -5271,12 +5297,14 @@ flow_hw_free_vport_actions(struct mlx5_priv *priv) +@@ -5271,12 +5345,14 @@ flow_hw_free_vport_actions(struct mlx5_priv *priv) * * @param dev * Pointer to Ethernet device. @@ -26498,7 +37284,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5295,7 +5323,7 @@ flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev) +@@ -5295,7 +5371,7 @@ flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev) }, }; @@ -26507,7 +37293,7 @@ index a3c8056515..102f67a925 100644 } static __rte_always_inline uint32_t -@@ -5353,12 +5381,15 @@ flow_hw_update_action_mask(struct rte_flow_action *action, +@@ -5353,12 +5429,15 @@ flow_hw_update_action_mask(struct rte_flow_action *action, * * @param dev * Pointer to Ethernet device. @@ -26524,7 +37310,7 @@ index a3c8056515..102f67a925 100644 { uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev); uint32_t tag_value = flow_hw_tx_tag_regc_value(dev); -@@ -5444,7 +5475,7 @@ flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev) +@@ -5444,7 +5523,7 @@ flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev) NULL, NULL); idx++; MLX5_ASSERT(idx <= RTE_DIM(actions_v)); @@ -26533,7 +37319,7 @@ index a3c8056515..102f67a925 100644 } static void -@@ -5473,12 +5504,14 @@ flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev) +@@ -5473,12 +5552,14 @@ flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. 
@@ -26549,7 +37335,7 @@ index a3c8056515..102f67a925 100644 { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_template_table_attr attr = { -@@ -5496,20 +5529,22 @@ flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev) +@@ -5496,20 +5577,22 @@ flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev) MLX5_ASSERT(priv->sh->config.dv_esw_en); MLX5_ASSERT(priv->sh->config.repr_matching); @@ -26579,7 +37365,7 @@ index a3c8056515..102f67a925 100644 flow_hw_cleanup_tx_repr_tagging(dev); return -rte_errno; } -@@ -5540,12 +5575,15 @@ flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev) +@@ -5540,12 +5623,15 @@ flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -26596,7 +37382,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5575,7 +5613,7 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) +@@ -5575,7 +5661,7 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) }, }; @@ -26605,7 +37391,7 @@ index a3c8056515..102f67a925 100644 } /** -@@ -5588,12 +5626,15 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) +@@ -5588,12 +5674,15 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -26622,7 +37408,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5626,7 +5667,7 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) +@@ -5626,7 +5715,7 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) }, }; @@ -26631,7 +37417,7 @@ index a3c8056515..102f67a925 100644 } /** -@@ -5636,12 +5677,15 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) +@@ -5636,12 +5725,15 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -26648,7 +37434,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5660,7 +5704,7 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) +@@ -5660,7 +5752,7 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) }, }; @@ -26657,7 +37443,7 @@ index a3c8056515..102f67a925 100644 } /* -@@ -5670,12 +5714,15 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) +@@ -5670,12 +5762,15 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -26674,7 +37460,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_pattern_template_attr tx_pa_attr = { .relaxed_matching = 0, -@@ -5696,10 +5743,8 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) +@@ -5696,10 +5791,44 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) .type = RTE_FLOW_ITEM_TYPE_END, }, }; @@ -26683,10 +37469,46 @@ index a3c8056515..102f67a925 100644 - RTE_SET_USED(drop_err); - return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, &drop_err); + return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, error); ++} ++ ++/* ++ * Creating a flow pattern template with all LACP packets matching, only for NIC ++ * ingress domain. ++ * ++ * @param dev ++ * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. ++ * ++ * @return ++ * Pointer to flow pattern template on success, NULL otherwise. 
++ */ ++static struct rte_flow_pattern_template * ++flow_hw_create_lacp_rx_pattern_template(struct rte_eth_dev *dev, struct rte_flow_error *error) ++{ ++ struct rte_flow_pattern_template_attr pa_attr = { ++ .relaxed_matching = 0, ++ .ingress = 1, ++ }; ++ struct rte_flow_item_eth lacp_mask = { ++ .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00", ++ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00", ++ .type = 0xFFFF, ++ }; ++ struct rte_flow_item eth_all[] = { ++ [0] = { ++ .type = RTE_FLOW_ITEM_TYPE_ETH, ++ .mask = &lacp_mask, ++ }, ++ [1] = { ++ .type = RTE_FLOW_ITEM_TYPE_END, ++ }, ++ }; ++ return flow_hw_pattern_template_create(dev, &pa_attr, eth_all, error); } /** -@@ -5710,12 +5755,15 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) +@@ -5710,12 +5839,15 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -26703,7 +37525,7 @@ index a3c8056515..102f67a925 100644 { uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev); uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev); -@@ -5781,7 +5829,7 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) +@@ -5781,7 +5913,7 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) set_reg_v.dst.offset = rte_bsf32(marker_mask); rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits)); rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask)); @@ -26712,7 +37534,7 @@ index a3c8056515..102f67a925 100644 } /** -@@ -5793,13 +5841,16 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) +@@ -5793,13 +5925,16 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) * Pointer to Ethernet device. * @param group * Destination group for this action template. @@ -26730,7 +37552,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_actions_template_attr attr = { .transfer = 1, -@@ -5829,8 +5880,8 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, +@@ -5829,8 +5964,8 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, } }; @@ -26741,7 +37563,7 @@ index a3c8056515..102f67a925 100644 } /** -@@ -5839,12 +5890,15 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, +@@ -5839,12 +5974,15 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. @@ -26758,7 +37580,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_actions_template_attr attr = { .transfer = 1, -@@ -5874,8 +5928,7 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) +@@ -5874,8 +6012,7 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) } }; @@ -26768,7 +37590,7 @@ index a3c8056515..102f67a925 100644 } /* -@@ -5884,12 +5937,15 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) +@@ -5884,12 +6021,15 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. 
@@ -26785,7 +37607,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_actions_template_attr tx_act_attr = { .egress = 1, -@@ -5952,11 +6008,9 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) +@@ -5952,11 +6092,41 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, }, }; @@ -26795,10 +37617,42 @@ index a3c8056515..102f67a925 100644 return flow_hw_actions_template_create(dev, &tx_act_attr, actions, - masks, &drop_err); + masks, error); ++} ++ ++/* ++ * Creating an actions template to use default miss to re-route packets to the ++ * kernel driver stack. ++ * On root table, only DEFAULT_MISS action can be used. ++ * ++ * @param dev ++ * Pointer to Ethernet device. ++ * @param error ++ * Pointer to error structure. ++ * ++ * @return ++ * Pointer to flow actions template on success, NULL otherwise. ++ */ ++static struct rte_flow_actions_template * ++flow_hw_create_lacp_rx_actions_template(struct rte_eth_dev *dev, struct rte_flow_error *error) ++{ ++ struct rte_flow_actions_template_attr act_attr = { ++ .ingress = 1, ++ }; ++ const struct rte_flow_action actions[] = { ++ [0] = { ++ .type = (enum rte_flow_action_type) ++ MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS, ++ }, ++ [1] = { ++ .type = RTE_FLOW_ACTION_TYPE_END, ++ }, ++ }; ++ ++ return flow_hw_actions_template_create(dev, &act_attr, actions, actions, error); } /** -@@ -5969,6 +6023,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) +@@ -5969,6 +6139,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -26807,7 +37661,7 @@ index a3c8056515..102f67a925 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -5976,7 +6032,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) +@@ -5976,7 +6148,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) static struct rte_flow_template_table* flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it, @@ -26817,7 +37671,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_template_table_attr attr = { .flow_attr = { -@@ -5993,7 +6050,7 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, +@@ -5993,7 +6166,7 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, .external = false, }; @@ -26826,7 +37680,7 @@ index a3c8056515..102f67a925 100644 } -@@ -6007,6 +6064,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, +@@ -6007,6 +6180,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -26835,7 +37689,7 @@ index a3c8056515..102f67a925 100644 * * @return * Pointer to flow table on success, NULL otherwise. 
-@@ -6014,7 +6073,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, +@@ -6014,7 +6189,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, static struct rte_flow_template_table* flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it, @@ -26845,7 +37699,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_template_table_attr attr = { .flow_attr = { -@@ -6031,7 +6091,7 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, +@@ -6031,7 +6207,7 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, .external = false, }; @@ -26854,7 +37708,7 @@ index a3c8056515..102f67a925 100644 } /* -@@ -6043,6 +6103,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, +@@ -6043,6 +6219,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -26863,7 +37717,7 @@ index a3c8056515..102f67a925 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -6050,7 +6112,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, +@@ -6050,7 +6228,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, static struct rte_flow_template_table* flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *pt, @@ -26873,7 +37727,7 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_template_table_attr tx_tbl_attr = { .flow_attr = { -@@ -6064,14 +6127,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, +@@ -6064,14 +6243,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, .attr = tx_tbl_attr, .external = false, }; @@ -26889,7 +37743,7 @@ index a3c8056515..102f67a925 100644 } /** -@@ -6084,6 +6141,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, +@@ -6084,6 +6257,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -26898,7 +37752,7 @@ index a3c8056515..102f67a925 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -6091,7 +6150,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, +@@ -6091,7 +6266,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, static struct rte_flow_template_table * flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it, @@ -26908,16 +37762,53 @@ index a3c8056515..102f67a925 100644 { struct rte_flow_template_table_attr attr = { .flow_attr = { -@@ -6108,7 +6168,7 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, +@@ -6108,7 +6284,44 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, .external = false, }; - return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL); + return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error); ++} ++ ++/* ++ * Create a table on the root group to for the LACP traffic redirecting. ++ * ++ * @param dev ++ * Pointer to Ethernet device. ++ * @param it ++ * Pointer to flow pattern template. ++ * @param at ++ * Pointer to flow actions template. ++ * ++ * @return ++ * Pointer to flow table on success, NULL otherwise. 
++ */ ++static struct rte_flow_template_table * ++flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev, ++ struct rte_flow_pattern_template *it, ++ struct rte_flow_actions_template *at, ++ struct rte_flow_error *error) ++{ ++ struct rte_flow_template_table_attr attr = { ++ .flow_attr = { ++ .group = 0, ++ .priority = 0, ++ .ingress = 1, ++ .egress = 0, ++ .transfer = 0, ++ }, ++ .nb_flows = 1, ++ }; ++ struct mlx5_flow_template_table_cfg cfg = { ++ .attr = attr, ++ .external = false, ++ }; ++ ++ return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error); } /** -@@ -6117,12 +6177,14 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, +@@ -6117,114 +6330,155 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. @@ -26934,8 +37825,15 @@ index a3c8056515..102f67a925 100644 { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL; -@@ -6135,96 +6197,107 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev) + struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL; + struct rte_flow_pattern_template *port_items_tmpl = NULL; + struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL; ++ struct rte_flow_pattern_template *lacp_rx_items_tmpl = NULL; + struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL; + struct rte_flow_actions_template *port_actions_tmpl = NULL; + struct rte_flow_actions_template *jump_one_actions_tmpl = NULL; struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL; ++ struct rte_flow_actions_template *lacp_rx_actions_tmpl = NULL; uint32_t xmeta = priv->sh->config.dv_xmeta_en; uint32_t repr_matching = priv->sh->config.repr_matching; + int ret; @@ -27054,6 +37952,28 @@ index a3c8056515..102f67a925 100644 " Tx metadata copy flow rule", dev->data->port_id); - goto error; + goto err; ++ } ++ } ++ /* Create LACP default miss table. 
*/ ++ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) { ++ lacp_rx_items_tmpl = flow_hw_create_lacp_rx_pattern_template(dev, error); ++ if (!lacp_rx_items_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create pattern template" ++ " for LACP Rx traffic", dev->data->port_id); ++ goto err; ++ } ++ lacp_rx_actions_tmpl = flow_hw_create_lacp_rx_actions_template(dev, error); ++ if (!lacp_rx_actions_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create actions template" ++ " for LACP Rx traffic", dev->data->port_id); ++ goto err; ++ } ++ priv->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table(dev, lacp_rx_items_tmpl, ++ lacp_rx_actions_tmpl, error); ++ if (!priv->hw_lacp_rx_tbl) { ++ DRV_LOG(ERR, "port %u failed to create template table for" ++ " for LACP Rx traffic", dev->data->port_id); ++ goto err; } } return 0; @@ -27065,10 +37985,32 @@ index a3c8056515..102f67a925 100644 + ret = rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to create control tables."); ++ if (priv->hw_tx_meta_cpy_tbl) { ++ flow_hw_table_destroy(dev, priv->hw_tx_meta_cpy_tbl, NULL); ++ priv->hw_tx_meta_cpy_tbl = NULL; ++ } if (priv->hw_esw_zero_tbl) { flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL); priv->hw_esw_zero_tbl = NULL; -@@ -6253,7 +6326,7 @@ error: +@@ -6237,6 +6491,8 @@ error: + flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL); + priv->hw_esw_sq_miss_root_tbl = NULL; + } ++ if (lacp_rx_actions_tmpl) ++ flow_hw_actions_template_destroy(dev, lacp_rx_actions_tmpl, NULL); + if (tx_meta_actions_tmpl) + flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL); + if (jump_one_actions_tmpl) +@@ -6245,6 +6501,8 @@ error: + flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL); + if (regc_jump_actions_tmpl) + flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL); ++ if (lacp_rx_items_tmpl) ++ flow_hw_pattern_template_destroy(dev, lacp_rx_items_tmpl, NULL); + if (tx_meta_items_tmpl) + flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL); + if (port_items_tmpl) +@@ -6253,7 +6511,7 @@ error: flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL); if (esw_mgr_items_tmpl) flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL); @@ -27077,7 +38019,7 @@ index a3c8056515..102f67a925 100644 } static void -@@ -6376,27 +6449,28 @@ flow_hw_create_vlan(struct rte_eth_dev *dev) +@@ -6376,27 +6634,28 @@ flow_hw_create_vlan(struct rte_eth_dev *dev) MLX5DR_ACTION_FLAG_HWS_FDB }; @@ -27110,7 +38052,15 @@ index a3c8056515..102f67a925 100644 } return 0; } -@@ -6836,8 +6910,7 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6807,6 +7066,7 @@ flow_hw_configure(struct rte_eth_dev *dev, + struct rte_flow_queue_attr ctrl_queue_attr = {0}; + bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master); + int ret = 0; ++ uint32_t action_flags; + + if (!port_attr || !nb_queue || !queue_attr) { + rte_errno = EINVAL; +@@ -6836,8 +7096,7 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } @@ -27120,7 +38070,15 @@ index a3c8056515..102f67a925 100644 _queue_attr[nb_queue] = &ctrl_queue_attr; priv->acts_ipool = mlx5_ipool_create(&cfg); if (!priv->acts_ipool) -@@ -6952,23 +7025,20 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6926,6 +7185,7 @@ flow_hw_configure(struct rte_eth_dev *dev, + priv->nb_queue = nb_q_updated; + rte_spinlock_init(&priv->hw_ctrl_lock); + LIST_INIT(&priv->hw_ctrl_flows); ++ LIST_INIT(&priv->hw_ext_ctrl_flows); + ret = flow_hw_create_ctrl_rx_tables(dev); + if (ret) { + rte_flow_error_set(error, -ret, 
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, +@@ -6952,23 +7212,34 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) { @@ -27132,6 +38090,20 @@ index a3c8056515..102f67a925 100644 goto err; - } } ++ /* ++ * DEFAULT_MISS action have different behaviors in different domains. ++ * In FDB, it will steering the packets to the E-switch manager. ++ * In NIC Rx root, it will steering the packet to the kernel driver stack. ++ * An action with all bits set in the flag can be created and the HWS ++ * layer will translate it properly when being used in different rules. ++ */ ++ action_flags = MLX5DR_ACTION_FLAG_ROOT_RX | MLX5DR_ACTION_FLAG_HWS_RX | ++ MLX5DR_ACTION_FLAG_ROOT_TX | MLX5DR_ACTION_FLAG_HWS_TX; ++ if (is_proxy) ++ action_flags |= (MLX5DR_ACTION_FLAG_ROOT_FDB | MLX5DR_ACTION_FLAG_HWS_FDB); ++ priv->hw_def_miss = mlx5dr_action_create_default_miss(priv->dr_ctx, action_flags); ++ if (!priv->hw_def_miss) ++ goto err; if (is_proxy) { ret = flow_hw_create_vport_actions(priv); if (ret) { @@ -27150,7 +38122,7 @@ index a3c8056515..102f67a925 100644 } if (port_attr->nb_conn_tracks) { mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated + -@@ -7005,12 +7075,18 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -7005,12 +7276,18 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue); @@ -27171,7 +38143,7 @@ index a3c8056515..102f67a925 100644 if (_queue_attr) mlx5_free(_queue_attr); if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE) -@@ -7178,9 +7254,9 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) +@@ -7178,9 +7455,9 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) uint32_t meta_mode = priv->sh->config.dv_xmeta_en; uint8_t masks = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c; uint32_t i, j; @@ -27183,7 +38155,7 @@ index a3c8056515..102f67a925 100644 /* * The CAPA is global for common device but only used in net. -@@ -7195,29 +7271,35 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) +@@ -7195,29 +7472,35 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) if (meta_mode == MLX5_XMETA_MODE_META32_HWS) unset |= 1 << (REG_C_1 - REG_C_0); masks &= ~unset; @@ -27224,12 +38196,12 @@ index a3c8056515..102f67a925 100644 + masks = common_masks; + else + goto after_avl_tags; -+ } + } + j = 0; + for (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) { + if ((1 << i) & masks) + mlx5_flow_hw_avl_tags[j++] = (enum modify_reg)(i + (uint32_t)REG_C_0); - } ++ } + /* Clear the rest of unusable tag indexes. */ + for (; j < MLX5_FLOW_HW_TAGS_MAX; j++) + mlx5_flow_hw_avl_tags[j] = REG_NON; @@ -27237,6 +38209,337 @@ index a3c8056515..102f67a925 100644 priv->sh->hws_tags = 1; mlx5_flow_hw_aso_tag = (enum modify_reg)priv->mtr_color_reg; mlx5_flow_hw_avl_tags_init_cnt++; +@@ -8355,6 +8638,10 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = { + * Pointer to flow rule actions. + * @param action_template_idx + * Index of an action template associated with @p table. ++ * @param info ++ * Additional info about control flow rule. ++ * @param external ++ * External ctrl flow. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno set. 
+@@ -8366,7 +8653,9 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, + struct rte_flow_item items[], + uint8_t item_template_idx, + struct rte_flow_action actions[], +- uint8_t action_template_idx) ++ uint8_t action_template_idx, ++ struct mlx5_hw_ctrl_flow_info *info, ++ bool external) + { + struct mlx5_priv *priv = proxy_dev->data->dev_private; + uint32_t queue = CTRL_QUEUE_ID(priv); +@@ -8413,7 +8702,14 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, + } + entry->owner_dev = owner_dev; + entry->flow = flow; +- LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next); ++ if (info) ++ entry->info = *info; ++ else ++ entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL; ++ if (external) ++ LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next); ++ else ++ LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next); + rte_spinlock_unlock(&priv->hw_ctrl_lock); + return 0; + error: +@@ -8587,11 +8883,23 @@ flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev) + mlx5_free(cf); + cf = cf_next; + } ++ cf = LIST_FIRST(&priv->hw_ext_ctrl_flows); ++ while (cf != NULL) { ++ cf_next = LIST_NEXT(cf, next); ++ ret = flow_hw_destroy_ctrl_flow(dev, cf->flow); ++ if (ret) { ++ rte_errno = ret; ++ return -ret; ++ } ++ LIST_REMOVE(cf, next); ++ mlx5_free(cf); ++ cf = cf_next; ++ } + return 0; + } + + int +-mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) ++mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external) + { + uint16_t port_id = dev->data->port_id; + struct rte_flow_item_ethdev esw_mgr_spec = { +@@ -8616,6 +8924,10 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) + }; + struct rte_flow_item items[3] = { { 0 } }; + struct rte_flow_action actions[3] = { { 0 } }; ++ struct mlx5_hw_ctrl_flow_info flow_info = { ++ .type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT, ++ .esw_mgr_sq = sqn, ++ }; + struct rte_eth_dev *proxy_dev; + struct mlx5_priv *proxy_priv; + uint16_t proxy_port_id = dev->data->port_id; +@@ -8671,7 +8983,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) + .type = RTE_FLOW_ACTION_TYPE_END, + }; + ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl, +- items, 0, actions, 0); ++ items, 0, actions, 0, &flow_info, external); + if (ret) { + DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d", + port_id, sqn, ret); +@@ -8700,8 +9012,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) + actions[1] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; ++ flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS; + ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl, +- items, 0, actions, 0); ++ items, 0, actions, 0, &flow_info, external); + if (ret) { + DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d", + port_id, sqn, ret); +@@ -8710,6 +9023,58 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) + return 0; + } + ++static bool ++flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf, ++ struct rte_eth_dev *dev, ++ uint32_t sqn) ++{ ++ if (cf->owner_dev != dev) ++ return false; ++ if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn) ++ return true; ++ if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn) ++ return true; ++ return false; ++} ++ ++int ++mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) ++{ ++ 
uint16_t port_id = dev->data->port_id; ++ uint16_t proxy_port_id = dev->data->port_id; ++ struct rte_eth_dev *proxy_dev; ++ struct mlx5_priv *proxy_priv; ++ struct mlx5_hw_ctrl_flow *cf; ++ struct mlx5_hw_ctrl_flow *cf_next; ++ int ret; ++ ++ ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL); ++ if (ret) { ++ DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy " ++ "port must be present for default SQ miss flow rules to exist.", ++ port_id); ++ return ret; ++ } ++ proxy_dev = &rte_eth_devices[proxy_port_id]; ++ proxy_priv = proxy_dev->data->dev_private; ++ if (!proxy_priv->dr_ctx) ++ return 0; ++ if (!proxy_priv->hw_esw_sq_miss_root_tbl || ++ !proxy_priv->hw_esw_sq_miss_tbl) ++ return 0; ++ cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows); ++ while (cf != NULL) { ++ cf_next = LIST_NEXT(cf, next); ++ if (flow_hw_is_matching_sq_miss_flow(cf, dev, sqn)) { ++ claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow)); ++ LIST_REMOVE(cf, next); ++ mlx5_free(cf); ++ } ++ cf = cf_next; ++ } ++ return 0; ++} ++ + int + mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) + { +@@ -8738,6 +9103,9 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) + .type = RTE_FLOW_ACTION_TYPE_END, + } + }; ++ struct mlx5_hw_ctrl_flow_info flow_info = { ++ .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP, ++ }; + struct rte_eth_dev *proxy_dev; + struct mlx5_priv *proxy_priv; + uint16_t proxy_port_id = dev->data->port_id; +@@ -8768,7 +9136,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) + } + return flow_hw_create_ctrl_flow(dev, proxy_dev, + proxy_priv->hw_esw_zero_tbl, +- items, 0, actions, 0); ++ items, 0, actions, 0, &flow_info, false); + } + + int +@@ -8814,17 +9182,20 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev) + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; ++ struct mlx5_hw_ctrl_flow_info flow_info = { ++ .type = MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY, ++ }; + + MLX5_ASSERT(priv->master); + if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl) + return 0; + return flow_hw_create_ctrl_flow(dev, dev, + priv->hw_tx_meta_cpy_tbl, +- eth_all, 0, copy_reg_action, 0); ++ eth_all, 0, copy_reg_action, 0, &flow_info, false); + } + + int +-mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) ++mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rte_flow_item_sq sq_spec = { +@@ -8849,6 +9220,10 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) + { .type = RTE_FLOW_ACTION_TYPE_END }, + { .type = RTE_FLOW_ACTION_TYPE_END }, + }; ++ struct mlx5_hw_ctrl_flow_info flow_info = { ++ .type = MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH, ++ .tx_repr_sq = sqn, ++ }; + + /* It is assumed that caller checked for representor matching. 
*/ + MLX5_ASSERT(priv->sh->config.repr_matching); +@@ -8874,7 +9249,44 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) + actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP; + } + return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl, +- items, 0, actions, 0); ++ items, 0, actions, 0, &flow_info, external); ++} ++ ++int ++mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev) ++{ ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct rte_flow_item_eth lacp_item = { ++ .type = RTE_BE16(RTE_ETHER_TYPE_SLOW), ++ }; ++ struct rte_flow_item eth_lacp[] = { ++ [0] = { ++ .type = RTE_FLOW_ITEM_TYPE_ETH, ++ .spec = &lacp_item, ++ .mask = &lacp_item, ++ }, ++ [1] = { ++ .type = RTE_FLOW_ITEM_TYPE_END, ++ }, ++ }; ++ struct rte_flow_action miss_action[] = { ++ [0] = { ++ .type = (enum rte_flow_action_type) ++ MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS, ++ }, ++ [1] = { ++ .type = RTE_FLOW_ACTION_TYPE_END, ++ }, ++ }; ++ struct mlx5_hw_ctrl_flow_info flow_info = { ++ .type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX, ++ }; ++ ++ MLX5_ASSERT(priv->master); ++ if (!priv->dr_ctx || !priv->hw_lacp_rx_tbl) ++ return 0; ++ return flow_hw_create_ctrl_flow(dev, dev, priv->hw_lacp_rx_tbl, eth_lacp, 0, ++ miss_action, 0, &flow_info, false); + } + + static uint32_t +@@ -8989,6 +9401,9 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, + { .type = RTE_FLOW_ACTION_TYPE_RSS }, + { .type = RTE_FLOW_ACTION_TYPE_END }, + }; ++ struct mlx5_hw_ctrl_flow_info flow_info = { ++ .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, ++ }; + + if (!eth_spec) + return -EINVAL; +@@ -9002,7 +9417,7 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, + items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type); + items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END }; + /* Without VLAN filtering, only a single flow rule must be created. 
*/ +- return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0); ++ return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false); + } + + static int +@@ -9018,6 +9433,9 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, + { .type = RTE_FLOW_ACTION_TYPE_RSS }, + { .type = RTE_FLOW_ACTION_TYPE_END }, + }; ++ struct mlx5_hw_ctrl_flow_info flow_info = { ++ .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, ++ }; + unsigned int i; + + if (!eth_spec) +@@ -9040,7 +9458,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, + }; + + items[1].spec = &vlan_spec; +- if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0)) ++ if (flow_hw_create_ctrl_flow(dev, dev, ++ tbl, items, 0, actions, 0, &flow_info, false)) + return -rte_errno; + } + return 0; +@@ -9058,6 +9477,9 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, + { .type = RTE_FLOW_ACTION_TYPE_RSS }, + { .type = RTE_FLOW_ACTION_TYPE_END }, + }; ++ struct mlx5_hw_ctrl_flow_info flow_info = { ++ .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, ++ }; + const struct rte_ether_addr cmp = { + .addr_bytes = "\x00\x00\x00\x00\x00\x00", + }; +@@ -9081,7 +9503,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, + if (!memcmp(mac, &cmp, sizeof(*mac))) + continue; + memcpy(ð_spec.dst.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN); +- if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0)) ++ if (flow_hw_create_ctrl_flow(dev, dev, ++ tbl, items, 0, actions, 0, &flow_info, false)) + return -rte_errno; + } + return 0; +@@ -9100,6 +9523,9 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, + { .type = RTE_FLOW_ACTION_TYPE_RSS }, + { .type = RTE_FLOW_ACTION_TYPE_END }, + }; ++ struct mlx5_hw_ctrl_flow_info flow_info = { ++ .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, ++ }; + const struct rte_ether_addr cmp = { + .addr_bytes = "\x00\x00\x00\x00\x00\x00", + }; +@@ -9131,7 +9557,8 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, + }; + + items[1].spec = &vlan_spec; +- if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0)) ++ if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, ++ &flow_info, false)) + return -rte_errno; + } + } diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c index 28ea28bfbe..1e9c7cf7c5 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c @@ -27269,10 +38572,48 @@ index 28ea28bfbe..1e9c7cf7c5 100644 error); if (ret < 0) diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c -index 51704ef754..8ccc6ab1f8 100644 +index 51704ef754..791fde4458 100644 --- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c +++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c -@@ -410,8 +410,7 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, +@@ -306,26 +306,25 @@ mlx5_hws_cnt_svc(void *opaque) + (struct mlx5_dev_ctx_shared *)opaque; + uint64_t interval = + (uint64_t)sh->cnt_svc->query_interval * (US_PER_S / MS_PER_S); +- uint16_t port_id; ++ struct mlx5_hws_cnt_pool *hws_cpool; + uint64_t start_cycle, query_cycle = 0; + uint64_t query_us; + uint64_t sleep_us; + + while (sh->cnt_svc->svc_running != 0) { ++ if (rte_spinlock_trylock(&sh->cpool_lock) == 0) ++ continue; + start_cycle = rte_rdtsc(); +- MLX5_ETH_FOREACH_DEV(port_id, sh->cdev->dev) { +- struct mlx5_priv *opriv = +- rte_eth_devices[port_id].data->dev_private; +- if (opriv != NULL && +- opriv->sh == sh && +- opriv->hws_cpool != NULL) { +- __mlx5_hws_cnt_svc(sh, opriv->hws_cpool); +- if (opriv->hws_age_req) +- 
mlx5_hws_aging_check(opriv, +- opriv->hws_cpool); +- } ++ /* 200ms for 16M counters. */ ++ LIST_FOREACH(hws_cpool, &sh->hws_cpool_list, next) { ++ struct mlx5_priv *opriv = hws_cpool->priv; ++ ++ __mlx5_hws_cnt_svc(sh, hws_cpool); ++ if (opriv->hws_age_req) ++ mlx5_hws_aging_check(opriv, hws_cpool); + } + query_cycle = rte_rdtsc() - start_cycle; ++ rte_spinlock_unlock(&sh->cpool_lock); + query_us = query_cycle / (rte_get_timer_hz() / US_PER_S); + sleep_us = interval - query_us; + if (interval > query_us) +@@ -410,8 +409,7 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, goto error; } for (qidx = 0; qidx < ccfg->q_num; qidx++) { @@ -27282,7 +38623,7 @@ index 51704ef754..8ccc6ab1f8 100644 cntp->cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, SOCKET_ID_ANY, RING_F_SP_ENQ | RING_F_SC_DEQ | -@@ -634,7 +633,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, +@@ -634,7 +632,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, SOCKET_ID_ANY); if (mp_name == NULL) goto error; @@ -27291,6 +38632,65 @@ index 51704ef754..8ccc6ab1f8 100644 dev->data->port_id); pcfg.name = mp_name; pcfg.request_num = pattr->nb_counters; +@@ -660,6 +658,10 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, + if (ret != 0) + goto error; + priv->sh->cnt_svc->refcnt++; ++ cpool->priv = priv; ++ rte_spinlock_lock(&priv->sh->cpool_lock); ++ LIST_INSERT_HEAD(&priv->sh->hws_cpool_list, cpool, next); ++ rte_spinlock_unlock(&priv->sh->cpool_lock); + return cpool; + error: + mlx5_hws_cnt_pool_destroy(priv->sh, cpool); +@@ -672,6 +674,13 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh, + { + if (cpool == NULL) + return; ++ /* ++ * 16M counter consumes 200ms to finish the query. ++ * Maybe blocked for at most 200ms here. ++ */ ++ rte_spinlock_lock(&sh->cpool_lock); ++ LIST_REMOVE(cpool, next); ++ rte_spinlock_unlock(&sh->cpool_lock); + if (--sh->cnt_svc->refcnt == 0) + mlx5_hws_cnt_svc_deinit(sh); + mlx5_hws_cnt_pool_action_destroy(cpool); +@@ -1229,11 +1238,13 @@ mlx5_hws_age_pool_destroy(struct mlx5_priv *priv) + { + struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv); + ++ rte_spinlock_lock(&priv->sh->cpool_lock); + MLX5_ASSERT(priv->hws_age_req); + mlx5_hws_age_info_destroy(priv); + mlx5_ipool_destroy(age_info->ages_ipool); + age_info->ages_ipool = NULL; + priv->hws_age_req = 0; ++ rte_spinlock_unlock(&priv->sh->cpool_lock); + } + + #endif +diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h +index 030dcead86..b5c19a8e2c 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h ++++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h +@@ -97,6 +97,7 @@ struct mlx5_hws_cnt_pool_caches { + }; + + struct mlx5_hws_cnt_pool { ++ LIST_ENTRY(mlx5_hws_cnt_pool) next; + struct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned; + struct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned; + uint32_t query_gen __rte_cache_aligned; +@@ -107,6 +108,7 @@ struct mlx5_hws_cnt_pool { + struct rte_ring *wait_reset_list; + struct mlx5_hws_cnt_pool_caches *cache; + uint64_t time_of_last_age_check; ++ struct mlx5_priv *priv; + } __rte_cache_aligned; + + /* HWS AGE status. 
*/ diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c index 917c517b83..b41f7a51f5 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rx.c @@ -27581,7 +38981,7 @@ index e078aaf3dc..6b42e27c89 100644 uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); diff --git a/dpdk/drivers/net/mlx5/mlx5_rxq.c b/dpdk/drivers/net/mlx5/mlx5_rxq.c -index 81aa3f074a..6b2af87cd2 100644 +index 81aa3f074a..9179b9d9d7 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxq.c +++ b/dpdk/drivers/net/mlx5/mlx5_rxq.c @@ -528,12 +528,12 @@ mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx) @@ -27602,44 +39002,35 @@ index 81aa3f074a..6b2af87cd2 100644 rte_errno = EINVAL; return -EINVAL; } -@@ -1601,23 +1601,38 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, +@@ -1601,10 +1601,10 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, } else { *actual_log_stride_num = config->mprq.log_stride_num; } - if (config->mprq.log_stride_size) { - /* Checks if chosen size of stride is in supported range. */ -- if (config->mprq.log_stride_size > log_max_stride_size || -- config->mprq.log_stride_size < log_min_stride_size) { -- *actual_log_stride_size = log_def_stride_size; + /* Checks if chosen size of stride is in supported range. */ -+ if (config->mprq.log_stride_size > log_max_stride_size || -+ config->mprq.log_stride_size < log_min_stride_size) { -+ *actual_log_stride_size = log_def_stride_size; -+ DRV_LOG(WARNING, -+ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)", -+ dev->data->port_id, idx, -+ RTE_BIT32(log_def_stride_size)); -+ } else { -+ *actual_log_stride_size = config->mprq.log_stride_size; -+ } -+ /* Make the stride fit the mbuf size by default. */ -+ if (*actual_log_stride_size == MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE) { -+ if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) { ++ if (config->mprq.log_stride_size != (uint32_t)MLX5_ARG_UNSET) { + if (config->mprq.log_stride_size > log_max_stride_size || +- config->mprq.log_stride_size < log_min_stride_size) { ++ config->mprq.log_stride_size < log_min_stride_size) { + *actual_log_stride_size = log_def_stride_size; DRV_LOG(WARNING, -- "Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)", -- dev->data->port_id, idx, -- RTE_BIT32(log_def_stride_size)); -+ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to match the mbuf size (%u)", -+ dev->data->port_id, idx, min_mbuf_size); -+ *actual_log_stride_size = log2above(min_mbuf_size); - } else { -- *actual_log_stride_size = config->mprq.log_stride_size; -+ goto unsupport; + "Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)", +@@ -1614,10 +1614,26 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + *actual_log_stride_size = config->mprq.log_stride_size; } -- } else { + } else { - if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) -- *actual_log_stride_size = log2above(min_mbuf_size); ++ /* Make the stride fit the mbuf size by default. */ ++ if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) { ++ DRV_LOG(WARNING, ++ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to match the mbuf size (%u)", ++ dev->data->port_id, idx, min_mbuf_size); + *actual_log_stride_size = log2above(min_mbuf_size); - else ++ } else { + goto unsupport; ++ } + } + /* Make sure the stride size is greater than the headroom. 
*/ + if (RTE_BIT32(*actual_log_stride_size) < RTE_PKTMBUF_HEADROOM) { @@ -27649,11 +39040,36 @@ index 81aa3f074a..6b2af87cd2 100644 + dev->data->port_id, idx, RTE_PKTMBUF_HEADROOM); + *actual_log_stride_size = log2above(RTE_PKTMBUF_HEADROOM); + } else { - goto unsupport; ++ goto unsupport; + } } log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size; /* Check if WQE buffer size is supported by hardware. */ +@@ -1657,6 +1673,8 @@ unsupport: + " min_stride_sz = %u, max_stride_sz = %u).\n" + "Rx segment is %senabled. External mempool is %sused.", + dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n, ++ config->mprq.log_stride_size == (uint32_t)MLX5_ARG_UNSET ? ++ RTE_BIT32(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE) : + RTE_BIT32(config->mprq.log_stride_size), + RTE_BIT32(config->mprq.log_stride_num), + config->mprq.min_rxqs_num, +@@ -2262,6 +2280,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) + RTE_ETH_QUEUE_STATE_STOPPED; + } + } else { /* Refcnt zero, closing device. */ ++ LIST_REMOVE(rxq_ctrl, next); + LIST_REMOVE(rxq, owner_entry); + if (LIST_EMPTY(&rxq_ctrl->owners)) { + if (!rxq_ctrl->is_hairpin) +@@ -2269,7 +2288,6 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) + (&rxq_ctrl->rxq.mr_ctrl.cache_bh); + if (rxq_ctrl->rxq.shared) + LIST_REMOVE(rxq_ctrl, share_entry); +- LIST_REMOVE(rxq_ctrl, next); + mlx5_free(rxq_ctrl); + } + dev->data->rx_queues[idx] = NULL; diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c index 0e2eab068a..667475a93e 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c @@ -27879,10 +39295,85 @@ index f64fa3587b..615e1d073d 100644 } mlx5_xstats_n = mlx5_txpp_xstats_get_names(dev, xstats_names, diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c -index f54443ed1a..6479e44a94 100644 +index f54443ed1a..b12a1dc1c7 100644 --- a/dpdk/drivers/net/mlx5/mlx5_trigger.c +++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c -@@ -896,11 +896,11 @@ mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port) +@@ -226,17 +226,17 @@ mlx5_rxq_start(struct rte_eth_dev *dev) + if (rxq == NULL) + continue; + rxq_ctrl = rxq->ctrl; +- if (!rxq_ctrl->started) { ++ if (!rxq_ctrl->started) + if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0) + goto error; +- LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next); +- } + ret = priv->obj_ops.rxq_obj_new(rxq); + if (ret) { + mlx5_free(rxq_ctrl->obj); + rxq_ctrl->obj = NULL; + goto error; + } ++ if (!rxq_ctrl->started) ++ LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next); + rxq_ctrl->started = true; + } + return 0; +@@ -346,8 +346,8 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev) + ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr); + if (ret) + goto error; +- rq_attr.state = MLX5_SQC_STATE_RDY; +- rq_attr.rq_state = MLX5_SQC_STATE_RST; ++ rq_attr.state = MLX5_RQC_STATE_RDY; ++ rq_attr.rq_state = MLX5_RQC_STATE_RST; + rq_attr.hairpin_peer_sq = sq->id; + rq_attr.hairpin_peer_vhca = + priv->sh->cdev->config.hca_attr.vhca_id; +@@ -601,8 +601,8 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue, + " mismatch", dev->data->port_id, cur_queue); + return -rte_errno; + } +- rq_attr.state = MLX5_SQC_STATE_RDY; +- rq_attr.rq_state = MLX5_SQC_STATE_RST; ++ rq_attr.state = MLX5_RQC_STATE_RDY; ++ rq_attr.rq_state = MLX5_RQC_STATE_RST; + rq_attr.hairpin_peer_sq = peer_info->qp_id; + rq_attr.hairpin_peer_vhca = peer_info->vhca_id; + ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr); +@@ -666,7 +666,7 @@ 
mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue, + return -rte_errno; + } + sq_attr.state = MLX5_SQC_STATE_RST; +- sq_attr.sq_state = MLX5_SQC_STATE_RST; ++ sq_attr.sq_state = MLX5_SQC_STATE_RDY; + ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr); + if (ret == 0) + txq_ctrl->hairpin_status = 0; +@@ -700,8 +700,8 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue, + dev->data->port_id, cur_queue); + return -rte_errno; + } +- rq_attr.state = MLX5_SQC_STATE_RST; +- rq_attr.rq_state = MLX5_SQC_STATE_RST; ++ rq_attr.state = MLX5_RQC_STATE_RST; ++ rq_attr.rq_state = MLX5_RQC_STATE_RDY; + ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr); + if (ret == 0) + rxq->hairpin_status = 0; +@@ -845,6 +845,11 @@ error: + txq_ctrl = mlx5_txq_get(dev, i); + if (txq_ctrl == NULL) + continue; ++ if (!txq_ctrl->is_hairpin || ++ txq_ctrl->hairpin_conf.peers[0].port != rx_port) { ++ mlx5_txq_release(dev, i); ++ continue; ++ } + rx_queue = txq_ctrl->hairpin_conf.peers[0].queue; + rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0); + mlx5_hairpin_queue_peer_unbind(dev, i, 1); +@@ -896,11 +901,11 @@ mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port) } /* Indeed, only the first used queue needs to be checked. */ if (txq_ctrl->hairpin_conf.manual_bind == 0) { @@ -27895,6 +39386,32 @@ index f54443ed1a..6479e44a94 100644 return -rte_errno; } else { return 0; +@@ -1494,13 +1499,13 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) + continue; + queue = mlx5_txq_get_sqn(txq); + if ((priv->representor || priv->master) && config->dv_esw_en) { +- if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue)) { ++ if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue, false)) { + mlx5_txq_release(dev, i); + goto error; + } + } + if (config->dv_esw_en && config->repr_matching) { +- if (mlx5_flow_hw_tx_repr_matching_flow(dev, queue)) { ++ if (mlx5_flow_hw_tx_repr_matching_flow(dev, queue, false)) { + mlx5_txq_release(dev, i); + goto error; + } +@@ -1519,6 +1524,9 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) + } + if (priv->isolated) + return 0; ++ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) ++ if (mlx5_flow_hw_lacp_rx_flow(dev)) ++ goto error; + if (dev->data->promiscuous) + flags |= MLX5_CTRL_PROMISCUOUS; + if (dev->data->all_multicast) diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.c b/dpdk/drivers/net/mlx5/mlx5_tx.c index a13c7e937c..14e1487e59 100644 --- a/dpdk/drivers/net/mlx5/mlx5_tx.c @@ -27909,7 +39426,7 @@ index a13c7e937c..14e1487e59 100644 mlx5_dump_debug_information(name, "MLX5 Error SQ:", (const void *)((uintptr_t) diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.h b/dpdk/drivers/net/mlx5/mlx5_tx.h -index a44050a1ce..a056be7ca8 100644 +index a44050a1ce..ff23d87b8a 100644 --- a/dpdk/drivers/net/mlx5/mlx5_tx.h +++ b/dpdk/drivers/net/mlx5/mlx5_tx.h @@ -817,7 +817,7 @@ mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq, @@ -27921,6 +39438,15 @@ index a44050a1ce..a056be7ca8 100644 ws->lkey = RTE_BE32(0); ws->va_high = RTE_BE32(0); ws->va_low = RTE_BE32(0); +@@ -1975,7 +1975,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq, + uintptr_t start; + + mbuf = loc->mbuf; +- nxlen = rte_pktmbuf_data_len(mbuf); ++ nxlen = rte_pktmbuf_data_len(mbuf) + vlan; + /* + * Packet length exceeds the allowed inline data length, + * check whether the minimal inlining is required. 
diff --git a/dpdk/drivers/net/mlx5/mlx5_txpp.c b/dpdk/drivers/net/mlx5/mlx5_txpp.c index f853a67f58..0e1da1d5f5 100644 --- a/dpdk/drivers/net/mlx5/mlx5_txpp.c @@ -27974,6 +39500,29 @@ index f853a67f58..0e1da1d5f5 100644 } } return n_used + n_txpp; +diff --git a/dpdk/drivers/net/mlx5/mlx5_txq.c b/dpdk/drivers/net/mlx5/mlx5_txq.c +index 5543f2c570..d617784dba 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_txq.c ++++ b/dpdk/drivers/net/mlx5/mlx5_txq.c +@@ -1310,8 +1310,16 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num) + return -rte_errno; + } + #ifdef HAVE_MLX5_HWS_SUPPORT +- if (priv->sh->config.dv_flow_en == 2) +- return mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num); ++ if (priv->sh->config.dv_flow_en == 2) { ++ if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true)) ++ return -rte_errno; ++ if (priv->sh->config.repr_matching && ++ mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num, true)) { ++ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num); ++ return -rte_errno; ++ } ++ return 0; ++ } + #endif + flow = mlx5_flow_create_devx_sq_miss_flow(dev, sq_num); + if (flow > 0) diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c index 88d8213f55..a31e1b5494 100644 --- a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c @@ -28012,6 +39561,40 @@ index 88d8213f55..a31e1b5494 100644 + rte_errno = ENOTSUP; + return -ENOTSUP; +} +diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c +index 5013e9f012..f907b21ecc 100644 +--- a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c ++++ b/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c +@@ -417,6 +417,12 @@ mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data) + return err; + } + ++void ++mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws) ++{ ++ RTE_SET_USED(ws); ++} ++ + int + mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item, + uint64_t item_flags, +diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.h b/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.h +index 1c1c17fc41..856d8ba948 100644 +--- a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.h ++++ b/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.h +@@ -473,4 +473,12 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item, + uint8_t target_protocol, + struct rte_flow_error *error); + ++/** ++ * Add per thread workspace to the global list for garbage collection. ++ * ++ * @param[in] ws ++ * Pointer to the flow workspace. 
++ */ ++void mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws); ++ + #endif /* RTE_PMD_MLX5_FLOW_OS_H_ */ diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_os.c index 77f04cc931..f401264b61 100644 --- a/dpdk/drivers/net/mlx5/windows/mlx5_os.c @@ -28027,10 +39610,85 @@ index 77f04cc931..f401264b61 100644 DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u", sh->dev_cap.ind_table_max_size); } +diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c +index c4355a3f64..ab643709ee 100644 +--- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c ++++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c +@@ -376,6 +376,10 @@ mvneta_dev_start(struct rte_eth_dev *dev) + goto out; + } + ++ /* start rx queues */ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + /* start tx queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; +@@ -400,6 +404,7 @@ static int + mvneta_dev_stop(struct rte_eth_dev *dev) + { + struct mvneta_priv *priv = dev->data->dev_private; ++ uint16_t i; + + dev->data->dev_started = 0; + +@@ -412,6 +417,14 @@ mvneta_dev_stop(struct rte_eth_dev *dev) + + priv->ppio = NULL; + ++ /* stop rx queues */ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ ++ /* stop tx queues */ ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +index 8fd3211283..177b8165f3 100644 +--- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c ++++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +@@ -951,6 +951,9 @@ mrvl_dev_start(struct rte_eth_dev *dev) + goto out; + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + mrvl_flow_init(dev); + mrvl_mtr_init(dev); + mrvl_set_tx_function(dev); +@@ -1076,6 +1079,13 @@ mrvl_flush_bpool(struct rte_eth_dev *dev) + static int + mrvl_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return mrvl_dev_set_link_down(dev); + } + diff --git a/dpdk/drivers/net/netvsc/hn_rndis.c b/dpdk/drivers/net/netvsc/hn_rndis.c -index e6f1f28768..29c6009b2c 100644 +index e6f1f28768..fe36274df8 100644 --- a/dpdk/drivers/net/netvsc/hn_rndis.c +++ b/dpdk/drivers/net/netvsc/hn_rndis.c +@@ -35,7 +35,7 @@ + #include "hn_rndis.h" + #include "ndis.h" + +-#define RNDIS_TIMEOUT_SEC 5 ++#define RNDIS_TIMEOUT_SEC 60 + #define RNDIS_DELAY_MS 10 + + #define HN_RNDIS_XFER_SIZE 0x4000 @@ -329,7 +329,8 @@ void hn_rndis_receive_response(struct hn_data *hv, hn_rndis_dump(data); @@ -28042,7 +39700,7 @@ index e6f1f28768..29c6009b2c 100644 "missing RNDIS header %u", len); return; diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.c b/dpdk/drivers/net/nfp/flower/nfp_flower.c -index e447258d97..0661c38f08 100644 +index e447258d97..5896d208d0 100644 --- a/dpdk/drivers/net/nfp/flower/nfp_flower.c +++ b/dpdk/drivers/net/nfp/flower/nfp_flower.c @@ -25,7 +25,6 @@ @@ -28053,7 +39711,50 @@ index e447258d97..0661c38f08 100644 static void nfp_pf_repr_enable_queues(struct rte_eth_dev *dev) -@@ -451,7 +450,7 @@ 
nfp_flower_pf_recv_pkts(void *rx_queue, +@@ -85,6 +84,7 @@ int + nfp_flower_pf_start(struct rte_eth_dev *dev) + { + int ret; ++ uint16_t i; + uint32_t new_ctrl; + uint32_t update = 0; + struct nfp_net_hw *hw; +@@ -137,6 +137,11 @@ nfp_flower_pf_start(struct rte_eth_dev *dev) + return -EIO; + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -159,11 +164,13 @@ nfp_flower_pf_stop(struct rte_eth_dev *dev) + for (i = 0; i < dev->data->nb_tx_queues; i++) { + this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i]; + nfp_net_reset_tx_queue(this_tx_q); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i]; + nfp_net_reset_rx_queue(this_rx_q); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) +@@ -217,8 +224,6 @@ nfp_flower_pf_close(struct rte_eth_dev *dev) + + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); + +- rte_eth_dev_release_port(dev); +- + /* Now it is safe to free all PF resources */ + PMD_DRV_LOG(INFO, "Freeing PF resources"); + nfp_cpp_area_free(pf_dev->ctrl_area); +@@ -451,7 +456,7 @@ nfp_flower_pf_recv_pkts(void *rx_queue, rxds->vals[1] = 0; dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb)); rxds->fld.dd = 0; @@ -28062,7 +39763,7 @@ index e447258d97..0661c38f08 100644 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; nb_hold++; -@@ -631,13 +630,6 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) +@@ -631,13 +636,6 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) pf_dev = hw->pf_dev; pci_dev = hw->pf_dev->pci_dev; @@ -28076,7 +39777,7 @@ index e447258d97..0661c38f08 100644 hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; hw->subsystem_device_id = pci_dev->id.subsystem_device_id; -@@ -666,6 +658,9 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) +@@ -666,6 +664,9 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) hw->mtu = hw->max_mtu; hw->flbufsz = DEFAULT_FLBUF_SIZE; @@ -28086,26 +39787,148 @@ index e447258d97..0661c38f08 100644 /* read the Rx offset configured from firmware */ if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) hw->rx_offset = NFP_NET_RX_OFFSET; -@@ -703,6 +698,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -695,6 +696,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + int ret = 0; + uint16_t n_txq; + uint16_t n_rxq; ++ const char *pci_name; + unsigned int numa_node; + struct rte_mempool *mp; + struct nfp_net_rxq *rxq; +@@ -703,6 +705,9 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) struct rte_eth_dev *eth_dev; const struct rte_memzone *tz; struct nfp_app_fw_flower *app_fw_flower; ++ char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE]; ++ char ctrl_txring_name[RTE_MEMZONE_NAMESIZE]; + char ctrl_pktmbuf_pool_name[RTE_MEMZONE_NAMESIZE]; /* Set up some pointers here for ease of use */ pf_dev = hw->pf_dev; -@@ -736,7 +732,10 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -734,9 +739,14 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + goto eth_dev_cleanup; + } ++ pci_name = strchr(pf_dev->pci_dev->name, ':') + 1; ++ /* Create a mbuf pool for the ctrl vNIC */ numa_node = rte_socket_id(); - app_fw_flower->ctrl_pktmbuf_pool = 
rte_pktmbuf_pool_create("ctrl_mbuf_pool", + snprintf(ctrl_pktmbuf_pool_name, sizeof(ctrl_pktmbuf_pool_name), -+ "%s_ctrlmp", pf_dev->pci_dev->device.name); ++ "%s_ctrlmp", pci_name); + app_fw_flower->ctrl_pktmbuf_pool = + rte_pktmbuf_pool_create(ctrl_pktmbuf_pool_name, 4 * CTRL_VNIC_NB_DESC, 64, 0, 9216, numa_node); if (app_fw_flower->ctrl_pktmbuf_pool == NULL) { PMD_INIT_LOG(ERR, "Create mbuf pool for ctrl vnic failed"); +@@ -773,6 +783,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + eth_dev->data->nb_rx_queues = n_txq; + eth_dev->data->dev_private = hw; + ++ snprintf(ctrl_rxring_name, sizeof(ctrl_rxring_name), "%s_ctrx_ring", pci_name); + /* Set up the Rx queues */ + for (i = 0; i < n_rxq; i++) { + rxq = rte_zmalloc_socket("ethdev RX queue", +@@ -811,7 +822,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ +- tz = rte_eth_dma_zone_reserve(eth_dev, "ctrl_rx_ring", i, ++ tz = rte_eth_dma_zone_reserve(eth_dev, ctrl_rxring_name, i, + sizeof(struct nfp_net_rx_desc) * NFP_NET_MAX_RX_DESC, + NFP_MEMZONE_ALIGN, numa_node); + if (tz == NULL) { +@@ -830,7 +841,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + sizeof(*rxq->rxbufs) * CTRL_VNIC_NB_DESC, + RTE_CACHE_LINE_SIZE, numa_node); + if (rxq->rxbufs == NULL) { +- rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i); ++ rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i); + rte_free(rxq); + ret = -ENOMEM; + goto rx_queue_setup_cleanup; +@@ -848,6 +859,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC)); + } + ++ snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name); + /* Set up the Tx queues */ + for (i = 0; i < n_txq; i++) { + txq = rte_zmalloc_socket("ethdev TX queue", +@@ -866,7 +878,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. 
+ */ +- tz = rte_eth_dma_zone_reserve(eth_dev, "ctrl_tx_ring", i, ++ tz = rte_eth_dma_zone_reserve(eth_dev, ctrl_txring_name, i, + sizeof(struct nfp_net_nfd3_tx_desc) * NFP_NET_MAX_TX_DESC, + NFP_MEMZONE_ALIGN, numa_node); + if (tz == NULL) { +@@ -896,7 +908,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) + sizeof(*txq->txbufs) * CTRL_VNIC_NB_DESC, + RTE_CACHE_LINE_SIZE, numa_node); + if (txq->txbufs == NULL) { +- rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i); ++ rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i); + rte_free(txq); + ret = -ENOMEM; + goto tx_queue_setup_cleanup; +@@ -921,7 +933,7 @@ tx_queue_setup_cleanup: + txq = eth_dev->data->tx_queues[i]; + if (txq != NULL) { + rte_free(txq->txbufs); +- rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i); ++ rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i); + rte_free(txq); + } + } +@@ -930,7 +942,7 @@ rx_queue_setup_cleanup: + rxq = eth_dev->data->rx_queues[i]; + if (rxq != NULL) { + rte_free(rxq->rxbufs); +- rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i); ++ rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i); + rte_free(rxq); + } + } +@@ -951,28 +963,35 @@ static void + nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw) + { + uint32_t i; ++ const char *pci_name; + struct nfp_net_rxq *rxq; + struct nfp_net_txq *txq; + struct rte_eth_dev *eth_dev; + struct nfp_app_fw_flower *app_fw_flower; ++ char ctrl_txring_name[RTE_MEMZONE_NAMESIZE]; ++ char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE]; + + eth_dev = hw->eth_dev; + app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(hw->pf_dev->app_fw_priv); + ++ pci_name = strchr(app_fw_flower->pf_hw->pf_dev->pci_dev->name, ':') + 1; ++ ++ snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name); + for (i = 0; i < hw->max_tx_queues; i++) { + txq = eth_dev->data->tx_queues[i]; + if (txq != NULL) { + rte_free(txq->txbufs); +- rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i); ++ rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i); + rte_free(txq); + } + } + ++ snprintf(ctrl_rxring_name, sizeof(ctrl_rxring_name), "%s_ctrx_ring", pci_name); + for (i = 0; i < hw->max_rx_queues; i++) { + rxq = eth_dev->data->rx_queues[i]; + if (rxq != NULL) { + rte_free(rxq->rxbufs); +- rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i); ++ rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i); + rte_free(rxq); + } + } diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c index 3631e764fe..1c6340f3d7 100644 --- a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c @@ -28120,10 +39943,50 @@ index 3631e764fe..1c6340f3d7 100644 nb_hold++; diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c -index 5809c838b3..d319aefb08 100644 +index 5809c838b3..32c4574bdc 100644 --- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c +++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c -@@ -528,7 +528,7 @@ static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { +@@ -300,6 +300,7 @@ nfp_flower_repr_dev_start(struct rte_eth_dev *dev) + { + struct nfp_flower_representor *repr; + struct nfp_app_fw_flower *app_fw_flower; ++ uint16_t i; + + repr = (struct nfp_flower_representor *)dev->data->dev_private; + app_fw_flower = repr->app_fw_flower; +@@ -311,6 +312,11 @@ nfp_flower_repr_dev_start(struct rte_eth_dev *dev) + + nfp_flower_cmsg_port_mod(app_fw_flower, repr->port_id, true); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for 
(i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -319,6 +325,7 @@ nfp_flower_repr_dev_stop(struct rte_eth_dev *dev) + { + struct nfp_flower_representor *repr; + struct nfp_app_fw_flower *app_fw_flower; ++ uint16_t i; + + repr = (struct nfp_flower_representor *)dev->data->dev_private; + app_fw_flower = repr->app_fw_flower; +@@ -330,6 +337,11 @@ nfp_flower_repr_dev_stop(struct rte_eth_dev *dev) + repr->nfp_idx, 0); + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -528,7 +540,7 @@ static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { .stats_reset = nfp_flower_repr_stats_reset, .promiscuous_enable = nfp_net_promisc_enable, @@ -28132,7 +39995,7 @@ index 5809c838b3..d319aefb08 100644 .mac_addr_set = nfp_flower_repr_mac_addr_set, }; -@@ -549,7 +549,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = { +@@ -549,7 +561,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = { .stats_reset = nfp_flower_repr_stats_reset, .promiscuous_enable = nfp_net_promisc_enable, @@ -28141,7 +40004,33 @@ index 5809c838b3..d319aefb08 100644 .mac_addr_set = nfp_flower_repr_mac_addr_set, -@@ -730,7 +730,9 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) +@@ -637,6 +649,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, + void *init_params) + { + int ret; ++ uint16_t index; + unsigned int numa_node; + char ring_name[RTE_ETH_NAME_MAX_LEN]; + struct nfp_app_fw_flower *app_fw_flower; +@@ -710,10 +723,13 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, + } + + /* Add repr to correct array */ +- if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) +- app_fw_flower->phy_reprs[repr->nfp_idx] = repr; +- else +- app_fw_flower->vf_reprs[repr->vf_id] = repr; ++ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { ++ index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id); ++ app_fw_flower->phy_reprs[index] = repr; ++ } else { ++ index = repr->vf_id; ++ app_fw_flower->vf_reprs[index] = repr; ++ } + + return 0; + +@@ -730,7 +746,9 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) { int i; int ret; @@ -28151,7 +40040,7 @@ index 5809c838b3..d319aefb08 100644 struct nfp_eth_table *nfp_eth_table; struct nfp_eth_table_port *eth_port; struct nfp_flower_representor flower_repr = { -@@ -753,7 +755,13 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) +@@ -753,7 +771,13 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) /* PF vNIC reprs get a random MAC address */ rte_eth_random_addr(flower_repr.mac_addr.addr_bytes); @@ -28166,7 +40055,7 @@ index 5809c838b3..d319aefb08 100644 /* Create a eth_dev for this representor */ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name, -@@ -775,7 +783,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) +@@ -775,7 +799,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) /* Copy the real mac of the interface to the representor struct */ rte_ether_addr_copy((struct rte_ether_addr *)eth_port->mac_addr, &flower_repr.mac_addr); @@ -28176,7 +40065,7 @@ index 5809c838b3..d319aefb08 100644 /* * Create a eth_dev for this representor -@@ -806,7 +815,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) +@@ -806,7 +831,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) /* VF 
reprs get a random MAC address */ rte_eth_random_addr(flower_repr.mac_addr.addr_bytes); @@ -28187,10 +40076,50 @@ index 5809c838b3..d319aefb08 100644 /* This will also allocate private memory for the device*/ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name, diff --git a/dpdk/drivers/net/nfp/nfp_common.c b/dpdk/drivers/net/nfp/nfp_common.c -index 71711bfa22..b673370f20 100644 +index 71711bfa22..33613bb2b3 100644 --- a/dpdk/drivers/net/nfp/nfp_common.c +++ b/dpdk/drivers/net/nfp/nfp_common.c -@@ -977,9 +977,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +@@ -279,7 +279,7 @@ int + nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) + { + struct nfp_net_hw *hw; +- uint32_t update, ctrl; ++ uint32_t update, new_ctrl; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && +@@ -294,14 +294,18 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) + + /* Signal the NIC about the change */ + update = NFP_NET_CFG_UPDATE_MACADDR; +- ctrl = hw->ctrl; ++ new_ctrl = hw->ctrl; + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) +- ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR; +- if (nfp_net_reconfig(hw, ctrl, update) < 0) { ++ new_ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR; ++ ++ if (nfp_net_reconfig(hw, new_ctrl, update) < 0) { + PMD_INIT_LOG(INFO, "MAC address update failed"); + return -EIO; + } ++ ++ hw->ctrl = new_ctrl; ++ + return 0; + } + +@@ -885,7 +889,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev) + * If MSI-X auto-masking is enabled clear the mask bit, otherwise + * clear the ICR for the entry. + */ +-static void ++void + nfp_net_irq_unmask(struct rte_eth_dev *dev) + { + struct nfp_net_hw *hw; +@@ -977,9 +981,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EBUSY; } @@ -28202,7 +40131,7 @@ index 71711bfa22..b673370f20 100644 mtu, hw->flbufsz); return -ERANGE; } -@@ -1256,7 +1256,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, +@@ -1256,7 +1260,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL); if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) @@ -28211,7 +40140,7 @@ index 71711bfa22..b673370f20 100644 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; -@@ -1271,7 +1271,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, +@@ -1271,7 +1275,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) @@ -28220,7 +40149,23 @@ index 71711bfa22..b673370f20 100644 /* Propagate current RSS hash functions to caller */ rss_conf->rss_hf = rss_hf; -@@ -1413,6 +1413,24 @@ nfp_net_set_vxlan_port(struct nfp_net_hw *hw, +@@ -1337,6 +1341,7 @@ nfp_net_stop_rx_queue(struct rte_eth_dev *dev) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i]; + nfp_net_reset_rx_queue(this_rx_q); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -1362,6 +1367,7 @@ nfp_net_stop_tx_queue(struct rte_eth_dev *dev) + for (i = 0; i < dev->data->nb_tx_queues; i++) { + this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i]; + nfp_net_reset_tx_queue(this_tx_q); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -1413,6 +1419,24 @@ nfp_net_set_vxlan_port(struct nfp_net_hw *hw, RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE); 
RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE); RTE_LOG_REGISTER_SUFFIX(nfp_logtype_cpp, cpp, NOTICE); @@ -28246,7 +40191,7 @@ index 71711bfa22..b673370f20 100644 * Local variables: * c-file-style: "Linux" diff --git a/dpdk/drivers/net/nfp/nfp_common.h b/dpdk/drivers/net/nfp/nfp_common.h -index 36c19b47e4..67c8dc33d8 100644 +index 36c19b47e4..d1a07f5a72 100644 --- a/dpdk/drivers/net/nfp/nfp_common.h +++ b/dpdk/drivers/net/nfp/nfp_common.h @@ -111,6 +111,7 @@ struct nfp_net_adapter; @@ -28257,19 +40202,57 @@ index 36c19b47e4..67c8dc33d8 100644 #include #include -@@ -447,6 +448,7 @@ void nfp_net_close_rx_queue(struct rte_eth_dev *dev); +@@ -447,6 +448,8 @@ void nfp_net_close_rx_queue(struct rte_eth_dev *dev); void nfp_net_stop_tx_queue(struct rte_eth_dev *dev); void nfp_net_close_tx_queue(struct rte_eth_dev *dev); int nfp_net_set_vxlan_port(struct nfp_net_hw *hw, size_t idx, uint16_t port); +int nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name); ++void nfp_net_irq_unmask(struct rte_eth_dev *dev); #define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\ (&((struct nfp_net_adapter *)adapter)->hw) diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c -index 0956ea81df..29491f6e6d 100644 +index 0956ea81df..9f940a12b6 100644 --- a/dpdk/drivers/net/nfp/nfp_ethdev.c +++ b/dpdk/drivers/net/nfp/nfp_ethdev.c -@@ -517,14 +517,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -70,6 +70,7 @@ nfp_net_start(struct rte_eth_dev *dev) + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + uint32_t intr_vector; ++ uint16_t i; + int ret; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); +@@ -154,6 +155,8 @@ nfp_net_start(struct rte_eth_dev *dev) + + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) ++ hw->ctrl = new_ctrl; ++ + return -EIO; + + /* +@@ -172,7 +175,10 @@ nfp_net_start(struct rte_eth_dev *dev) + nfp_eth_set_configured(dev->process_private, + hw->nfp_idx, 1); + +- hw->ctrl = new_ctrl; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; + +@@ -298,7 +304,6 @@ nfp_net_close(struct rte_eth_dev *dev) + /* Mark this port as unused and free device priv resources*/ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); + app_fw_nic->ports[hw->idx] = NULL; +- rte_eth_dev_release_port(dev); + + for (i = 0; i < app_fw_nic->total_phyports; i++) { + /* Check to see if ports are still in use */ +@@ -517,14 +522,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev) /* Use backpointer to the CoreNIC app struct */ app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); @@ -28284,7 +40267,7 @@ index 0956ea81df..29491f6e6d 100644 port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; if (port < 0 || port > 7) { PMD_DRV_LOG(ERR, "Port value is wrong"); -@@ -572,6 +564,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -572,6 +569,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); @@ -28294,7 +40277,7 @@ index 0956ea81df..29491f6e6d 100644 if (nfp_net_ethdev_ops_mount(hw, eth_dev)) return -EINVAL; -@@ -609,6 +604,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -609,6 +609,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = RTE_ETHER_MTU; @@ -28302,7 +40285,16 @@ index 0956ea81df..29491f6e6d 
100644 /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) -@@ -724,7 +720,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) +@@ -690,6 +691,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + nfp_net_dev_interrupt_handler, (void *)eth_dev); + /* Telling the firmware about the LSC interrupt entry */ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); ++ /* Unmask the LSC interrupt */ ++ nfp_net_irq_unmask(eth_dev); + /* Recording current stats counters values */ + nfp_net_stats_reset(eth_dev); + +@@ -724,7 +727,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) goto load_fw; /* Then try the PCI name */ snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, @@ -28311,15 +40303,31 @@ index 0956ea81df..29491f6e6d 100644 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) -@@ -933,6 +929,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) - int ret; +@@ -930,9 +933,11 @@ app_cleanup: + static int + nfp_pf_init(struct rte_pci_device *pci_dev) + { +- int ret; ++ uint32_t i; ++ int ret = 0; int err = 0; uint64_t addr; + uint32_t cpp_id; struct nfp_cpp *cpp; enum nfp_app_fw_id app_fw_id; struct nfp_pf_dev *pf_dev; -@@ -1032,7 +1029,8 @@ nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -976,6 +981,10 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + goto hwinfo_cleanup; + } + ++ /* Force the physical port down to clear the possible DMA error */ ++ for (i = 0; i < nfp_eth_table->count; i++) ++ nfp_eth_set_configured(cpp, nfp_eth_table->ports[i].index, 0); ++ + if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) { + PMD_INIT_LOG(ERR, "Error when uploading firmware"); + ret = -EIO; +@@ -1032,7 +1041,8 @@ nfp_pf_init(struct rte_pci_device *pci_dev) goto pf_cleanup; } @@ -28330,10 +40338,39 @@ index 0956ea81df..29491f6e6d 100644 &pf_dev->hwqueues_area); if (pf_dev->hw_queues == NULL) { diff --git a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c -index d1427b63bc..1877d6b76b 100644 +index d1427b63bc..435127604a 100644 --- a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c +++ b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c -@@ -291,14 +291,6 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) +@@ -45,6 +45,7 @@ nfp_netvf_start(struct rte_eth_dev *dev) + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + uint32_t intr_vector; ++ uint16_t i; + int ret; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); +@@ -113,6 +114,8 @@ nfp_netvf_start(struct rte_eth_dev *dev) + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return -EIO; + ++ hw->ctrl = new_ctrl; ++ + /* + * Allocating rte mbufs for configured rx queues. 
+ * This requires queues being enabled before +@@ -122,7 +125,10 @@ nfp_netvf_start(struct rte_eth_dev *dev) + goto error; + } + +- hw->ctrl = new_ctrl; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; + +@@ -291,14 +297,6 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); @@ -28348,7 +40385,7 @@ index d1427b63bc..1877d6b76b 100644 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr; -@@ -312,6 +304,9 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) +@@ -312,6 +310,9 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); @@ -28358,7 +40395,7 @@ index d1427b63bc..1877d6b76b 100644 if (nfp_netvf_ethdev_ops_mount(hw, eth_dev)) return -EINVAL; -@@ -366,6 +361,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) +@@ -366,6 +367,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = RTE_ETHER_MTU; @@ -28366,6 +40403,15 @@ index d1427b63bc..1877d6b76b 100644 /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) +@@ -450,6 +452,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) + (void *)eth_dev); + /* Telling the firmware about the LSC interrupt entry */ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); ++ /* Unmask the LSC interrupt */ ++ nfp_net_irq_unmask(eth_dev); + /* Recording current stats counters values */ + nfp_net_stats_reset(eth_dev); + } diff --git a/dpdk/drivers/net/nfp/nfp_flow.c b/dpdk/drivers/net/nfp/nfp_flow.c index 6f79d950db..faa0eda325 100644 --- a/dpdk/drivers/net/nfp/nfp_flow.c @@ -29483,7 +41529,7 @@ index 283cdca367..27243d85c8 100644 if (link_up_wait_to_complete) { for (i = 0; i < hw->mac.max_link_up_time; i++) { diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c -index 9b323624ec..b0eb6c97c0 100644 +index 9b323624ec..ba63a8058a 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c +++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_rtl.c @@ -120,6 +120,8 @@ s32 ngbe_init_phy_rtl(struct ngbe_hw *hw) @@ -29495,7 +41541,19 @@ index 9b323624ec..b0eb6c97c0 100644 for (i = 0; i < 15; i++) { if (!rd32m(hw, NGBE_STAT, NGBE_STAT_GPHY_IN_RST(hw->bus.lan_id))) -@@ -390,6 +392,26 @@ s32 ngbe_check_phy_link_rtl(struct ngbe_hw *hw, u32 *speed, bool *link_up) +@@ -146,6 +148,11 @@ s32 ngbe_init_phy_rtl(struct ngbe_hw *hw) + hw->phy.write_reg(hw, 27, 0xa42, 0x8011); + hw->phy.write_reg(hw, 28, 0xa42, 0x5737); + ++ /* Disable fall to 100m if signal is not good */ ++ hw->phy.read_reg(hw, 17, 0xa44, &value); ++ value &= ~0x8; ++ hw->phy.write_reg(hw, 17, 0xa44, value); ++ + hw->phy.write_reg(hw, RTL_SCR, 0xa46, RTL_SCR_EXTINI); + hw->phy.read_reg(hw, RTL_SCR, 0xa46, &value); + if (!(value & RTL_SCR_EXTINI)) { +@@ -390,6 +397,26 @@ s32 ngbe_check_phy_link_rtl(struct ngbe_hw *hw, u32 *speed, bool *link_up) *speed = NGBE_LINK_SPEED_10M_FULL; } @@ -29762,10 +41820,57 @@ index c88946f7c3..754faadd6a 100644 return 0; } diff --git a/dpdk/drivers/net/ngbe/base/ngbe_type.h b/dpdk/drivers/net/ngbe/base/ngbe_type.h -index aa5c41146c..37be288a74 100644 +index aa5c41146c..8a7d2cd331 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_type.h +++ b/dpdk/drivers/net/ngbe/base/ngbe_type.h -@@ -431,8 +431,10 @@ struct 
ngbe_hw { +@@ -116,6 +116,46 @@ struct ngbe_fc_info { + enum ngbe_fc_mode requested_mode; /* FC mode requested by caller */ + }; + ++/* Flow Control Data Sheet defined values ++ * Calculation and defines taken from 802.1bb Annex O ++ */ ++/* BitTimes (BT) conversion */ ++#define NGBE_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024)) ++#define NGBE_B2BT(BT) ((BT) * 8) ++ ++/* Calculate Delay to respond to PFC */ ++#define NGBE_PFC_D 672 ++ ++/* Calculate Cable Delay */ ++#define NGBE_CABLE_DC 5556 /* Delay Copper */ ++ ++/* Calculate Interface Delay */ ++#define NGBE_PHY_D 12800 ++#define NGBE_MAC_D 4096 ++#define NGBE_XAUI_D (2 * 1024) ++ ++#define NGBE_ID (NGBE_MAC_D + NGBE_XAUI_D + NGBE_PHY_D) ++ ++/* Calculate Delay incurred from higher layer */ ++#define NGBE_HD 6144 ++ ++/* Calculate PCI Bus delay for low thresholds */ ++#define NGBE_PCI_DELAY 10000 ++ ++/* Calculate delay value in bit times */ ++#define NGBE_DV(_max_frame_link, _max_frame_tc) \ ++ ((36 * \ ++ (NGBE_B2BT(_max_frame_link) + \ ++ NGBE_PFC_D + \ ++ (2 * NGBE_CABLE_DC) + \ ++ (2 * NGBE_ID) + \ ++ NGBE_HD) / 25 + 1) + \ ++ 2 * NGBE_B2BT(_max_frame_tc)) ++ ++#define NGBE_LOW_DV(_max_frame_tc) \ ++ (2 * ((2 * NGBE_B2BT(_max_frame_tc) + \ ++ (36 * NGBE_PCI_DELAY / 25) + 1))) ++ + /* Statistics counters collected by the MAC */ + /* PB[] RxTx */ + struct ngbe_pb_stats { +@@ -431,8 +471,10 @@ struct ngbe_hw { bool offset_loaded; bool is_pf; bool gpio_ctl; @@ -29777,10 +41882,18 @@ index aa5c41146c..37be288a74 100644 u64 rx_qp_packets; u64 tx_qp_packets; diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c -index afdb3ad41f..cb643c6eba 100644 +index afdb3ad41f..08e14a05c9 100644 --- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c +++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c -@@ -160,7 +160,9 @@ static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = { +@@ -90,6 +90,7 @@ static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev); + static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); + static void ngbe_dev_interrupt_handler(void *param); + static void ngbe_configure_msix(struct rte_eth_dev *dev); ++static void ngbe_pbthresh_set(struct rte_eth_dev *dev); + + #define NGBE_SET_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ +@@ -160,7 +161,9 @@ static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = { HW_XSTAT(tx_total_packets), HW_XSTAT(rx_total_missed_packets), HW_XSTAT(rx_broadcast_packets), @@ -29790,7 +41903,7 @@ index afdb3ad41f..cb643c6eba 100644 HW_XSTAT(rx_management_packets), HW_XSTAT(tx_management_packets), HW_XSTAT(rx_management_dropped), -@@ -972,9 +974,6 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -972,9 +975,6 @@ ngbe_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -29800,7 +41913,15 @@ index afdb3ad41f..cb643c6eba 100644 /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); -@@ -1050,6 +1049,8 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -1038,6 +1038,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) + } + + hw->mac.setup_pba(hw); ++ ngbe_pbthresh_set(dev); + ngbe_configure_port(dev); + + err = ngbe_dev_rxtx_start(dev); +@@ -1050,6 +1051,8 @@ ngbe_dev_start(struct rte_eth_dev *dev) if (hw->is_pf && dev->data->dev_conf.lpbk_mode) goto skip_link_setup; @@ -29809,7 +41930,12 @@ index afdb3ad41f..cb643c6eba 100644 err = hw->mac.check_link(hw, &speed, &link_up, 0); if (err != 0) goto error; -@@ -1168,8 +1169,6 @@ ngbe_dev_stop(struct rte_eth_dev *dev) +@@ -1164,12 +1167,10 @@ 
ngbe_dev_stop(struct rte_eth_dev *dev) + int vf; + + if (hw->adapter_stopped) +- return 0; ++ goto out; PMD_INIT_FUNC_TRACE(); @@ -29818,7 +41944,37 @@ index afdb3ad41f..cb643c6eba 100644 if (hw->gpio_ctl) { /* gpio0 is used to power on/off control*/ wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0); -@@ -1869,24 +1868,6 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +@@ -1188,8 +1189,6 @@ ngbe_dev_stop(struct rte_eth_dev *dev) + for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) + vfinfo[vf].clear_to_send = false; + +- hw->phy.set_phy_power(hw, false); +- + ngbe_dev_clear_queues(dev); + + /* Clear stored conf */ +@@ -1216,6 +1215,10 @@ ngbe_dev_stop(struct rte_eth_dev *dev) + hw->adapter_stopped = true; + dev->data->dev_started = 0; + ++out: ++ /* close phy to prevent reset in dev_close from restarting physical link */ ++ hw->phy.set_phy_power(hw, false); ++ + return 0; + } + +@@ -1259,6 +1262,9 @@ ngbe_dev_close(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + ngbe_pf_reset_hw(hw); + + ngbe_dev_stop(dev); +@@ -1869,24 +1875,6 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } @@ -29843,7 +41999,7 @@ index afdb3ad41f..cb643c6eba 100644 /* return 0 means link status changed, -1 means not changed */ int ngbe_dev_link_update_share(struct rte_eth_dev *dev, -@@ -1896,7 +1877,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1896,7 +1884,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, struct rte_eth_link link; u32 link_speed = NGBE_LINK_SPEED_UNKNOWN; u32 lan_speed = 0; @@ -29851,7 +42007,7 @@ index afdb3ad41f..cb643c6eba 100644 bool link_up; int err; int wait = 1; -@@ -1910,9 +1890,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1910,9 +1897,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, hw->mac.get_link_status = true; @@ -29861,7 +42017,7 @@ index afdb3ad41f..cb643c6eba 100644 /* check if it needs to wait to complete, if lsc interrupt is enabled */ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) wait = 0; -@@ -1927,7 +1904,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1927,7 +1911,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, if (!link_up) return rte_eth_linkstatus_set(dev, &link); @@ -29869,6 +42025,109 @@ index afdb3ad41f..cb643c6eba 100644 link.link_status = RTE_ETH_LINK_UP; link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; +@@ -1961,6 +1944,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, + wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, + NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); + } ++ wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC, ++ NGBE_MACRXFLT_PROMISC); + } + + return rte_eth_linkstatus_set(dev, &link); +@@ -2380,6 +2365,93 @@ ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + return -EIO; + } + ++/* Additional bittime to account for NGBE framing */ ++#define NGBE_ETH_FRAMING 20 ++ ++/* ++ * ngbe_fc_hpbthresh_set - calculate high water mark for flow control ++ * ++ * @dv_id: device interface delay ++ * @pb: packet buffer to calculate ++ */ ++static s32 ++ngbe_fc_hpbthresh_set(struct rte_eth_dev *dev) ++{ ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); ++ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); ++ u32 max_frame_size, tc, dv_id, rx_pb; ++ s32 kb, marker; ++ ++ /* Calculate max LAN frame size */ ++ max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK); ++ tc = max_frame_size + NGBE_ETH_FRAMING; ++ ++ /* Calculate delay value for device 
*/ ++ dv_id = NGBE_DV(tc, tc); ++ ++ /* Loopback switch introduces additional latency */ ++ if (pci_dev->max_vfs) ++ dv_id += NGBE_B2BT(tc); ++ ++ /* Delay value is calculated in bit times convert to KB */ ++ kb = NGBE_BT2KB(dv_id); ++ rx_pb = rd32(hw, NGBE_PBRXSIZE) >> 10; ++ ++ marker = rx_pb - kb; ++ ++ /* It is possible that the packet buffer is not large enough ++ * to provide required headroom. In this case throw an error ++ * to user and do the best we can. ++ */ ++ if (marker < 0) { ++ PMD_DRV_LOG(WARNING, "Packet Buffer can not provide enough headroom to support flow control."); ++ marker = tc + 1; ++ } ++ ++ return marker; ++} ++ ++/* ++ * ngbe_fc_lpbthresh_set - calculate low water mark for flow control ++ * ++ * @dv_id: device interface delay ++ */ ++static s32 ++ngbe_fc_lpbthresh_set(struct rte_eth_dev *dev) ++{ ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); ++ u32 max_frame_size, tc, dv_id; ++ s32 kb; ++ ++ /* Calculate max LAN frame size */ ++ max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK); ++ tc = max_frame_size + NGBE_ETH_FRAMING; ++ ++ /* Calculate delay value for device */ ++ dv_id = NGBE_LOW_DV(tc); ++ ++ /* Delay value is calculated in bit times convert to KB */ ++ kb = NGBE_BT2KB(dv_id); ++ ++ return kb; ++} ++ ++/* ++ * ngbe_pbthresh_setup - calculate and setup high low water marks ++ */ ++static void ++ngbe_pbthresh_set(struct rte_eth_dev *dev) ++{ ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); ++ ++ hw->fc.high_water = ngbe_fc_hpbthresh_set(dev); ++ hw->fc.low_water = ngbe_fc_lpbthresh_set(dev); ++ ++ /* Low water marks must not be larger than high water marks */ ++ if (hw->fc.low_water > hw->fc.high_water) ++ hw->fc.low_water = 0; ++} ++ + int + ngbe_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.h b/dpdk/drivers/net/ngbe/ngbe_ethdev.h index 8d500fd38c..bb96f6a5e7 100644 --- a/dpdk/drivers/net/ngbe/ngbe_ethdev.h @@ -29882,7 +42141,7 @@ index 8d500fd38c..bb96f6a5e7 100644 struct ngbe_hw_stats *hw_stats); diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.c b/dpdk/drivers/net/ngbe/ngbe_rxtx.c -index 9fd24fa444..f31906cc2f 100644 +index 9fd24fa444..54a6f6a887 100644 --- a/dpdk/drivers/net/ngbe/ngbe_rxtx.c +++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.c @@ -24,15 +24,11 @@ @@ -30046,7 +42305,43 @@ index 9fd24fa444..f31906cc2f 100644 } /* -@@ -1939,12 +1897,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) +@@ -1265,11 +1223,22 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. ++ * ++ * Meanwhile, to prevent the CPU from executing out of order, we ++ * need to use a proper memory barrier to ensure the memory ++ * ordering below. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->qw1.lo.status; + if (!(staterr & rte_cpu_to_le_32(NGBE_RXD_STAT_DD))) + break; ++ ++ /* ++ * Use acquire fence to ensure that status_error which includes ++ * DD bit is loaded before loading of other descriptor words. ++ */ ++ rte_atomic_thread_fence(__ATOMIC_ACQUIRE); ++ + rxd = *rxdp; + + /* +@@ -1496,6 +1465,12 @@ next_desc: + if (!(staterr & NGBE_RXD_STAT_DD)) + break; + ++ /* ++ * Use acquire fence to ensure that status_error which includes ++ * DD bit is loaded before loading of other descriptor words. 
++ */ ++ rte_atomic_thread_fence(__ATOMIC_ACQUIRE); ++ + rxd = *rxdp; + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " +@@ -1939,12 +1914,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | @@ -30059,7 +42354,7 @@ index 9fd24fa444..f31906cc2f 100644 RTE_ETH_TX_OFFLOAD_MULTI_SEGS; if (hw->is_pf) -@@ -2237,6 +2191,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev) +@@ -2237,6 +2208,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev) RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_KEEP_CRC | RTE_ETH_RX_OFFLOAD_VLAN_FILTER | @@ -30067,6 +42362,174 @@ index 9fd24fa444..f31906cc2f 100644 RTE_ETH_RX_OFFLOAD_SCATTER; if (hw->is_pf) +@@ -2460,6 +2432,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) + if (txq != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -2469,6 +2442,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) + if (rxq != NULL) { + ngbe_rx_queue_release_mbufs(rxq); + ngbe_reset_rx_queue(adapter, rxq); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + } +diff --git a/dpdk/drivers/net/null/rte_eth_null.c b/dpdk/drivers/net/null/rte_eth_null.c +index 47d9554ec5..1fbe572bd1 100644 +--- a/dpdk/drivers/net/null/rte_eth_null.c ++++ b/dpdk/drivers/net/null/rte_eth_null.c +@@ -188,21 +188,36 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) + static int + eth_dev_start(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + if (dev == NULL) + return -EINVAL; + + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + + static int + eth_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + if (dev == NULL) + return 0; + + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/octeon_ep/otx_ep_ethdev.c b/dpdk/drivers/net/octeon_ep/otx_ep_ethdev.c +index c8f4abe4ca..af0c5c824f 100644 +--- a/dpdk/drivers/net/octeon_ep/otx_ep_ethdev.c ++++ b/dpdk/drivers/net/octeon_ep/otx_ep_ethdev.c +@@ -76,6 +76,11 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev) + + otx_ep_info("dev started\n"); + ++ for (q = 0; q < eth_dev->data->nb_rx_queues; q++) ++ eth_dev->data->rx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (q = 0; q < eth_dev->data->nb_tx_queues; q++) ++ eth_dev->data->tx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -84,9 +89,15 @@ static int + otx_ep_dev_stop(struct rte_eth_dev *eth_dev) + { + struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev); ++ uint16_t i; + + otx_epvf->fn_list.disable_io_queues(otx_epvf); + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/octeontx/octeontx_ethdev.c b/dpdk/drivers/net/octeontx/octeontx_ethdev.c +index d52a3e73d5..2b97f0163e 100644 +--- 
a/dpdk/drivers/net/octeontx/octeontx_ethdev.c ++++ b/dpdk/drivers/net/octeontx/octeontx_ethdev.c +@@ -732,6 +732,11 @@ octeontx_dev_start(struct rte_eth_dev *dev) + } + + /* Success */ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return ret; + + pki_port_stop_error: +@@ -746,6 +751,7 @@ static int + octeontx_dev_stop(struct rte_eth_dev *dev) + { + struct octeontx_nic *nic = octeontx_pmd_priv(dev); ++ uint16_t i; + int ret; + + PMD_INIT_FUNC_TRACE(); +@@ -772,6 +778,11 @@ octeontx_dev_stop(struct rte_eth_dev *dev) + return ret; + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/pfe/pfe_ethdev.c b/dpdk/drivers/net/pfe/pfe_ethdev.c +index 0352a57950..551f3cf193 100644 +--- a/dpdk/drivers/net/pfe/pfe_ethdev.c ++++ b/dpdk/drivers/net/pfe/pfe_ethdev.c +@@ -241,6 +241,7 @@ pfe_eth_open(struct rte_eth_dev *dev) + struct pfe_eth_priv_s *priv = dev->data->dev_private; + struct hif_client_s *client; + struct hif_shm *hif_shm; ++ uint16_t i; + int rc; + + /* Register client driver with HIF */ +@@ -318,6 +319,10 @@ pfe_eth_open(struct rte_eth_dev *dev) + PFE_PMD_INFO("PFE INTERRUPT Mode enabled"); + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + err0: + return rc; +@@ -361,6 +366,7 @@ static int + pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) + { + struct pfe_eth_priv_s *priv = dev->data->dev_private; ++ uint16_t i; + + dev->data->dev_started = 0; + +@@ -370,6 +376,11 @@ pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) + dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; + dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/qede/qede_ethdev.c b/dpdk/drivers/net/qede/qede_ethdev.c index a4923670d6..22cd470646 100644 --- a/dpdk/drivers/net/qede/qede_ethdev.c @@ -30079,6 +42542,41 @@ index a4923670d6..22cd470646 100644 rss_params.update_rss_config = 1; /* tbl_size has to be set with capabilities */ rss_params.rss_table_size_log = 7; +diff --git a/dpdk/drivers/net/ring/rte_eth_ring.c b/dpdk/drivers/net/ring/rte_eth_ring.c +index bd5a47dd90..80f1859403 100644 +--- a/dpdk/drivers/net/ring/rte_eth_ring.c ++++ b/dpdk/drivers/net/ring/rte_eth_ring.c +@@ -111,15 +111,30 @@ eth_dev_start(struct rte_eth_dev *dev) + static int + eth_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + dev->data->dev_started = 0; + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; + } + + static int + eth_dev_set_link_down(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; ++ ++ for 
(i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/sfc/sfc_dp_rx.h b/dpdk/drivers/net/sfc/sfc_dp_rx.h index 246adbd87c..8a504bdcf1 100644 --- a/dpdk/drivers/net/sfc/sfc_dp_rx.h @@ -30128,11 +42626,49 @@ index 16cd8524d3..37b754fa33 100644 rxq->prefix_size = pinfo->erpl_length; rxq->rearm_data = sfc_ef100_mk_mbuf_rearm_data(rxq->dp.dpq.port_id, +diff --git a/dpdk/drivers/net/sfc/sfc_ef100_tx.c b/dpdk/drivers/net/sfc/sfc_ef100_tx.c +index 4c2205f7a4..1b6374775f 100644 +--- a/dpdk/drivers/net/sfc/sfc_ef100_tx.c ++++ b/dpdk/drivers/net/sfc/sfc_ef100_tx.c +@@ -405,7 +405,7 @@ sfc_ef100_tx_qdesc_send_create(const struct sfc_ef100_txq *txq, + m->l2_len + m->l3_len) >> 1; + } + +- rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova_default(m), ++ rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m), + rte_pktmbuf_data_len(m), &dma_addr); + if (unlikely(rc != 0)) + return rc; +diff --git a/dpdk/drivers/net/sfc/sfc_ethdev.c b/dpdk/drivers/net/sfc/sfc_ethdev.c +index 2ec743ebce..170ee57931 100644 +--- a/dpdk/drivers/net/sfc/sfc_ethdev.c ++++ b/dpdk/drivers/net/sfc/sfc_ethdev.c +@@ -2055,7 +2055,7 @@ sfc_process_mport_journal_cb(void *data, efx_mport_desc_t *mport, + struct sfc_mport_journal_ctx *ctx = data; + + if (ctx == NULL || ctx->sa == NULL) { +- sfc_err(ctx->sa, "received NULL context or SFC adapter"); ++ SFC_GENERIC_LOG(ERR, "received NULL context or SFC adapter"); + return EINVAL; + } + diff --git a/dpdk/drivers/net/sfc/sfc_mae.c b/dpdk/drivers/net/sfc/sfc_mae.c -index 421bb6da95..c7d28eae71 100644 +index 421bb6da95..b61b9658e3 100644 --- a/dpdk/drivers/net/sfc/sfc_mae.c +++ b/dpdk/drivers/net/sfc/sfc_mae.c -@@ -1180,6 +1180,8 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa, +@@ -281,8 +281,10 @@ sfc_mae_attach(struct sfc_adapter *sa) + bounce_eh->buf_size = limits.eml_encap_header_size_limit; + bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh", + bounce_eh->buf_size, 0); +- if (bounce_eh->buf == NULL) ++ if (bounce_eh->buf == NULL) { ++ rc = ENOMEM; + goto fail_mae_alloc_bounce_eh; ++ } + + mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios; + mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios; +@@ -1180,6 +1182,8 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa, } if (fw_rsrc->refcnt == 1) { @@ -30141,7 +42677,7 @@ index 421bb6da95..c7d28eae71 100644 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id); if (rc == 0) { sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x", -@@ -3896,12 +3898,10 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, +@@ -3896,12 +3900,10 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, break; case SFC_FT_RULE_SWITCH: /* @@ -30157,7 +42693,7 @@ index 421bb6da95..c7d28eae71 100644 ctx.ft_switch_hit_counter = &spec_mae->ft_ctx->switch_hit_counter; -@@ -3910,8 +3910,25 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, +@@ -3910,8 +3912,25 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, SFC_ASSERT(B_FALSE); } @@ -30183,7 +42719,7 @@ index 421bb6da95..c7d28eae71 100644 sfc_mae_encap_header_del(sa, ctx.encap_header); efx_mae_action_set_spec_fini(sa->nic, ctx.spec); return 0; -@@ -3924,6 +3941,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, +@@ -3924,6 +3943,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, return 0; fail_action_set_add: @@ -30192,7 +42728,7 @@ index 421bb6da95..c7d28eae71 100644 
fail_nb_count: sfc_mae_encap_header_del(sa, ctx.encap_header); diff --git a/dpdk/drivers/net/sfc/sfc_repr.c b/dpdk/drivers/net/sfc/sfc_repr.c -index 417d0073cb..919048e278 100644 +index 417d0073cb..79025e9052 100644 --- a/dpdk/drivers/net/sfc/sfc_repr.c +++ b/dpdk/drivers/net/sfc/sfc_repr.c @@ -9,6 +9,8 @@ @@ -30204,7 +42740,55 @@ index 417d0073cb..919048e278 100644 #include #include #include -@@ -834,6 +836,8 @@ sfc_repr_dev_close(struct rte_eth_dev *dev) +@@ -289,6 +291,7 @@ static int + sfc_repr_dev_start(struct rte_eth_dev *dev) + { + struct sfc_repr *sr = sfc_repr_by_eth_dev(dev); ++ uint16_t i; + int ret; + + sfcr_info(sr, "entry"); +@@ -300,6 +303,11 @@ sfc_repr_dev_start(struct rte_eth_dev *dev) + if (ret != 0) + goto fail_start; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + sfcr_info(sr, "done"); + + return 0; +@@ -364,6 +372,7 @@ static int + sfc_repr_dev_stop(struct rte_eth_dev *dev) + { + struct sfc_repr *sr = sfc_repr_by_eth_dev(dev); ++ uint16_t i; + int ret; + + sfcr_info(sr, "entry"); +@@ -378,6 +387,11 @@ sfc_repr_dev_stop(struct rte_eth_dev *dev) + + sfc_repr_unlock(sr); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + sfcr_info(sr, "done"); + + return 0; +@@ -528,6 +542,7 @@ sfc_repr_dev_infos_get(struct rte_eth_dev *dev, + + dev_info->device = dev->device; + ++ dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; + dev_info->max_rx_queues = SFC_REPR_RXQ_MAX; + dev_info->max_tx_queues = SFC_REPR_TXQ_MAX; + dev_info->default_rxconf.rx_drop_en = 1; +@@ -834,6 +849,8 @@ sfc_repr_dev_close(struct rte_eth_dev *dev) (void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id); @@ -30213,7 +42797,7 @@ index 417d0073cb..919048e278 100644 dev->rx_pkt_burst = NULL; dev->tx_pkt_burst = NULL; dev->dev_ops = NULL; -@@ -888,6 +892,29 @@ sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +@@ -888,6 +905,29 @@ sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) return 0; } @@ -30243,7 +42827,7 @@ index 417d0073cb..919048e278 100644 static const struct eth_dev_ops sfc_repr_dev_ops = { .dev_configure = sfc_repr_dev_configure, .dev_start = sfc_repr_dev_start, -@@ -901,6 +928,7 @@ static const struct eth_dev_ops sfc_repr_dev_ops = { +@@ -901,6 +941,7 @@ static const struct eth_dev_ops sfc_repr_dev_ops = { .rx_queue_release = sfc_repr_rx_queue_release, .tx_queue_setup = sfc_repr_tx_queue_setup, .tx_queue_release = sfc_repr_tx_queue_release, @@ -30295,11 +42879,131 @@ index 5c10e8fc74..8f1ee97fa8 100644 rte_spinlock_unlock(&sfc_mae_switch.lock); return 0; } +diff --git a/dpdk/drivers/net/softnic/rte_eth_softnic.c b/dpdk/drivers/net/softnic/rte_eth_softnic.c +index bcf6664460..1b90cf7a21 100644 +--- a/dpdk/drivers/net/softnic/rte_eth_softnic.c ++++ b/dpdk/drivers/net/softnic/rte_eth_softnic.c +@@ -134,6 +134,7 @@ pmd_dev_start(struct rte_eth_dev *dev) + { + struct pmd_internals *p = dev->data->dev_private; + int status; ++ uint16_t i; + + /* Firmware */ + status = softnic_cli_script_process(p, +@@ -146,6 +147,11 @@ pmd_dev_start(struct rte_eth_dev *dev) + /* Link UP */ + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ 
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -153,6 +159,7 @@ static int + pmd_dev_stop(struct rte_eth_dev *dev) + { + struct pmd_internals *p = dev->data->dev_private; ++ uint16_t i; + + /* Link DOWN */ + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; +@@ -163,6 +170,11 @@ pmd_dev_stop(struct rte_eth_dev *dev) + softnic_softnic_swq_free_keep_rxq_txq(p); + softnic_mempool_free(p); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c -index f2a6c33a19..66595f8312 100644 +index f2a6c33a19..2fd46c6b0b 100644 --- a/dpdk/drivers/net/tap/rte_eth_tap.c +++ b/dpdk/drivers/net/tap/rte_eth_tap.c -@@ -2303,8 +2303,8 @@ set_mac_type(const char *key __rte_unused, +@@ -559,7 +559,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len, + { + void *l3_hdr = packet + l2_len; + +- if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) { ++ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { + struct rte_ipv4_hdr *iph = l3_hdr; + uint16_t cksum; + +@@ -642,16 +642,25 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs, + + nb_segs = mbuf->nb_segs; + if (txq->csum && +- ((mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4) || ++ ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM || + (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM || + (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM))) { ++ unsigned int l4_len = 0; ++ + is_cksum = 1; + ++ if ((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == ++ RTE_MBUF_F_TX_UDP_CKSUM) ++ l4_len = sizeof(struct rte_udp_hdr); ++ else if ((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == ++ RTE_MBUF_F_TX_TCP_CKSUM) ++ l4_len = sizeof(struct rte_tcp_hdr); ++ + /* Support only packets with at least layer 4 + * header included in the first segment + */ + seg_len = rte_pktmbuf_data_len(mbuf); +- l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len; ++ l234_hlen = mbuf->l2_len + mbuf->l3_len + l4_len; + if (seg_len < l234_hlen) + return -1; + +@@ -661,7 +670,7 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs, + rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *), + l234_hlen); + tap_tx_l3_cksum(m_copy, mbuf->ol_flags, +- mbuf->l2_len, mbuf->l3_len, mbuf->l4_len, ++ mbuf->l2_len, mbuf->l3_len, l4_len, + &l4_cksum, &l4_phdr_cksum, + &l4_raw_cksum); + iovecs[k].iov_base = m_copy; +@@ -2267,29 +2276,6 @@ set_remote_iface(const char *key __rte_unused, + return 0; + } + +-static int parse_user_mac(struct rte_ether_addr *user_mac, +- const char *value) +-{ +- unsigned int index = 0; +- char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL; +- +- if (user_mac == NULL || value == NULL) +- return 0; +- +- strlcpy(mac_temp, value, sizeof(mac_temp)); +- mac_byte = strtok(mac_temp, ":"); +- +- while ((mac_byte != NULL) && +- (strlen(mac_byte) <= 2) && +- (strlen(mac_byte) == strspn(mac_byte, +- ETH_TAP_CMP_MAC_FMT))) { +- user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16); +- mac_byte = strtok(NULL, ":"); +- } +- +- return index; +-} +- + static int + set_mac_type(const char *key __rte_unused, + const char *value, +@@ -2303,15 +2289,15 @@ set_mac_type(const char *key __rte_unused, if 
(!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) { static int iface_idx; @@ -30310,6 +43014,3275 @@ index f2a6c33a19..66595f8312 100644 RTE_ETHER_ADDR_LEN); user_mac->addr_bytes[RTE_ETHER_ADDR_LEN - 1] = iface_idx++ + '0'; + goto success; + } + +- if (parse_user_mac(user_mac, value) != 6) ++ if (rte_ether_unformat_addr(value, user_mac) < 0) + goto error; + success: + TAP_LOG(DEBUG, "TAP user MAC param (%s)", value); +diff --git a/dpdk/drivers/net/tap/tap_bpf_insns.h b/dpdk/drivers/net/tap/tap_bpf_insns.h +index 1a91bbad13..53fa76c4e6 100644 +--- a/dpdk/drivers/net/tap/tap_bpf_insns.h ++++ b/dpdk/drivers/net/tap/tap_bpf_insns.h +@@ -1,10 +1,10 @@ + /* SPDX-License-Identifier: BSD-3-Clause +- * Copyright 2017 Mellanox Technologies, Ltd ++ * Auto-generated from tap_bpf_program.c ++ * This not the original source file. Do NOT edit it. + */ + + #include + +-/* bpf_insn array matching cls_q section. See tap_bpf_program.c file */ + static struct bpf_insn cls_q_insns[] = { + {0x61, 2, 1, 52, 0x00000000}, + {0x18, 3, 0, 0, 0xdeadbeef}, +@@ -23,18 +23,17 @@ static struct bpf_insn cls_q_insns[] = { + {0x95, 0, 0, 0, 0x00000000}, + }; + +-/* bpf_insn array matching l3_l4 section. see tap_bpf_program.c file */ + static struct bpf_insn l3_l4_hash_insns[] = { + {0xbf, 7, 1, 0, 0x00000000}, +- {0x61, 8, 7, 16, 0x00000000}, +- {0x61, 6, 7, 76, 0x00000000}, ++ {0x61, 6, 7, 16, 0x00000000}, ++ {0x61, 8, 7, 76, 0x00000000}, + {0x61, 9, 7, 80, 0x00000000}, + {0x18, 1, 0, 0, 0xdeadbeef}, + {0x00, 0, 0, 0, 0x00000000}, + {0x63, 10, 1, -4, 0x00000000}, + {0xbf, 2, 10, 0, 0x00000000}, + {0x07, 2, 0, 0, 0xfffffffc}, +- {0x18, 1, 1, 0, 0x0000cafe}, ++ {0x18, 1, 0, 0, 0x00000000}, + {0x00, 0, 0, 0, 0x00000000}, + {0x85, 0, 0, 0, 0x00000001}, + {0x55, 0, 0, 21, 0x00000000}, +@@ -58,7 +57,7 @@ static struct bpf_insn l3_l4_hash_insns[] = { + {0x07, 1, 0, 0, 0xffffffd0}, + {0xb7, 2, 0, 0, 0x00000023}, + {0x85, 0, 0, 0, 0x00000006}, +- {0x05, 0, 0, 1632, 0x00000000}, ++ {0x05, 0, 0, 1680, 0x00000000}, + {0xb7, 1, 0, 0, 0x0000000e}, + {0x61, 2, 7, 20, 0x00000000}, + {0x15, 2, 0, 10, 0x00000000}, +@@ -66,1630 +65,1678 @@ static struct bpf_insn l3_l4_hash_insns[] = { + {0x55, 2, 0, 8, 0x0000a888}, + {0xbf, 2, 7, 0, 0x00000000}, + {0xb7, 7, 0, 0, 0x00000000}, +- {0xbf, 1, 6, 0, 0x00000000}, ++ {0xbf, 1, 8, 0, 0x00000000}, + {0x07, 1, 0, 0, 0x00000012}, +- {0x2d, 1, 9, 1622, 0x00000000}, ++ {0x2d, 1, 9, 1670, 0x00000000}, + {0xb7, 1, 0, 0, 0x00000012}, +- {0x69, 8, 6, 16, 0x00000000}, ++ {0x69, 6, 8, 16, 0x00000000}, + {0xbf, 7, 2, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x0000ffff}, + {0x7b, 10, 7, -56, 0x00000000}, +- {0x57, 8, 0, 0, 0x0000ffff}, +- {0x15, 8, 0, 409, 0x0000dd86}, ++ {0x15, 6, 0, 443, 0x0000dd86}, + {0xb7, 7, 0, 0, 0x00000003}, +- {0x55, 8, 0, 1614, 0x00000008}, +- {0x0f, 6, 1, 0, 0x00000000}, ++ {0x55, 6, 0, 1662, 0x00000008}, ++ {0x0f, 8, 1, 0, 0x00000000}, + {0xb7, 7, 0, 0, 0x00000000}, +- {0xbf, 1, 6, 0, 0x00000000}, ++ {0xbf, 1, 8, 0, 0x00000000}, + {0x07, 1, 0, 0, 0x00000018}, +- {0x2d, 1, 9, 1609, 0x00000000}, +- {0x71, 3, 6, 12, 0x00000000}, +- {0xbf, 1, 3, 0, 0x00000000}, +- {0x67, 1, 0, 0, 0x00000038}, +- {0xc7, 1, 0, 0, 0x00000020}, +- {0x77, 1, 0, 0, 0x0000001f}, +- {0x57, 1, 0, 0, 0x2cc681d1}, +- {0x67, 3, 0, 0, 0x00000018}, ++ {0x2d, 1, 9, 1657, 0x00000000}, ++ {0xb7, 1, 0, 0, 0x00000000}, ++ {0x71, 3, 8, 12, 0x00000000}, ++ {0x71, 2, 8, 9, 0x00000000}, ++ {0x15, 2, 0, 1, 0x00000011}, ++ {0x55, 2, 0, 21, 0x00000006}, ++ {0x71, 2, 8, 7, 0x00000000}, ++ {0x71, 4, 8, 6, 0x00000000}, 
++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x67, 5, 0, 0, 0x00000008}, ++ {0x57, 5, 0, 0, 0x00001f00}, ++ {0x4f, 5, 2, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x4f, 4, 5, 0, 0x00000000}, ++ {0x55, 4, 0, 12, 0x00000000}, ++ {0xbf, 2, 8, 0, 0x00000000}, ++ {0x07, 2, 0, 0, 0x00000014}, ++ {0x71, 4, 2, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000018}, ++ {0x71, 1, 2, 1, 0x00000000}, ++ {0x67, 1, 0, 0, 0x00000010}, ++ {0x4f, 1, 4, 0, 0x00000000}, ++ {0x71, 4, 2, 3, 0x00000000}, ++ {0x4f, 1, 4, 0, 0x00000000}, ++ {0x71, 2, 2, 2, 0x00000000}, ++ {0x67, 2, 0, 0, 0x00000008}, ++ {0x4f, 1, 2, 0, 0x00000000}, + {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x40000000}, ++ {0x67, 4, 0, 0, 0x00000038}, ++ {0xc7, 4, 0, 0, 0x00000038}, ++ {0xb7, 2, 0, 0, 0x00000000}, ++ {0x65, 4, 0, 1, 0xffffffff}, ++ {0xb7, 7, 0, 0, 0x2cc681d1}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000040}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x598d03a2}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb31a0745}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x66340e8a}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xcc681d15}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000004}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x98d03a2b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000002}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x31a07456}, ++ {0x71, 4, 8, 13, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6340e8ad}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000038}, ++ {0xc7, 3, 0, 0, 0x00000038}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xc681d15b}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8d03a2b7}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1a07456f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x340e8ade}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x681d15bd}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000004}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd03a2b7b}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000002}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa07456f6}, ++ {0x71, 3, 8, 14, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x40e8aded}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000038}, ++ {0xc7, 4, 0, 0, 0x00000038}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x81d15bdb}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000040}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x03a2b7b7}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x07456f6f}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x0e8adedf}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, 
++ {0xa7, 7, 0, 0, 0x1d15bdbf}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000004}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x3a2b7b7e}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000002}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x7456f6fd}, ++ {0x71, 4, 8, 15, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe8adedfa}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000038}, ++ {0xc7, 3, 0, 0, 0x00000038}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd15bdbf4}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa2b7b7e9}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x456f6fd3}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8adedfa7}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x15bdbf4f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000004}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x2b7b7e9e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000002}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x56f6fd3d}, ++ {0x71, 3, 8, 16, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xadedfa7b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000038}, ++ {0xc7, 4, 0, 0, 0x00000038}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x5bdbf4f7}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000040}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb7b7e9ef}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6f6fd3df}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xdedfa7bf}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xbdbf4f7f}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000004}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x7b7e9eff}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000002}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf6fd3dff}, ++ {0x71, 4, 8, 17, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xedfa7bfe}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000038}, ++ {0xc7, 3, 0, 0, 0x00000038}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdbf4f7fc}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb7e9eff9}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6fd3dff2}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdfa7bfe5}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xbf4f7fca}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000004}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7e9eff94}, ++ {0xbf, 3, 4, 0, 0x00000000}, 
++ {0x57, 3, 0, 0, 0x00000002}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfd3dff28}, ++ {0x71, 3, 8, 18, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfa7bfe51}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x67, 6, 0, 0, 0x00000038}, ++ {0xc7, 6, 0, 0, 0x00000038}, ++ {0xbf, 4, 5, 0, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xf4f7fca2}, ++ {0x6d, 2, 6, 1, 0x00000000}, ++ {0xbf, 4, 5, 0, 0x00000000}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000040}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xe9eff945}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000020}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd3dff28a}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000010}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xa7bfe514}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000008}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x4f7fca28}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x9eff9450}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x3dff28a0}, ++ {0x71, 5, 8, 19, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x7bfe5141}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000038}, ++ {0xc7, 3, 0, 0, 0x00000038}, ++ {0xbf, 7, 4, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf7fca283}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 7, 4, 0, 0x00000000}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xeff94506}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xdff28a0c}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xbfe51418}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x7fca2831}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000004}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xff945063}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000002}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xff28a0c6}, ++ {0x57, 5, 0, 0, 0x00000001}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xfe51418c}, ++ {0xbf, 4, 1, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000020}, ++ {0xc7, 4, 0, 0, 0x00000020}, ++ {0xbf, 3, 7, 0, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xfca28319}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 3, 7, 0, 0x00000000}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x40000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf9450633}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x20000000}, ++ {0x79, 6, 10, -56, 0x00000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf28a0c67}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x10000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xe51418ce}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x08000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xca28319d}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x04000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x9450633b}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x02000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x28a0c676}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x01000000}, ++ {0x15, 2, 0, 1, 
0x00000000}, ++ {0xa7, 3, 0, 0, 0x51418ced}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00800000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xa28319db}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00400000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x450633b6}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00200000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x8a0c676c}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00100000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x1418ced8}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00080000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x28319db1}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00040000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x50633b63}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00020000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xa0c676c6}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00010000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x418ced8d}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00008000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x8319db1a}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00004000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x0633b634}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00002000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x0c676c68}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00001000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x18ced8d1}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000800}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x319db1a3}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000400}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x633b6347}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000200}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xc676c68f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000100}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x8ced8d1f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000080}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x19db1a3e}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000040}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x33b6347d}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000020}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x676c68fa}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000010}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xced8d1f4}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000008}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x9db1a3e9}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000004}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x3b6347d2}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000002}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x76c68fa5}, ++ {0x57, 1, 0, 0, 0x00000001}, ++ {0x15, 1, 0, 1194, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xed8d1f4a}, ++ {0x05, 0, 0, 1192, 0x00000000}, ++ {0x0f, 8, 1, 0, 0x00000000}, ++ {0xb7, 7, 0, 0, 0x00000000}, ++ {0xbf, 1, 8, 0, 0x00000000}, ++ {0x07, 1, 0, 0, 0x0000002c}, ++ {0x2d, 1, 9, 1216, 0x00000000}, ++ {0x61, 2, 8, 8, 0x00000000}, ++ {0xdc, 2, 0, 0, 0x00000040}, ++ {0xc7, 2, 0, 0, 0x00000020}, ++ {0x71, 3, 8, 6, 0x00000000}, ++ {0x15, 3, 0, 2, 0x00000011}, ++ {0xb7, 1, 0, 0, 0x00000000}, ++ {0x55, 3, 0, 12, 0x00000006}, ++ {0xbf, 3, 8, 0, 0x00000000}, ++ {0x07, 
3, 0, 0, 0x00000028}, ++ {0x71, 4, 3, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000018}, ++ {0x71, 1, 3, 1, 0x00000000}, ++ {0x67, 1, 0, 0, 0x00000010}, ++ {0x4f, 1, 4, 0, 0x00000000}, ++ {0x71, 4, 3, 3, 0x00000000}, ++ {0x4f, 1, 4, 0, 0x00000000}, ++ {0x71, 3, 3, 2, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000008}, ++ {0x4f, 1, 3, 0, 0x00000000}, ++ {0xbf, 4, 2, 0, 0x00000000}, ++ {0x77, 4, 0, 0, 0x0000001f}, ++ {0x57, 4, 0, 0, 0x2cc681d1}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x40000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x598d03a2}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x20000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xb31a0745}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x10000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x66340e8a}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x08000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xcc681d15}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x04000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x98d03a2b}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x02000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x31a07456}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x01000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x6340e8ad}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00800000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xc681d15b}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00400000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x8d03a2b7}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00200000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x1a07456f}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00100000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x340e8ade}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00080000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x681d15bd}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00040000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd03a2b7b}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00020000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xa07456f6}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00010000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x40e8aded}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00008000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x81d15bdb}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00004000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x03a2b7b7}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00002000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x07456f6f}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00001000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x0e8adedf}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000800}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x1d15bdbf}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000400}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x3a2b7b7e}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000200}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x7456f6fd}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000100}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xe8adedfa}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000080}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd15bdbf4}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 
0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xa2b7b7e9}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x456f6fd3}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x8adedfa7}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x15bdbf4f}, ++ {0x61, 3, 8, 12, 0x00000000}, ++ {0xbf, 5, 2, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x2b7b7e9e}, ++ {0xdc, 3, 0, 0, 0x00000040}, ++ {0xbf, 5, 2, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x56f6fd3d}, ++ {0xc7, 3, 0, 0, 0x00000020}, ++ {0x57, 2, 0, 0, 0x00000001}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xadedfa7b}, + {0xb7, 2, 0, 0, 0x00000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x598d03a2}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x5bdbf4f7}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x40000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb7b7e9ef}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x20000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb31a0745}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6f6fd3df}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x10000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x66340e8a}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdedfa7bf}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x08000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcc681d15}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xbdbf4f7f}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x04000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x98d03a2b}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7b7e9eff}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x02000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x31a07456}, +- {0x57, 3, 0, 0, 0x01000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6340e8ad}, +- {0x71, 3, 6, 13, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf6fd3dff}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x01000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xedfa7bfe}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00800000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc681d15b}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdbf4f7fc}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00400000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d03a2b7}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb7e9eff9}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00200000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1a07456f}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6fd3dff2}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00100000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x340e8ade}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdfa7bfe5}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00080000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x681d15bd}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xbf4f7fca}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00040000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- 
{0xa7, 1, 0, 0, 0xd03a2b7b}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7e9eff94}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00020000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa07456f6}, +- {0x57, 3, 0, 0, 0x00010000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x40e8aded}, +- {0x71, 3, 6, 14, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfd3dff28}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00010000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfa7bfe51}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00008000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x81d15bdb}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf4f7fca2}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00004000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x03a2b7b7}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xe9eff945}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00002000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x07456f6f}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd3dff28a}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00001000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0e8adedf}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa7bfe514}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000800}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1d15bdbf}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x4f7fca28}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000400}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3a2b7b7e}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x9eff9450}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000200}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7456f6fd}, +- {0x57, 3, 0, 0, 0x00000100}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe8adedfa}, +- {0x71, 3, 6, 15, 0x00000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x3dff28a0}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000100}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7bfe5141}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000080}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd15bdbf4}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf7fca283}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000040}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa2b7b7e9}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xeff94506}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000020}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x456f6fd3}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdff28a0c}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000010}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8adedfa7}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xbfe51418}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000008}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x15bdbf4f}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000004}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2b7b7e9e}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000002}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x56f6fd3d}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7fca2831}, ++ {0x61, 4, 8, 16, 0x00000000}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000004}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xff945063}, ++ {0xdc, 4, 0, 0, 
0x00000040}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000002}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xff28a0c6}, ++ {0xc7, 4, 0, 0, 0x00000020}, + {0x57, 3, 0, 0, 0x00000001}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xadedfa7b}, +- {0x71, 4, 6, 16, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000038}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0xb7, 3, 0, 0, 0xffffffff}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5bdbf4f7}, +- {0x67, 4, 0, 0, 0x00000018}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb7b7e9ef}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6f6fd3df}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdedfa7bf}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbdbf4f7f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7b7e9eff}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf6fd3dff}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xedfa7bfe}, +- {0x71, 4, 6, 17, 0x00000000}, +- {0x67, 4, 0, 0, 0x00000010}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdbf4f7fc}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb7e9eff9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6fd3dff2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdfa7bfe5}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbf4f7fca}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7e9eff94}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfd3dff28}, +- {0x57, 4, 0, 0, 0x00010000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfa7bfe51}, +- {0x71, 4, 6, 18, 0x00000000}, +- {0x67, 4, 0, 0, 0x00000008}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf4f7fca2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe9eff945}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd3dff28a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa7bfe514}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4f7fca28}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9eff9450}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3dff28a0}, +- {0x57, 4, 0, 0, 0x00000100}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7bfe5141}, +- {0x71, 4, 6, 19, 0x00000000}, +- {0xbf, 5, 4, 0, 
0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf7fca283}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xeff94506}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdff28a0c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbfe51418}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7fca2831}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff945063}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff28a0c6}, +- {0x57, 4, 0, 0, 0x00000001}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfe51418c}, +- {0x71, 4, 6, 20, 0x00000000}, +- {0x67, 4, 0, 0, 0x00000008}, +- {0x71, 5, 6, 21, 0x00000000}, +- {0x4f, 4, 5, 0, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000030}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfca28319}, +- {0x67, 4, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfe51418c}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xfca28319}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x40000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf9450633}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf9450633}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x20000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf28a0c67}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf28a0c67}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x10000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe51418ce}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe51418ce}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x08000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xca28319d}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xca28319d}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x04000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9450633b}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9450633b}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x02000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28a0c676}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x28a0c676}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x01000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x51418ced}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x51418ced}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00800000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa28319db}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xa28319db}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00400000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x450633b6}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x450633b6}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00200000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8a0c676c}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8a0c676c}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00100000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1418ced8}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 
7, 0, 0, 0x1418ced8}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00080000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28319db1}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x28319db1}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00040000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x50633b63}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x50633b63}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00020000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa0c676c6}, +- {0x57, 4, 0, 0, 0x00010000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x418ced8d}, +- {0x71, 3, 6, 22, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00008000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8319db1a}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00004000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0633b634}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xa0c676c6}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00010000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x418ced8d}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00008000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8319db1a}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00004000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x0633b634}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00002000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x0c676c68}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00001000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x18ced8d1}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000800}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x319db1a3}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000400}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x633b6347}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000200}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xc676c68f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000100}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8ced8d1f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000080}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x19db1a3e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x33b6347d}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x676c68fa}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xced8d1f4}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9db1a3e9}, ++ {0x61, 3, 8, 20, 0x00000000}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x3b6347d2}, ++ {0xdc, 3, 0, 0, 0x00000040}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x76c68fa5}, ++ {0xc7, 3, 0, 0, 0x00000020}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xed8d1f4a}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdb1a3e94}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x40000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb6347d28}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 
0, 0x20000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6c68fa51}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x10000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd8d1f4a3}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x08000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb1a3e946}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x04000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6347d28d}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x02000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xc68fa51a}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x01000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8d1f4a35}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00800000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1a3e946b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00400000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x347d28d7}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00200000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x68fa51ae}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00100000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd1f4a35c}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00080000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa3e946b9}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00040000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x47d28d73}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00020000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8fa51ae7}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00010000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1f4a35cf}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00008000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x3e946b9e}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00004000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7d28d73c}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00002000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0c676c68}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfa51ae78}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00001000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x18ced8d1}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf4a35cf1}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000800}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x319db1a3}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xe946b9e3}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000400}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x633b6347}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd28d73c7}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000200}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc676c68f}, +- {0x57, 3, 0, 0, 0x00000100}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8ced8d1f}, +- {0x71, 3, 6, 23, 0x00000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa51ae78e}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000100}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x4a35cf1c}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000080}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x19db1a3e}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x946b9e38}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000040}, +- {0x79, 5, 10, -56, 0x00000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 
0x33b6347d}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x28d73c71}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000020}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x676c68fa}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x51ae78e3}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000010}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xced8d1f4}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa35cf1c6}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000008}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9db1a3e9}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000004}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3b6347d2}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000002}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x76c68fa5}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x46b9e38d}, ++ {0x61, 4, 8, 24, 0x00000000}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000004}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8d73c71b}, ++ {0xdc, 4, 0, 0, 0x00000040}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000002}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1ae78e36}, ++ {0xc7, 4, 0, 0, 0x00000020}, + {0x57, 3, 0, 0, 0x00000001}, +- {0x1d, 3, 2, 1177, 0x00000000}, +- {0xa7, 1, 0, 0, 0xed8d1f4a}, +- {0x05, 0, 0, 1175, 0x00000000}, +- {0x0f, 6, 1, 0, 0x00000000}, +- {0xb7, 7, 0, 0, 0x00000000}, +- {0xbf, 1, 6, 0, 0x00000000}, +- {0x07, 1, 0, 0, 0x0000002c}, +- {0x2d, 1, 9, 1202, 0x00000000}, +- {0x61, 4, 6, 8, 0x00000000}, +- {0xbf, 1, 4, 0, 0x00000000}, +- {0x67, 1, 0, 0, 0x00000038}, +- {0xc7, 1, 0, 0, 0x00000020}, +- {0x77, 1, 0, 0, 0x0000001f}, +- {0x57, 1, 0, 0, 0x2cc681d1}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x35cf1c6c}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6b9e38d9}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000040}, +- {0xb7, 2, 0, 0, 0x00000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x598d03a2}, ++ {0x57, 3, 0, 0, 0x40000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xd73c71b2}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000020}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb31a0745}, ++ {0x57, 3, 0, 0, 0x20000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xae78e364}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000010}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x66340e8a}, ++ {0x57, 3, 0, 0, 0x10000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x5cf1c6c9}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000008}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcc681d15}, ++ {0x57, 3, 0, 0, 0x08000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb9e38d92}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000004}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x98d03a2b}, ++ {0x57, 3, 0, 0, 0x04000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x73c71b25}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000002}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x31a07456}, ++ {0x57, 3, 0, 0, 0x02000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe78e364b}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000001}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6340e8ad}, ++ {0x57, 3, 0, 0, 0x01000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xcf1c6c96}, ++ {0xbf, 3, 4, 0, 
0x00000000}, ++ {0x57, 3, 0, 0, 0x00800000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9e38d92c}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00400000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x3c71b259}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00200000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x78e364b2}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00100000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf1c6c964}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00080000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe38d92c9}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00040000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xc71b2593}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00020000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8e364b27}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00010000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x1c6c964e}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00008000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc681d15b}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x38d92c9c}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00004000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d03a2b7}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x71b25938}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00002000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1a07456f}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe364b270}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00001000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x340e8ade}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xc6c964e0}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00000800}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x681d15bd}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8d92c9c0}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00000400}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd03a2b7b}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x1b259380}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00000200}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa07456f6}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x364b2700}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00000100}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x40e8aded}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6c964e01}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000080}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xd92c9c03}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb2593807}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x64b2700f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xc964e01e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x92c9c03d}, ++ {0x61, 3, 8, 28, 0x00000000}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x2593807a}, ++ {0xdc, 3, 0, 0, 0x00000040}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x4b2700f4}, ++ {0xc7, 3, 0, 0, 0x00000020}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ 
{0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x964e01e8}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x2c9c03d1}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x40000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x593807a3}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x20000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb2700f46}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x10000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x64e01e8d}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x08000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xc9c03d1a}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x04000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x93807a35}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x02000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x2700f46b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x01000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x4e01e8d6}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00800000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x9c03d1ad}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00400000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x3807a35b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00200000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x700f46b6}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00100000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xe01e8d6c}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00080000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xc03d1ad9}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00040000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x807a35b3}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00020000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x00f46b66}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00010000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x01e8d6cc}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00008000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x03d1ad99}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00004000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x07a35b32}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00002000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x0f46b665}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00001000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1e8d6cca}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000800}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x3d1ad994}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000400}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7a35b328}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000200}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf46b6651}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000100}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xe8d6cca2}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000080}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd1ad9944}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000040}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa35b3289}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x46b66512}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ 
{0x57, 4, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8d6cca25}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1ad9944a}, ++ {0x61, 4, 8, 32, 0x00000000}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000004}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x35b32894}, ++ {0xdc, 4, 0, 0, 0x00000040}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000002}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6b665129}, ++ {0xc7, 4, 0, 0, 0x00000020}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd6cca253}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xad9944a7}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x40000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x5b32894f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x20000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb665129f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x10000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6cca253e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x08000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xd9944a7d}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x04000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb32894fb}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x02000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x665129f6}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x01000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xcca253ec}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00800000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x81d15bdb}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9944a7d9}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00400000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x03a2b7b7}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x32894fb2}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00200000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x07456f6f}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x65129f65}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00100000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0e8adedf}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xca253eca}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00080000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1d15bdbf}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x944a7d95}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00040000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3a2b7b7e}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x2894fb2a}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00020000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7456f6fd}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x5129f655}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00010000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe8adedfa}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xa253ecab}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00008000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x44a7d956}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00004000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x894fb2ac}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00002000}, ++ {0x15, 3, 0, 1, 
0x00000000}, ++ {0xa7, 7, 0, 0, 0x129f6558}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00001000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x253ecab1}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000800}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x4a7d9563}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000400}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x94fb2ac7}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000200}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x29f6558f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000100}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x53ecab1e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000080}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xa7d9563d}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x4fb2ac7a}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9f6558f5}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x3ecab1ea}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x7d9563d5}, ++ {0x61, 3, 8, 36, 0x00000000}, + {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0xb7, 3, 0, 0, 0xffffffff}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd15bdbf4}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xfb2ac7ab}, ++ {0xdc, 3, 0, 0, 0x00000040}, + {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf6558f56}, ++ {0xc7, 3, 0, 0, 0x00000020}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xecab1eac}, ++ {0xbf, 4, 7, 0, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd9563d59}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 4, 7, 0, 0x00000000}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa2b7b7e9}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xb2ac7ab2}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x456f6fd3}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x79, 6, 10, -56, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x6558f564}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8adedfa7}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xcab1eac8}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x15bdbf4f}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x9563d590}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2b7b7e9e}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x2ac7ab20}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x56f6fd3d}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xadedfa7b}, +- {0x61, 4, 6, 12, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 
0x558f5641}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x01000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xab1eac83}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00800000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x563d5906}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00400000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xac7ab20c}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00200000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x58f56418}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00100000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xb1eac831}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00080000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x63d59063}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00040000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xc7ab20c7}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00020000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x8f56418f}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00010000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x1eac831e}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00008000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x3d59063c}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00004000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x7ab20c78}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00002000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xf56418f0}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00001000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xeac831e1}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000800}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd59063c2}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000400}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xab20c784}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000200}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x56418f09}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000100}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xac831e12}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5bdbf4f7}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x59063c25}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb7b7e9ef}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xb20c784b}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6f6fd3df}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x6418f097}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdedfa7bf}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xc831e12f}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbdbf4f7f}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x9063c25f}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7b7e9eff}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x20c784be}, ++ {0xbf, 5, 3, 0, 
0x00000000}, + {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf6fd3dff}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xedfa7bfe}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdbf4f7fc}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb7e9eff9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6fd3dff2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdfa7bfe5}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbf4f7fca}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7e9eff94}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfd3dff28}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfa7bfe51}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf4f7fca2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe9eff945}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd3dff28a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa7bfe514}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4f7fca28}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9eff9450}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3dff28a0}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7bfe5141}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf7fca283}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xeff94506}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdff28a0c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbfe51418}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7fca2831}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff945063}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff28a0c6}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfe51418c}, +- {0x61, 4, 6, 16, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfca28319}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 
0x00000000}, +- {0xa7, 1, 0, 0, 0xf9450633}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf28a0c67}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe51418ce}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xca28319d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9450633b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28a0c676}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x51418ced}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa28319db}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x450633b6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8a0c676c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1418ced8}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28319db1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x50633b63}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa0c676c6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x418ced8d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8319db1a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0633b634}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0c676c68}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x18ced8d1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x319db1a3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x633b6347}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc676c68f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8ced8d1f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x19db1a3e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x33b6347d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x676c68fa}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xced8d1f4}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9db1a3e9}, +- {0xbf, 5, 4, 0, 
0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3b6347d2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x76c68fa5}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xed8d1f4a}, +- {0x61, 4, 6, 20, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdb1a3e94}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb6347d28}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6c68fa51}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd8d1f4a3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb1a3e946}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6347d28d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc68fa51a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d1f4a35}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1a3e946b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x347d28d7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x68fa51ae}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd1f4a35c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa3e946b9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x47d28d73}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8fa51ae7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1f4a35cf}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3e946b9e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7d28d73c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfa51ae78}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf4a35cf1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe946b9e3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd28d73c7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa51ae78e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4a35cf1c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 
0x00000000}, +- {0xa7, 1, 0, 0, 0x946b9e38}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28d73c71}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x51ae78e3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa35cf1c6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x46b9e38d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d73c71b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1ae78e36}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x35cf1c6c}, +- {0x61, 4, 6, 24, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6b9e38d9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd73c71b2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xae78e364}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5cf1c6c9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb9e38d92}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x73c71b25}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe78e364b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcf1c6c96}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9e38d92c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3c71b259}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x78e364b2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf1c6c964}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe38d92c9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc71b2593}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8e364b27}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1c6c964e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x38d92c9c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x71b25938}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe364b270}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc6c964e0}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 
0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d92c9c0}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1b259380}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x364b2700}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6c964e01}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd92c9c03}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb2593807}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x64b2700f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc964e01e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x92c9c03d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2593807a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4b2700f4}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x964e01e8}, +- {0x61, 4, 6, 28, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2c9c03d1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x593807a3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb2700f46}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x64e01e8d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc9c03d1a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x93807a35}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2700f46b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4e01e8d6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9c03d1ad}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3807a35b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x700f46b6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe01e8d6c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc03d1ad9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x807a35b3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x00f46b66}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 
0x01e8d6cc}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x03d1ad99}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x07a35b32}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0f46b665}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1e8d6cca}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3d1ad994}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7a35b328}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf46b6651}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe8d6cca2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd1ad9944}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa35b3289}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x46b66512}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d6cca25}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1ad9944a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x35b32894}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6b665129}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd6cca253}, +- {0x61, 4, 6, 32, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xad9944a7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5b32894f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb665129f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6cca253e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd9944a7d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb32894fb}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x665129f6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcca253ec}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9944a7d9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x32894fb2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x65129f65}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 
0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xca253eca}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x944a7d95}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2894fb2a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5129f655}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa253ecab}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x44a7d956}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x894fb2ac}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x129f6558}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x253ecab1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4a7d9563}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x94fb2ac7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x29f6558f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x53ecab1e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa7d9563d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4fb2ac7a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9f6558f5}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3ecab1ea}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7d9563d5}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfb2ac7ab}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf6558f56}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xecab1eac}, +- {0x61, 4, 6, 36, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd9563d59}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb2ac7ab2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6558f564}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcab1eac8}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9563d590}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2ac7ab20}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 
0x558f5641}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xab1eac83}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x563d5906}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xac7ab20c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x58f56418}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb1eac831}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x63d59063}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc7ab20c7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8f56418f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1eac831e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3d59063c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7ab20c78}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf56418f0}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xeac831e1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd59063c2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xab20c784}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x56418f09}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xac831e12}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x418f097c}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x831e12f9}, ++ {0xbf, 5, 1, 0, 0x00000000}, + {0x67, 5, 0, 0, 0x00000020}, + {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x59063c25}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb20c784b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6418f097}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc831e12f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9063c25f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x20c784be}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x418f097c}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x831e12f9}, +- {0x71, 4, 6, 40, 0x00000000}, +- {0x67, 4, 0, 0, 0x00000008}, +- {0x71, 5, 6, 41, 0x00000000}, +- {0x4f, 4, 5, 0, 0x00000000}, +- {0xbf, 5, 4, 0, 
0x00000000}, +- {0x67, 5, 0, 0, 0x00000030}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x063c25f3}, +- {0x67, 4, 0, 0, 0x00000010}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x40000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0c784be7}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x20000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x18f097cf}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x10000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x31e12f9f}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x08000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x63c25f3f}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x04000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc784be7f}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x02000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8f097cff}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x01000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1e12f9fe}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00800000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3c25f3fc}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00400000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x784be7f8}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00200000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf097cff0}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00100000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe12f9fe0}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00080000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc25f3fc1}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00040000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x84be7f83}, ++ {0xa7, 3, 0, 0, 0x063c25f3}, ++ {0x6d, 2, 5, 1, 0x00000000}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00020000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x097cff07}, +- {0x57, 4, 0, 0, 0x00010000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x12f9fe0f}, +- {0x71, 3, 6, 42, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00008000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x25f3fc1f}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00004000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4be7f83f}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00002000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x97cff07f}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00001000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2f9fe0fe}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000800}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5f3fc1fd}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000400}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbe7f83fb}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000200}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7cff07f7}, +- {0x57, 3, 0, 0, 0x00000100}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf9fe0fee}, +- {0x71, 3, 6, 43, 0x00000000}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000080}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf3fc1fdc}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000040}, +- {0x79, 5, 10, -56, 0x00000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe7f83fb8}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 
0x00000020}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcff07f70}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000010}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9fe0fee1}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000008}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3fc1fdc2}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000004}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7f83fb85}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000002}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff07f70a}, +- {0x57, 3, 0, 0, 0x00000001}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfe0fee15}, +- {0x71, 2, 0, 201, 0x00000000}, +- {0x67, 2, 0, 0, 0x00000008}, +- {0x71, 3, 0, 200, 0x00000000}, +- {0x4f, 2, 3, 0, 0x00000000}, +- {0x71, 3, 0, 203, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, +- {0x71, 4, 0, 202, 0x00000000}, +- {0x4f, 3, 4, 0, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000010}, +- {0x4f, 3, 2, 0, 0x00000000}, +- {0x67, 1, 0, 0, 0x00000020}, +- {0x77, 1, 0, 0, 0x00000020}, + {0xbf, 2, 1, 0, 0x00000000}, +- {0x3f, 2, 3, 0, 0x00000000}, +- {0x2f, 2, 3, 0, 0x00000000}, +- {0x1f, 1, 2, 0, 0x00000000}, +- {0x57, 1, 0, 0, 0x0000000f}, +- {0x67, 1, 0, 0, 0x00000002}, +- {0x0f, 0, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x40000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x0c784be7}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x20000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x18f097cf}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x10000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x31e12f9f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x08000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x63c25f3f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x04000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xc784be7f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x02000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x8f097cff}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x01000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x1e12f9fe}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00800000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x3c25f3fc}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00400000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x784be7f8}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00200000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf097cff0}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00100000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xe12f9fe0}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00080000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xc25f3fc1}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00040000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x84be7f83}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00020000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x097cff07}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00010000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x12f9fe0f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00008000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x25f3fc1f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00004000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x4be7f83f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00002000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 
0, 0, 0x97cff07f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00001000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x2f9fe0fe}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000800}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x5f3fc1fd}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000400}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xbe7f83fb}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000200}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x7cff07f7}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000100}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf9fe0fee}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000080}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf3fc1fdc}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000040}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xe7f83fb8}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000020}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xcff07f70}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000010}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x9fe0fee1}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000008}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x3fc1fdc2}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000004}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x7f83fb85}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000002}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xff07f70a}, ++ {0x57, 1, 0, 0, 0x00000001}, ++ {0x15, 1, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xfe0fee15}, ++ {0x71, 1, 0, 201, 0x00000000}, ++ {0x67, 1, 0, 0, 0x00000008}, ++ {0x71, 2, 0, 200, 0x00000000}, ++ {0x4f, 1, 2, 0, 0x00000000}, ++ {0x71, 2, 0, 202, 0x00000000}, ++ {0x67, 2, 0, 0, 0x00000010}, ++ {0x71, 4, 0, 203, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000018}, ++ {0x4f, 4, 2, 0, 0x00000000}, ++ {0x4f, 4, 1, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000020}, ++ {0x77, 3, 0, 0, 0x00000020}, ++ {0x9f, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x0000000f}, ++ {0x67, 3, 0, 0, 0x00000002}, ++ {0x0f, 0, 3, 0, 0x00000000}, + {0x71, 1, 0, 137, 0x00000000}, + {0x67, 1, 0, 0, 0x00000008}, + {0x71, 2, 0, 136, 0x00000000}, + {0x4f, 1, 2, 0, 0x00000000}, + {0x71, 2, 0, 138, 0x00000000}, ++ {0x67, 2, 0, 0, 0x00000010}, + {0x71, 3, 0, 139, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, ++ {0x67, 3, 0, 0, 0x00000018}, + {0x4f, 3, 2, 0, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000010}, + {0x4f, 3, 1, 0, 0x00000000}, + {0x07, 3, 0, 0, 0x7cafe800}, +- {0x63, 5, 3, 52, 0x00000000}, ++ {0x63, 6, 3, 52, 0x00000000}, + {0xb7, 7, 0, 0, 0x00000001}, + {0xbf, 0, 7, 0, 0x00000000}, + {0x95, 0, 0, 0, 0x00000000}, +diff --git a/dpdk/drivers/net/tap/tap_bpf_program.c b/dpdk/drivers/net/tap/tap_bpf_program.c +index 20c310e5e7..d9bb65831a 100644 +--- a/dpdk/drivers/net/tap/tap_bpf_program.c ++++ b/dpdk/drivers/net/tap/tap_bpf_program.c +@@ -131,6 +131,8 @@ rss_l3_l4(struct __sk_buff *skb) + __u8 *key = 0; + __u32 len; + __u32 queue = 0; ++ bool mf = 0; ++ __u16 frag_off = 0; + + rsskey = map_lookup_elem(&map_keys, &key_idx); + if (!rsskey) { +@@ -155,6 +157,8 @@ rss_l3_l4(struct __sk_buff *skb) + return TC_ACT_OK; + + __u8 *src_dst_addr = data + off + offsetof(struct iphdr, saddr); ++ __u8 *frag_off_addr = data + off + offsetof(struct iphdr, frag_off); ++ __u8 *prot_addr = data + off + offsetof(struct iphdr, protocol); + __u8 *src_dst_port = data + off + sizeof(struct iphdr); + struct ipv4_l3_l4_tuple v4_tuple 
= { + .src_addr = IPv4(*(src_dst_addr + 0), +@@ -165,11 +169,25 @@ rss_l3_l4(struct __sk_buff *skb) + *(src_dst_addr + 5), + *(src_dst_addr + 6), + *(src_dst_addr + 7)), +- .sport = PORT(*(src_dst_port + 0), +- *(src_dst_port + 1)), +- .dport = PORT(*(src_dst_port + 2), +- *(src_dst_port + 3)), ++ .sport = 0, ++ .dport = 0, + }; ++ /** Fetch the L4-payer port numbers only in-case of TCP/UDP ++ ** and also if the packet is not fragmented. Since fragmented ++ ** chunks do not have L4 TCP/UDP header. ++ **/ ++ if (*prot_addr == IPPROTO_UDP || *prot_addr == IPPROTO_TCP) { ++ frag_off = PORT(*(frag_off_addr + 0), ++ *(frag_off_addr + 1)); ++ mf = frag_off & 0x2000; ++ frag_off = frag_off & 0x1fff; ++ if (mf == 0 && frag_off == 0) { ++ v4_tuple.sport = PORT(*(src_dst_port + 0), ++ *(src_dst_port + 1)); ++ v4_tuple.dport = PORT(*(src_dst_port + 2), ++ *(src_dst_port + 3)); ++ } ++ } + __u8 input_len = sizeof(v4_tuple) / sizeof(__u32); + if (rsskey->hash_fields & (1 << HASH_FIELD_IPV4_L3)) + input_len--; +@@ -182,6 +200,9 @@ rss_l3_l4(struct __sk_buff *skb) + offsetof(struct ipv6hdr, saddr); + __u8 *src_dst_port = data + off + + sizeof(struct ipv6hdr); ++ __u8 *next_hdr = data + off + ++ offsetof(struct ipv6hdr, nexthdr); ++ + struct ipv6_l3_l4_tuple v6_tuple; + for (j = 0; j < 4; j++) + *((uint32_t *)&v6_tuple.src_addr + j) = +@@ -191,10 +212,18 @@ rss_l3_l4(struct __sk_buff *skb) + *((uint32_t *)&v6_tuple.dst_addr + j) = + __builtin_bswap32(*((uint32_t *) + src_dst_addr + 4 + j)); +- v6_tuple.sport = PORT(*(src_dst_port + 0), +- *(src_dst_port + 1)); +- v6_tuple.dport = PORT(*(src_dst_port + 2), +- *(src_dst_port + 3)); ++ ++ /** Fetch the L4 header port-numbers only if next-header ++ * is TCP/UDP **/ ++ if (*next_hdr == IPPROTO_UDP || *next_hdr == IPPROTO_TCP) { ++ v6_tuple.sport = PORT(*(src_dst_port + 0), ++ *(src_dst_port + 1)); ++ v6_tuple.dport = PORT(*(src_dst_port + 2), ++ *(src_dst_port + 3)); ++ } else { ++ v6_tuple.sport = 0; ++ v6_tuple.dport = 0; ++ } + + __u8 input_len = sizeof(v6_tuple) / sizeof(__u32); + if (rsskey->hash_fields & (1 << HASH_FIELD_IPV6_L3)) diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.c b/dpdk/drivers/net/txgbe/base/txgbe_hw.c index 8966453a03..de96549ae8 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_hw.c @@ -30383,6 +46356,33 @@ index 8966453a03..de96549ae8 100644 usec_delay(10); txgbe_reset_misc(hw); +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_mng.c b/dpdk/drivers/net/txgbe/base/txgbe_mng.c +index df7145094f..029a0a1fe1 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_mng.c ++++ b/dpdk/drivers/net/txgbe/base/txgbe_mng.c +@@ -141,21 +141,7 @@ txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, + for (bi = 0; bi < dword_len; bi++) + buffer[bi] = rd32a(hw, TXGBE_MNGMBX, bi); + +- /* +- * If there is any thing in data position pull it in +- * Read Flash command requires reading buffer length from +- * two byes instead of one byte +- */ +- if (resp->cmd == 0x30) { +- for (; bi < dword_len + 2; bi++) +- buffer[bi] = rd32a(hw, TXGBE_MNGMBX, bi); +- +- buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3) +- & 0xF00) | resp->buf_len; +- hdr_size += (2 << 2); +- } else { +- buf_len = resp->buf_len; +- } ++ buf_len = resp->buf_len; + if (!buf_len) + goto rel_out; + diff --git a/dpdk/drivers/net/txgbe/base/txgbe_phy.c b/dpdk/drivers/net/txgbe/base/txgbe_phy.c index 9f46d5bdb0..a7c11c50df 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_phy.c @@ -30522,7 +46522,7 @@ index c3486b472f..75e839b7de 100644 uint64_t isb_dma; diff --git 
a/dpdk/drivers/net/txgbe/txgbe_ethdev.c b/dpdk/drivers/net/txgbe/txgbe_ethdev.c -index 86ef979b29..2c7d71c0db 100644 +index 86ef979b29..001f8c6473 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev.c +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.c @@ -179,7 +179,9 @@ static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = { @@ -30551,7 +46551,27 @@ index 86ef979b29..2c7d71c0db 100644 rte_eth_copy_pci_info(eth_dev, pci_dev); hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; -@@ -1530,6 +1534,25 @@ txgbe_dev_configure(struct rte_eth_dev *dev) +@@ -1494,6 +1498,19 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) + return -EINVAL; + } + } ++ ++ /* ++ * When DCB/VT is off, maximum number of queues changes ++ */ ++ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE) { ++ if (nb_tx_q > TXGBE_NONE_MODE_TX_NB_QUEUES) { ++ PMD_INIT_LOG(ERR, ++ "Neither VT nor DCB are enabled, " ++ "nb_tx_q > %d.", ++ TXGBE_NONE_MODE_TX_NB_QUEUES); ++ return -EINVAL; ++ } ++ } + } + return 0; + } +@@ -1530,6 +1547,25 @@ txgbe_dev_configure(struct rte_eth_dev *dev) return 0; } @@ -30577,7 +46597,7 @@ index 86ef979b29..2c7d71c0db 100644 static void txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) { -@@ -1647,7 +1670,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1647,7 +1683,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Stop the link setup handler before resetting the HW. */ @@ -30586,7 +46606,7 @@ index 86ef979b29..2c7d71c0db 100644 /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); -@@ -1668,6 +1691,10 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1668,6 +1704,10 @@ txgbe_dev_start(struct rte_eth_dev *dev) hw->mac.get_link_status = true; hw->dev_start = true; @@ -30597,7 +46617,7 @@ index 86ef979b29..2c7d71c0db 100644 /* configure PF module if SRIOV enabled */ txgbe_pf_host_configure(dev); -@@ -1786,6 +1813,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1786,6 +1826,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) speed = (TXGBE_LINK_SPEED_100M_FULL | TXGBE_LINK_SPEED_1GB_FULL | TXGBE_LINK_SPEED_10GB_FULL); @@ -30605,7 +46625,7 @@ index 86ef979b29..2c7d71c0db 100644 } else { if (*link_speeds & RTE_ETH_LINK_SPEED_10G) speed |= TXGBE_LINK_SPEED_10GB_FULL; -@@ -1797,6 +1825,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1797,6 +1838,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) speed |= TXGBE_LINK_SPEED_1GB_FULL; if (*link_speeds & RTE_ETH_LINK_SPEED_100M) speed |= TXGBE_LINK_SPEED_100M_FULL; @@ -30613,7 +46633,12 @@ index 86ef979b29..2c7d71c0db 100644 } err = hw->mac.setup_link(hw, speed, link_up); -@@ -1879,11 +1908,15 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1875,15 +1917,19 @@ txgbe_dev_stop(struct rte_eth_dev *dev) + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + + if (hw->adapter_stopped) +- return 0; ++ goto out; PMD_INIT_FUNC_TRACE(); @@ -30630,7 +46655,49 @@ index 86ef979b29..2c7d71c0db 100644 /* reset the NIC */ txgbe_pf_reset_hw(hw); hw->adapter_stopped = 0; -@@ -2019,8 +2052,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) +@@ -1894,14 +1940,6 @@ txgbe_dev_stop(struct rte_eth_dev *dev) + for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) + vfinfo[vf].clear_to_send = false; + +- if (hw->phy.media_type == txgbe_media_type_copper) { +- /* Turn off the copper */ +- hw->phy.set_phy_power(hw, false); +- } else { +- /* Turn off the laser */ +- hw->mac.disable_tx_laser(hw); +- } +- + txgbe_dev_clear_queues(dev); + + /* Clear stored conf */ +@@ -1932,6 +1970,16 @@ txgbe_dev_stop(struct rte_eth_dev *dev) + 
dev->data->dev_started = 0; + hw->dev_start = false; + ++out: ++ /* close phy to prevent reset in dev_close from restarting physical link */ ++ if (hw->phy.media_type == txgbe_media_type_copper) { ++ /* Turn off the copper */ ++ hw->phy.set_phy_power(hw, false); ++ } else { ++ /* Turn off the laser */ ++ hw->mac.disable_tx_laser(hw); ++ } ++ + return 0; + } + +@@ -1991,6 +2039,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + txgbe_pf_reset_hw(hw); + + ret = txgbe_dev_stop(dev); +@@ -2019,8 +2070,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) rte_delay_ms(100); } while (retries++ < (10 + TXGBE_LINK_UP_TIME)); @@ -30641,7 +46708,7 @@ index 86ef979b29..2c7d71c0db 100644 /* uninitialize PF if max_vfs not zero */ txgbe_pf_host_uninit(dev); -@@ -2690,11 +2724,52 @@ txgbe_dev_setup_link_alarm_handler(void *param) +@@ -2690,11 +2742,52 @@ txgbe_dev_setup_link_alarm_handler(void *param) intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; } @@ -30694,7 +46761,7 @@ index 86ef979b29..2c7d71c0db 100644 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); struct rte_eth_link link; u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; -@@ -2731,10 +2806,24 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -2731,10 +2824,24 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, if ((hw->subsystem_device_id & 0xFF) == TXGBE_DEV_ID_KR_KX_KX4) { hw->mac.bp_down_event(hw); @@ -30723,7 +46790,19 @@ index 86ef979b29..2c7d71c0db 100644 } return rte_eth_linkstatus_set(dev, &link); } else if (!hw->dev_start) { -@@ -2949,9 +3038,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, +@@ -2773,6 +2880,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, + break; + } + ++ /* Re configure MAC RX */ ++ if (hw->mac.type == txgbe_mac_raptor) ++ wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_PROMISC, ++ TXGBE_MACRXFLT_PROMISC); ++ + return rte_eth_linkstatus_set(dev, &link); + } + +@@ -2949,9 +3061,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX) wr32(hw, TXGBE_PX_INTA, 1); @@ -30733,7 +46812,7 @@ index 86ef979b29..2c7d71c0db 100644 /* read-on-clear nic registers here */ eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC]; PMD_DRV_LOG(DEBUG, "eicr %x", eicr); -@@ -2974,6 +3060,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, +@@ -2974,6 +3083,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, if (eicr & TXGBE_ICRMISC_GPIO) intr->flags |= TXGBE_FLAG_PHY_INTERRUPT; @@ -30742,7 +46821,7 @@ index 86ef979b29..2c7d71c0db 100644 return 0; } -@@ -3143,7 +3231,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) +@@ -3143,7 +3254,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) } /* restore original mask */ @@ -30753,10 +46832,18 @@ index 86ef979b29..2c7d71c0db 100644 intr->mask = intr->mask_orig; intr->mask_orig = 0; diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.h b/dpdk/drivers/net/txgbe/txgbe_ethdev.h -index 6a18865e23..b8a39204e2 100644 +index 6a18865e23..545ce4c9e1 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev.h +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.h -@@ -369,6 +369,9 @@ struct txgbe_adapter { +@@ -40,6 +40,7 @@ + /*Default value of Max Rx Queue*/ + #define TXGBE_MAX_RX_QUEUE_NUM 128 + #define TXGBE_VMDQ_DCB_NB_QUEUES TXGBE_MAX_RX_QUEUE_NUM ++#define TXGBE_NONE_MODE_TX_NB_QUEUES 64 + + #ifndef NBBY + #define NBBY 8 /* number of bits in a byte */ +@@ -369,6 +370,9 @@ struct txgbe_adapter { /* For RSS reta table update */ uint8_t 
rss_reta_updated; @@ -30766,7 +46853,7 @@ index 6a18865e23..b8a39204e2 100644 }; #define TXGBE_DEV_ADAPTER(dev) \ -@@ -560,6 +563,9 @@ void txgbe_configure_dcb(struct rte_eth_dev *dev); +@@ -560,6 +564,9 @@ void txgbe_configure_dcb(struct rte_eth_dev *dev); int txgbe_dev_link_update_share(struct rte_eth_dev *dev, int wait_to_complete); @@ -30814,8 +46901,56 @@ index 3b1f7c913b..f1341fbf7e 100644 txgbevf_intr_disable(dev); +diff --git a/dpdk/drivers/net/txgbe/txgbe_ptypes.c b/dpdk/drivers/net/txgbe/txgbe_ptypes.c +index e1299d7363..c444d5d3f1 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_ptypes.c ++++ b/dpdk/drivers/net/txgbe/txgbe_ptypes.c +@@ -320,8 +320,6 @@ txgbe_encode_ptype_tunnel(u32 ptype) + ptid |= TXGBE_PTID_TUN_EI; + break; + case RTE_PTYPE_TUNNEL_GRE: +- ptid |= TXGBE_PTID_TUN_EIG; +- break; + case RTE_PTYPE_TUNNEL_VXLAN: + case RTE_PTYPE_TUNNEL_VXLAN_GPE: + case RTE_PTYPE_TUNNEL_NVGRE: +@@ -332,20 +330,6 @@ txgbe_encode_ptype_tunnel(u32 ptype) + return ptid; + } + +- switch (ptype & RTE_PTYPE_INNER_L2_MASK) { +- case RTE_PTYPE_INNER_L2_ETHER: +- ptid |= TXGBE_PTID_TUN_EIGM; +- break; +- case RTE_PTYPE_INNER_L2_ETHER_VLAN: +- ptid |= TXGBE_PTID_TUN_EIGMV; +- break; +- case RTE_PTYPE_INNER_L2_ETHER_QINQ: +- ptid |= TXGBE_PTID_TUN_EIGMV; +- break; +- default: +- break; +- } +- + switch (ptype & RTE_PTYPE_INNER_L3_MASK) { + case RTE_PTYPE_INNER_L3_IPV4: + case RTE_PTYPE_INNER_L3_IPV4_EXT: +diff --git a/dpdk/drivers/net/txgbe/txgbe_ptypes.h b/dpdk/drivers/net/txgbe/txgbe_ptypes.h +index fa6c347d53..6fa8147f05 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_ptypes.h ++++ b/dpdk/drivers/net/txgbe/txgbe_ptypes.h +@@ -348,4 +348,9 @@ struct txgbe_nvgrehdr { + __be32 tni; + }; + ++struct txgbe_grehdr { ++ __be16 flags; ++ __be16 proto; ++}; ++ + #endif /* _TXGBE_PTYPE_H_ */ diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.c b/dpdk/drivers/net/txgbe/txgbe_rxtx.c -index ac1bba08a3..427f8b82ac 100644 +index ac1bba08a3..24fc34d3c4 100644 --- a/dpdk/drivers/net/txgbe/txgbe_rxtx.c +++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.c @@ -516,20 +516,21 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) @@ -30845,7 +46980,15 @@ index ac1bba08a3..427f8b82ac 100644 ptype |= RTE_PTYPE_L2_ETHER_VLAN; /* L3 level */ -@@ -587,6 +588,16 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) +@@ -571,7 +572,6 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) + ptype |= RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_TUNNEL_GRE; +- ptype |= RTE_PTYPE_INNER_L2_ETHER; + break; + case RTE_MBUF_F_TX_TUNNEL_GENEVE: + ptype |= RTE_PTYPE_L2_ETHER | +@@ -587,6 +587,16 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) break; } @@ -30862,7 +47005,42 @@ index ac1bba08a3..427f8b82ac 100644 return txgbe_encode_ptype(ptype); } -@@ -776,8 +787,7 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -694,22 +704,24 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf) + static inline uint8_t + txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt) + { +- uint64_t l2_none, l2_mac, l2_mac_vlan; ++ uint64_t l2_vxlan, l2_vxlan_mac, l2_vxlan_mac_vlan; ++ uint64_t l2_gre, l2_gre_mac, l2_gre_mac_vlan; + uint8_t ptid = 0; + +- if ((tx_pkt->ol_flags & (RTE_MBUF_F_TX_TUNNEL_VXLAN | +- RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE)) == 0) +- return ptid; ++ l2_vxlan = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); ++ l2_vxlan_mac = l2_vxlan + sizeof(struct rte_ether_hdr); ++ l2_vxlan_mac_vlan = l2_vxlan_mac + sizeof(struct rte_vlan_hdr); + +- l2_none = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); +- l2_mac = 
l2_none + sizeof(struct rte_ether_hdr); +- l2_mac_vlan = l2_mac + sizeof(struct rte_vlan_hdr); ++ l2_gre = sizeof(struct txgbe_grehdr); ++ l2_gre_mac = l2_gre + sizeof(struct rte_ether_hdr); ++ l2_gre_mac_vlan = l2_gre_mac + sizeof(struct rte_vlan_hdr); + +- if (tx_pkt->l2_len == l2_none) ++ if (tx_pkt->l2_len == l2_vxlan || tx_pkt->l2_len == l2_gre) + ptid = TXGBE_PTID_TUN_EIG; +- else if (tx_pkt->l2_len == l2_mac) ++ else if (tx_pkt->l2_len == l2_vxlan_mac || tx_pkt->l2_len == l2_gre_mac) + ptid = TXGBE_PTID_TUN_EIGM; +- else if (tx_pkt->l2_len == l2_mac_vlan) ++ else if (tx_pkt->l2_len == l2_vxlan_mac_vlan || ++ tx_pkt->l2_len == l2_gre_mac_vlan) + ptid = TXGBE_PTID_TUN_EIGMV; + + return ptid; +@@ -776,8 +788,7 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* If hardware offload required */ tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK; if (tx_ol_req) { @@ -30872,6 +47050,97 @@ index ac1bba08a3..427f8b82ac 100644 if (tx_offload.ptid & TXGBE_PTID_PKT_TUN) tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt); tx_offload.l2_len = tx_pkt->l2_len; +@@ -1465,11 +1476,22 @@ txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. ++ * ++ * Meanwhile, to prevent the CPU from executing out of order, we ++ * need to use a proper memory barrier to ensure the memory ++ * ordering below. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->qw1.lo.status; + if (!(staterr & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) + break; ++ ++ /* ++ * Use acquire fence to ensure that status_error which includes ++ * DD bit is loaded before loading of other descriptor words. ++ */ ++ rte_atomic_thread_fence(__ATOMIC_ACQUIRE); ++ + rxd = *rxdp; + + /* +@@ -1715,32 +1737,10 @@ txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, + + next_desc: + /* +- * The code in this whole file uses the volatile pointer to +- * ensure the read ordering of the status and the rest of the +- * descriptor fields (on the compiler level only!!!). This is so +- * UGLY - why not to just use the compiler barrier instead? DPDK +- * even has the rte_compiler_barrier() for that. +- * +- * But most importantly this is just wrong because this doesn't +- * ensure memory ordering in a general case at all. For +- * instance, DPDK is supposed to work on Power CPUs where +- * compiler barrier may just not be enough! +- * +- * I tried to write only this function properly to have a +- * starting point (as a part of an LRO/RSC series) but the +- * compiler cursed at me when I tried to cast away the +- * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm +- * keeping it the way it is for now. +- * +- * The code in this file is broken in so many other places and +- * will just not work on a big endian CPU anyway therefore the +- * lines below will have to be revisited together with the rest +- * of the txgbe PMD. +- * +- * TODO: +- * - Get rid of "volatile" and let the compiler do its job. +- * - Use the proper memory barrier (rte_rmb()) to ensure the +- * memory ordering below. ++ * "Volatile" only prevents caching of the variable marked ++ * volatile. Most important, "volatile" cannot prevent the CPU ++ * from executing out of order. So, it is necessary to use a ++ * proper memory barrier to ensure the memory ordering below. 
+ */ + rxdp = &rx_ring[rx_id]; + staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status); +@@ -1748,6 +1748,12 @@ next_desc: + if (!(staterr & TXGBE_RXD_STAT_DD)) + break; + ++ /* ++ * Use acquire fence to ensure that status_error which includes ++ * DD bit is loaded before loading of other descriptor words. ++ */ ++ rte_atomic_thread_fence(__ATOMIC_ACQUIRE); ++ + rxd = *rxdp; + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " +@@ -2795,6 +2801,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); + } ++ ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { +@@ -2804,6 +2812,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) + txgbe_rx_queue_release_mbufs(rxq); + txgbe_reset_rx_queue(adapter, rxq); + } ++ ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + @@ -4382,7 +4392,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev) */ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - @@ -30881,8 +47150,26 @@ index ac1bba08a3..427f8b82ac 100644 srrctl |= TXGBE_RXCFG_PKTLEN(buf_size); wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl); +@@ -4994,6 +5004,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) + } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); ++ else ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; +@@ -5008,6 +5020,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) + } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); ++ else ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + rte_wmb(); + wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1); + } diff --git a/dpdk/drivers/net/vhost/rte_eth_vhost.c b/dpdk/drivers/net/vhost/rte_eth_vhost.c -index b152279fac..f7e1b268ed 100644 +index b152279fac..acba97dba3 100644 --- a/dpdk/drivers/net/vhost/rte_eth_vhost.c +++ b/dpdk/drivers/net/vhost/rte_eth_vhost.c @@ -78,8 +78,9 @@ struct vhost_queue { @@ -31006,15 +47293,13 @@ index b152279fac..f7e1b268ed 100644 - struct rte_vhost_vring vring; - struct vhost_queue *vq; - int old_intr_enable, ret = 0; -+ struct vhost_queue *vq = dev->data->rx_queues[qid]; - +- - vq = dev->data->rx_queues[qid]; - if (!vq) { - VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid); - return -1; - } -+ if (vq->vid >= 0) -+ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1); ++ struct vhost_queue *vq = dev->data->rx_queues[qid]; - rte_spinlock_lock(&vq->intr_lock); - old_intr_enable = vq->intr_enable; @@ -31027,7 +47312,9 @@ index b152279fac..f7e1b268ed 100644 - vq->intr_enable = old_intr_enable; - return ret; - } -- ++ if (vq->vid >= 0) ++ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1); + - ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring); - if (ret < 0) { - VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid); @@ -31336,9 +47623,11 @@ index b152279fac..f7e1b268ed 100644 rte_spinlock_lock(&state->lock); if (state->cur[vring] == enable) { -@@ -1185,18 +1133,17 @@ eth_dev_start(struct rte_eth_dev *eth_dev) +@@ -1184,22 +1132,27 @@ eth_dev_start(struct rte_eth_dev *eth_dev) + { struct pmd_internal *internal = eth_dev->data->dev_private; struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; ++ uint16_t i; - queue_setup(eth_dev, internal); - @@ -31364,7 +47653,33 @@ index b152279fac..f7e1b268ed 
100644 rte_atomic32_set(&internal->started, 1); update_queuing_status(eth_dev, false); -@@ -1251,6 +1198,8 @@ eth_dev_close(struct rte_eth_dev *dev) ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -1207,11 +1160,17 @@ static int + eth_dev_stop(struct rte_eth_dev *dev) + { + struct pmd_internal *internal = dev->data->dev_private; ++ uint16_t i; + + dev->data->dev_started = 0; + rte_atomic32_set(&internal->started, 0); + update_queuing_status(dev, true); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -1251,6 +1210,8 @@ eth_dev_close(struct rte_eth_dev *dev) rte_free(internal->iface_name); rte_free(internal); @@ -31373,7 +47688,7 @@ index b152279fac..f7e1b268ed 100644 dev->data->dev_private = NULL; rte_free(vring_states[dev->data->port_id]); -@@ -1278,6 +1227,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, +@@ -1278,6 +1239,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, vq->mb_pool = mb_pool; vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ; rte_spinlock_init(&vq->intr_lock); @@ -31381,7 +47696,7 @@ index b152279fac..f7e1b268ed 100644 dev->data->rx_queues[rx_queue_id] = vq; return 0; -@@ -1300,6 +1250,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, +@@ -1300,6 +1262,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ; rte_spinlock_init(&vq->intr_lock); @@ -31390,7 +47705,7 @@ index b152279fac..f7e1b268ed 100644 return 0; diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.c b/dpdk/drivers/net/virtio/virtio_ethdev.c -index 760ba4e368..c4f6fa55b3 100644 +index 760ba4e368..5e9ed47551 100644 --- a/dpdk/drivers/net/virtio/virtio_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_ethdev.c @@ -1797,22 +1797,25 @@ static int @@ -31492,7 +47807,7 @@ index 760ba4e368..c4f6fa55b3 100644 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d", config->max_virtqueue_pairs); -@@ -2342,10 +2348,11 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) +@@ -2342,13 +2348,22 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) return ret; if (eth_dev->data->dev_conf.intr_conf.rxq) { @@ -31506,7 +47821,18 @@ index 760ba4e368..c4f6fa55b3 100644 } } -@@ -2457,6 +2464,9 @@ virtio_dev_speed_capa_get(uint32_t speed) ++ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) ++ /* Enable vector (0) for Link State Interrupt */ ++ if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) == ++ VIRTIO_MSI_NO_VECTOR) { ++ PMD_DRV_LOG(ERR, "failed to set config vector"); ++ return -EBUSY; ++ } ++ + virtio_reinit_complete(hw); + + return 0; +@@ -2457,6 +2472,9 @@ virtio_dev_speed_capa_get(uint32_t speed) static int vectorized_check_handler(__rte_unused const char *key, const char *value, void *ret_val) { @@ -31516,6 +47842,53 @@ index 760ba4e368..c4f6fa55b3 100644 if (strcmp(value, "1") == 0) *(int *)ret_val = 1; else +@@ -2665,14 +2683,6 @@ virtio_dev_configure(struct rte_eth_dev *dev) + hw->has_tx_offload = tx_offload_enabled(hw); + hw->has_rx_offload = rx_offload_enabled(hw); + +- if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) +- /* Enable 
vector (0) for Link State Interrupt */ +- if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) == +- VIRTIO_MSI_NO_VECTOR) { +- PMD_DRV_LOG(ERR, "failed to set config vector"); +- return -EBUSY; +- } +- + if (virtio_with_packed_queue(hw)) { + #if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT) + if ((hw->use_vec_rx || hw->use_vec_tx) && +@@ -2845,6 +2855,11 @@ virtio_dev_start(struct rte_eth_dev *dev) + set_rxtx_funcs(dev); + hw->started = 1; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + /* Initialize Link state */ + virtio_dev_link_update(dev, 0); + +@@ -2934,6 +2949,7 @@ virtio_dev_stop(struct rte_eth_dev *dev) + struct virtio_hw *hw = dev->data->dev_private; + struct rte_eth_link link; + struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf; ++ uint16_t i; + + PMD_INIT_LOG(DEBUG, "stop"); + dev->data->dev_started = 0; +@@ -2961,6 +2977,11 @@ virtio_dev_stop(struct rte_eth_dev *dev) + out_unlock: + rte_spinlock_unlock(&hw->state_lock); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/virtio/virtio_pci.c b/dpdk/drivers/net/virtio/virtio_pci.c index 9cf4d760b4..29eb739b04 100644 --- a/dpdk/drivers/net/virtio/virtio_pci.c @@ -31682,6 +48055,62 @@ index d32abec327..78b1ed9ace 100644 virtio_user_dev_set_status(dev, status); } +diff --git a/dpdk/drivers/net/virtio/virtqueue.h b/dpdk/drivers/net/virtio/virtqueue.h +index f5d8b40cad..5c9230cfe1 100644 +--- a/dpdk/drivers/net/virtio/virtqueue.h ++++ b/dpdk/drivers/net/virtio/virtqueue.h +@@ -771,6 +771,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, + start_dp[idx].addr = txvq->virtio_net_hdr_mem + + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr); + start_dp[idx].len = vq->hw->vtnet_hdr_size; ++ head_flags |= VRING_DESC_F_NEXT; + hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; + idx++; + if (idx >= vq->vq_nentries) { +diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c +index fd946dec5c..a0959b0c80 100644 +--- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c ++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c +@@ -957,6 +957,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) + { + int ret; + struct vmxnet3_hw *hw = dev->data->dev_private; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + +@@ -1058,6 +1059,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) + */ + __vmxnet3_dev_link_update(dev, 0); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return VMXNET3_SUCCESS; + } + +@@ -1070,6 +1076,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) + struct rte_eth_link link; + struct vmxnet3_hw *hw = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; ++ uint16_t i; + int ret; + + PMD_INIT_FUNC_TRACE(); +@@ -1125,6 +1132,11 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) + hw->adapter_stopped = 1; + dev->data->dev_started = 0; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; 
i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c index a875ffec07..14c6504505 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -32039,6 +48468,30 @@ index 49d68ad1b1..35520ea3ae 100644 pthread_mutex_lock(&internal_list_lock); TAILQ_INSERT_TAIL(&internal_list, list, next); +diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c +index 026daa3f24..8bcbedd071 100644 +--- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c ++++ b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c +@@ -282,6 +282,7 @@ _internal_mlx5_vdpa_dev_close(struct mlx5_vdpa_priv *priv, + int ret = 0; + int vid = priv->vid; + ++ mlx5_vdpa_virtq_unreg_intr_handle_all(priv); + mlx5_vdpa_cqe_event_unset(priv); + if (priv->state == MLX5_VDPA_STATE_CONFIGURED) { + ret |= mlx5_vdpa_lm_log(priv); +diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c +index 6e6624e5a3..1d84e422d4 100644 +--- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c ++++ b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c +@@ -190,7 +190,6 @@ mlx5_vdpa_c_thread_handle(void *arg) + pthread_mutex_unlock(&virtq->virtq_lock); + break; + case MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT: +- mlx5_vdpa_virtq_unreg_intr_handle_all(priv); + pthread_mutex_lock(&priv->steer_update_lock); + mlx5_vdpa_steer_unset(priv); + pthread_mutex_unlock(&priv->steer_update_lock); diff --git a/dpdk/examples/cmdline/parse_obj_list.h b/dpdk/examples/cmdline/parse_obj_list.h index 6516d3e2c2..1223ac1e8b 100644 --- a/dpdk/examples/cmdline/parse_obj_list.h @@ -32054,6 +48507,132 @@ index 6516d3e2c2..1223ac1e8b 100644 struct object { SLIST_ENTRY(object) next; +diff --git a/dpdk/examples/ethtool/ethtool-app/ethapp.c b/dpdk/examples/ethtool/ethtool-app/ethapp.c +index 4ea504ed6a..489cd4f515 100644 +--- a/dpdk/examples/ethtool/ethtool-app/ethapp.c ++++ b/dpdk/examples/ethtool/ethtool-app/ethapp.c +@@ -51,6 +51,13 @@ struct pcmd_intintint_params { + uint16_t rx; + }; + ++struct pcmd_pause_params { ++ cmdline_fixed_string_t cmd; ++ uint16_t port; ++ cmdline_fixed_string_t mode; ++ cmdline_fixed_string_t autoneg; ++ cmdline_fixed_string_t an_status; ++}; + + /* Parameter-less commands */ + cmdline_parse_token_string_t pcmd_quit_token_cmd = +@@ -118,12 +125,18 @@ cmdline_parse_token_num_t pcmd_intintint_token_rx = + + /* Pause commands */ + cmdline_parse_token_string_t pcmd_pause_token_cmd = +- TOKEN_STRING_INITIALIZER(struct pcmd_intstr_params, cmd, "pause"); ++ TOKEN_STRING_INITIALIZER(struct pcmd_pause_params, cmd, "pause"); + cmdline_parse_token_num_t pcmd_pause_token_port = +- TOKEN_NUM_INITIALIZER(struct pcmd_intstr_params, port, RTE_UINT16); +-cmdline_parse_token_string_t pcmd_pause_token_opt = +- TOKEN_STRING_INITIALIZER(struct pcmd_intstr_params, +- opt, "all#tx#rx#none"); ++ TOKEN_NUM_INITIALIZER(struct pcmd_pause_params, port, RTE_UINT16); ++cmdline_parse_token_string_t pcmd_pause_token_mode = ++ TOKEN_STRING_INITIALIZER(struct pcmd_pause_params, ++ mode, "full#tx#rx#none"); ++cmdline_parse_token_string_t pcmd_pause_token_autoneg = ++ TOKEN_STRING_INITIALIZER(struct pcmd_pause_params, ++ autoneg, "autoneg"); ++cmdline_parse_token_string_t pcmd_pause_token_an_status = ++ TOKEN_STRING_INITIALIZER(struct pcmd_pause_params, ++ an_status, "on#off"); + + /* VLAN commands */ + cmdline_parse_token_string_t pcmd_vlan_token_cmd = +@@ -350,13 +363,12 @@ pcmd_module_eeprom_callback(void *ptr_params, + fclose(fp_eeprom); + } + +- + 
static void + pcmd_pause_callback(void *ptr_params, + __rte_unused struct cmdline *ctx, + void *ptr_data) + { +- struct pcmd_intstr_params *params = ptr_params; ++ struct pcmd_pause_params *params = ptr_params; + struct ethtool_pauseparam info; + int stat; + +@@ -368,39 +380,38 @@ pcmd_pause_callback(void *ptr_params, + stat = rte_ethtool_get_pauseparam(params->port, &info); + } else { + memset(&info, 0, sizeof(info)); +- if (strcasecmp("all", params->opt) == 0) { ++ if (strcasecmp("full", params->mode) == 0) { + info.tx_pause = 1; + info.rx_pause = 1; +- } else if (strcasecmp("tx", params->opt) == 0) { ++ } else if (strcasecmp("tx", params->mode) == 0) { + info.tx_pause = 1; + info.rx_pause = 0; +- } else if (strcasecmp("rx", params->opt) == 0) { ++ } else if (strcasecmp("rx", params->mode) == 0) { + info.tx_pause = 0; + info.rx_pause = 1; + } else { + info.tx_pause = 0; + info.rx_pause = 0; + } +- /* Assume auto-negotiation wanted */ +- info.autoneg = 1; ++ ++ if (strcasecmp("on", params->an_status) == 0) ++ info.autoneg = 1; ++ else ++ info.autoneg = 0; ++ + stat = rte_ethtool_set_pauseparam(params->port, &info); + } + if (stat == 0) { +- if (info.rx_pause && info.tx_pause) +- printf("Port %i: Tx & Rx Paused\n", params->port); +- else if (info.rx_pause) +- printf("Port %i: Rx Paused\n", params->port); +- else if (info.tx_pause) +- printf("Port %i: Tx Paused\n", params->port); +- else +- printf("Port %i: Tx & Rx not paused\n", params->port); ++ printf("Pause parameters for Port %i:\n", params->port); ++ printf("Rx pause: %s\n", info.rx_pause ? "on" : "off"); ++ printf("Tx pause: %s\n", info.tx_pause ? "on" : "off"); ++ printf("Autoneg: %s\n", info.autoneg ? "on" : "off"); + } else if (stat == -ENOTSUP) + printf("Port %i: Operation not supported\n", params->port); + else + printf("Port %i: Error %i\n", params->port, stat); + } + +- + static void + pcmd_open_callback(__rte_unused void *ptr_params, + __rte_unused struct cmdline *ctx, +@@ -737,11 +748,13 @@ cmdline_parse_inst_t pcmd_pause = { + .f = pcmd_pause_callback, + .data = NULL, + .help_str = +- "pause \n Pause/unpause port", ++ "pause autoneg \n Pause/unpause port", + .tokens = { + (void *)&pcmd_pause_token_cmd, + (void *)&pcmd_pause_token_port, +- (void *)&pcmd_pause_token_opt, ++ (void *)&pcmd_pause_token_mode, ++ (void *)&pcmd_pause_token_autoneg, ++ (void *)&pcmd_pause_token_an_status, + NULL + }, + }; diff --git a/dpdk/examples/fips_validation/Makefile b/dpdk/examples/fips_validation/Makefile index bca6647f55..fbb778d57a 100644 --- a/dpdk/examples/fips_validation/Makefile @@ -32394,6 +48973,29 @@ index 0e0012d058..53665adf03 100644 }; struct ipsec_spd_stats { +diff --git a/dpdk/examples/ipsec-secgw/ipsec.h b/dpdk/examples/ipsec-secgw/ipsec.h +index 6bef2a7285..2890e6e267 100644 +--- a/dpdk/examples/ipsec-secgw/ipsec.h ++++ b/dpdk/examples/ipsec-secgw/ipsec.h +@@ -249,11 +249,18 @@ struct offloads { + + extern struct offloads tx_offloads; + ++/* ++ * This structure is used for the key in hash table. ++ * Padding is to force the struct to use 8 bytes, ++ * to ensure memory is not read past this structs boundary ++ * (hash key calculation reads 8 bytes if this struct is size 5 bytes). 
++ */ + struct cdev_key { + uint16_t lcore_id; + uint8_t cipher_algo; + uint8_t auth_algo; + uint8_t aead_algo; ++ uint8_t padding[3]; /* padding to 8-byte size should be zeroed */ + }; + + struct socket_ctx { diff --git a/dpdk/examples/ipsec-secgw/sa.c b/dpdk/examples/ipsec-secgw/sa.c index 7da9444a7b..45cd29f18b 100644 --- a/dpdk/examples/ipsec-secgw/sa.c @@ -33114,7 +49716,7 @@ index 2165a0688c..515d0df5ce 100644 return 0; diff --git a/dpdk/lib/cryptodev/rte_cryptodev.h b/dpdk/lib/cryptodev/rte_cryptodev.h -index 86d792e2e7..4c210b876c 100644 +index 86d792e2e7..0c9464649a 100644 --- a/dpdk/lib/cryptodev/rte_cryptodev.h +++ b/dpdk/lib/cryptodev/rte_cryptodev.h @@ -501,6 +501,7 @@ extern const char * @@ -33163,7 +49765,23 @@ index 86d792e2e7..4c210b876c 100644 /** * Configure a device. -@@ -911,11 +917,14 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); +@@ -900,6 +906,15 @@ struct rte_cryptodev_cb_rcu { + /**< RCU QSBR variable per queue pair */ + }; + ++/** ++ * Get the security context for the cryptodev. ++ * ++ * @param dev_id ++ * The device identifier. ++ * @return ++ * - NULL on error. ++ * - Pointer to security context on success. ++ */ + void * + rte_cryptodev_get_sec_ctx(uint8_t dev_id); + +@@ -911,11 +926,14 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); * @param nb_elts * The number of elements in the mempool. * @param elt_size @@ -33183,7 +49801,7 @@ index 86d792e2e7..4c210b876c 100644 * @param cache_size * The number of per-lcore cache elements * @param priv_size -@@ -926,8 +935,8 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); +@@ -926,8 +944,8 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); * constraint for the reserved zone. * * @return @@ -33194,7 +49812,7 @@ index 86d792e2e7..4c210b876c 100644 */ __rte_experimental struct rte_mempool * -@@ -968,11 +977,14 @@ rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, +@@ -968,11 +986,14 @@ rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, * @param dev_id ID of device that we want the session to be used on * @param xforms Symmetric crypto transform operations to apply on flow * processed with this session @@ -33238,7 +49856,7 @@ index dcb554af1e..9cac9c6390 100644 "EAL could not release all resources\n"); exit(exit_code); diff --git a/dpdk/lib/eal/common/eal_common_dynmem.c b/dpdk/lib/eal/common/eal_common_dynmem.c -index 52e52e5986..bdbbe233a0 100644 +index 52e52e5986..95da55d9b0 100644 --- a/dpdk/lib/eal/common/eal_common_dynmem.c +++ b/dpdk/lib/eal/common/eal_common_dynmem.c @@ -120,8 +120,7 @@ eal_dynmem_memseg_lists_init(void) @@ -33261,6 +49879,18 @@ index 52e52e5986..bdbbe233a0 100644 goto out; } msl = &mcfg->memsegs[msl_idx++]; +@@ -253,7 +251,10 @@ eal_dynmem_hugepage_init(void) + */ + memset(&dummy, 0, sizeof(dummy)); + dummy.hugepage_sz = hpi->hugepage_sz; +- if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0) ++ /* memory_hotplug_lock is held during initialization, so it's ++ * safe to call thread-unsafe version. 
++ */ ++ if (rte_memseg_list_walk_thread_unsafe(hugepage_count_walk, &dummy) < 0) + return -1; + + for (i = 0; i < RTE_DIM(dummy.num_pages); i++) { diff --git a/dpdk/lib/eal/common/eal_common_fbarray.c b/dpdk/lib/eal/common/eal_common_fbarray.c index f11f87979f..169e66e04b 100644 --- a/dpdk/lib/eal/common/eal_common_fbarray.c @@ -33360,6 +49990,39 @@ index 1fc1d6c53b..9676dd73c5 100644 } else if (action(msg, s->sun_path) < 0) { RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name); } +diff --git a/dpdk/lib/eal/common/rte_malloc.c b/dpdk/lib/eal/common/rte_malloc.c +index 48db264449..dbd8eae5b0 100644 +--- a/dpdk/lib/eal/common/rte_malloc.c ++++ b/dpdk/lib/eal/common/rte_malloc.c +@@ -41,13 +41,13 @@ mem_free(void *addr, const bool trace_ena) + void + rte_free(void *addr) + { +- return mem_free(addr, true); ++ mem_free(addr, true); + } + + void + eal_free_no_trace(void *addr) + { +- return mem_free(addr, false); ++ mem_free(addr, false); + } + + static void * +diff --git a/dpdk/lib/eal/common/rte_random.c b/dpdk/lib/eal/common/rte_random.c +index 166b0d8921..ac1bf6c6ee 100644 +--- a/dpdk/lib/eal/common/rte_random.c ++++ b/dpdk/lib/eal/common/rte_random.c +@@ -79,7 +79,7 @@ rte_srand(uint64_t seed) + unsigned int lcore_id; + + /* add lcore_id to seed to avoid having the same sequence */ +- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) ++ for (lcore_id = 0; lcore_id < RTE_DIM(rand_states); lcore_id++) + __rte_srand_lfsr258(seed + lcore_id, &rand_states[lcore_id]); + } + diff --git a/dpdk/lib/eal/freebsd/eal.c b/dpdk/lib/eal/freebsd/eal.c index 607684c1a3..122daf6c1f 100644 --- a/dpdk/lib/eal/freebsd/eal.c @@ -33491,6 +50154,21 @@ index f5c49a9870..234b268b91 100644 } #endif +diff --git a/dpdk/lib/eal/include/generic/rte_rwlock.h b/dpdk/lib/eal/include/generic/rte_rwlock.h +index 233d4262be..e479daa867 100644 +--- a/dpdk/lib/eal/include/generic/rte_rwlock.h ++++ b/dpdk/lib/eal/include/generic/rte_rwlock.h +@@ -79,6 +79,10 @@ rte_rwlock_init(rte_rwlock_t *rwl) + /** + * Take a read lock. Loop until the lock is held. + * ++ * @note The RW lock isn't recursive, so calling this function on the same ++ * lock twice without releasing it could potentially result in a deadlock ++ * scenario when a write lock is involved. ++ * + * @param rwl + * A pointer to a rwlock structure. + */ diff --git a/dpdk/lib/eal/linux/eal.c b/dpdk/lib/eal/linux/eal.c index 8c118d0d9f..336698379f 100644 --- a/dpdk/lib/eal/linux/eal.c @@ -33611,6 +50289,31 @@ index a1b6cb31ff..581d9dfc91 100644 strlcpy(found, splitstr[MOUNTPT], len); } /* end while fgets */ +diff --git a/dpdk/lib/eal/linux/eal_memalloc.c b/dpdk/lib/eal/linux/eal_memalloc.c +index f8b1588cae..9853ec78a2 100644 +--- a/dpdk/lib/eal/linux/eal_memalloc.c ++++ b/dpdk/lib/eal/linux/eal_memalloc.c +@@ -1740,7 +1740,10 @@ eal_memalloc_init(void) + eal_get_internal_configuration(); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) +- if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0) ++ /* memory_hotplug_lock is held during initialization, so it's ++ * safe to call thread-unsafe version. 
++ */ ++ if (rte_memseg_list_walk_thread_unsafe(secondary_msl_create_walk, NULL) < 0) + return -1; + if (rte_eal_process_type() == RTE_PROC_PRIMARY && + internal_conf->in_memory) { +@@ -1778,7 +1781,7 @@ eal_memalloc_init(void) + } + + /* initialize all of the fd lists */ +- if (rte_memseg_list_walk(fd_list_create_walk, NULL)) ++ if (rte_memseg_list_walk_thread_unsafe(fd_list_create_walk, NULL)) + return -1; + return 0; + } diff --git a/dpdk/lib/eal/linux/eal_memory.c b/dpdk/lib/eal/linux/eal_memory.c index 60fc8cc6ca..9b6f08fba8 100644 --- a/dpdk/lib/eal/linux/eal_memory.c @@ -33735,6 +50438,63 @@ index 60fc8cc6ca..9b6f08fba8 100644 return -1; } +diff --git a/dpdk/lib/eal/riscv/include/rte_vect.h b/dpdk/lib/eal/riscv/include/rte_vect.h +index 2f97f437a2..da9092a94a 100644 +--- a/dpdk/lib/eal/riscv/include/rte_vect.h ++++ b/dpdk/lib/eal/riscv/include/rte_vect.h +@@ -29,7 +29,7 @@ typedef union rte_xmm { + uint32_t u32[XMM_SIZE / sizeof(uint32_t)]; + uint64_t u64[XMM_SIZE / sizeof(uint64_t)]; + double pd[XMM_SIZE / sizeof(double)]; +-} __rte_aligned(8) rte_xmm_t; ++} __rte_aligned(16) rte_xmm_t; + + static inline xmm_t + vect_load_128(void *p) +diff --git a/dpdk/lib/eal/unix/eal_firmware.c b/dpdk/lib/eal/unix/eal_firmware.c +index d1616b0bd9..1a7cf8e7b7 100644 +--- a/dpdk/lib/eal/unix/eal_firmware.c ++++ b/dpdk/lib/eal/unix/eal_firmware.c +@@ -25,19 +25,31 @@ static int + firmware_open(struct firmware_read_ctx *ctx, const char *name, size_t blocksize) + { + struct archive_entry *e; ++ int err; + + ctx->a = archive_read_new(); + if (ctx->a == NULL) + return -1; +- if (archive_read_support_format_raw(ctx->a) != ARCHIVE_OK || +- archive_read_support_filter_xz(ctx->a) != ARCHIVE_OK || +- archive_read_open_filename(ctx->a, name, blocksize) != ARCHIVE_OK || +- archive_read_next_header(ctx->a, &e) != ARCHIVE_OK) { +- archive_read_free(ctx->a); +- ctx->a = NULL; +- return -1; +- } ++ ++ if (archive_read_support_format_raw(ctx->a) != ARCHIVE_OK) ++ goto error; ++ ++ err = archive_read_support_filter_xz(ctx->a); ++ if (err != ARCHIVE_OK && err != ARCHIVE_WARN) ++ goto error; ++ ++ if (archive_read_open_filename(ctx->a, name, blocksize) != ARCHIVE_OK) ++ goto error; ++ ++ if (archive_read_next_header(ctx->a, &e) != ARCHIVE_OK) ++ goto error; ++ + return 0; ++ ++error: ++ archive_read_free(ctx->a); ++ ctx->a = NULL; ++ return -1; + } + + static ssize_t diff --git a/dpdk/lib/eal/unix/rte_thread.c b/dpdk/lib/eal/unix/rte_thread.c index 37ebfcfca1..f4076122a4 100644 --- a/dpdk/lib/eal/unix/rte_thread.c @@ -33883,6 +50643,29 @@ index adb929a014..56fadc7afe 100644 return fctret; } +diff --git a/dpdk/lib/eal/windows/eal_memory.c b/dpdk/lib/eal/windows/eal_memory.c +index 215d768e2c..31410a41fd 100644 +--- a/dpdk/lib/eal/windows/eal_memory.c ++++ b/dpdk/lib/eal/windows/eal_memory.c +@@ -72,10 +72,18 @@ static VirtualAlloc2_type VirtualAlloc2_ptr; + + #ifdef RTE_TOOLCHAIN_GCC + ++#ifndef MEM_COALESCE_PLACEHOLDERS + #define MEM_COALESCE_PLACEHOLDERS 0x00000001 ++#endif ++#ifndef MEM_PRESERVE_PLACEHOLDER + #define MEM_PRESERVE_PLACEHOLDER 0x00000002 ++#endif ++#ifndef MEM_REPLACE_PLACEHOLDER + #define MEM_REPLACE_PLACEHOLDER 0x00004000 ++#endif ++#ifndef MEM_RESERVE_PLACEHOLDER + #define MEM_RESERVE_PLACEHOLDER 0x00040000 ++#endif + + int + eal_mem_win32api_init(void) diff --git a/dpdk/lib/eal/windows/include/pthread.h b/dpdk/lib/eal/windows/include/pthread.h index 27fd2cca52..f7cf0e9ddf 100644 --- a/dpdk/lib/eal/windows/include/pthread.h @@ -34036,10 +50819,21 @@ index 838b3a8f9f..b61dae849d 100644 if 
((data->dev_flags & RTE_ETH_DEV_REPRESENTOR) == 0) return -1; /* not a representor port */ diff --git a/dpdk/lib/ethdev/rte_ethdev.c b/dpdk/lib/ethdev/rte_ethdev.c -index 5d5e18db1e..437d04b34e 100644 +index 5d5e18db1e..4f50e2fa80 100644 --- a/dpdk/lib/ethdev/rte_ethdev.c +++ b/dpdk/lib/ethdev/rte_ethdev.c -@@ -4362,6 +4362,11 @@ rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) +@@ -1192,7 +1192,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + } + + if (dev_conf->rxmode.mtu == 0) +- dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; ++ dev->data->dev_conf.rxmode.mtu = ++ (dev_info.max_mtu == 0) ? RTE_ETHER_MTU : ++ RTE_MIN(dev_info.max_mtu, RTE_ETHER_MTU); + + ret = eth_dev_validate_mtu(port_id, &dev_info, + dev->data->dev_conf.rxmode.mtu); +@@ -4362,6 +4364,11 @@ rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -34051,7 +50845,7 @@ index 5d5e18db1e..437d04b34e 100644 if (*dev->dev_ops->fec_set == NULL) return -ENOTSUP; return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); -@@ -4499,6 +4504,7 @@ int +@@ -4499,6 +4506,7 @@ int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) { struct rte_eth_dev *dev; @@ -34059,7 +50853,7 @@ index 5d5e18db1e..437d04b34e 100644 int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); -@@ -4517,6 +4523,15 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) +@@ -4517,6 +4525,15 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) if (*dev->dev_ops->mac_addr_set == NULL) return -ENOTSUP; @@ -34075,7 +50869,7 @@ index 5d5e18db1e..437d04b34e 100644 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); if (ret < 0) return ret; -@@ -5935,7 +5950,7 @@ eth_dev_handle_port_dump_priv(const char *cmd __rte_unused, +@@ -5935,7 +5952,7 @@ eth_dev_handle_port_dump_priv(const char *cmd __rte_unused, if (!rte_eth_dev_is_valid_port(port_id)) return -EINVAL; @@ -34084,7 +50878,7 @@ index 5d5e18db1e..437d04b34e 100644 if (buf == NULL) return -ENOMEM; -@@ -6037,10 +6052,8 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6037,10 +6054,8 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, eth_dev->data->nb_tx_queues); rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); @@ -34096,7 +50890,7 @@ index 5d5e18db1e..437d04b34e 100644 rte_ether_format_addr(mac_addr, sizeof(mac_addr), eth_dev->data->mac_addrs); rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); -@@ -6068,12 +6081,12 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6068,12 +6083,12 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); @@ -34114,9 +50908,18 @@ index 5d5e18db1e..437d04b34e 100644 return 0; diff --git a/dpdk/lib/ethdev/rte_ethdev.h b/dpdk/lib/ethdev/rte_ethdev.h -index c129ca1eaf..5f187131e2 100644 +index c129ca1eaf..e73244822a 100644 --- a/dpdk/lib/ethdev/rte_ethdev.h +++ b/dpdk/lib/ethdev/rte_ethdev.h +@@ -3637,7 +3637,7 @@ rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size); + * for example, to count dropped packets, or to retry transmission of packets + * which cannot be sent, this function should be used to register a suitable + * callback function to 
implement the desired behaviour. +- * The example callback "rte_eth_count_unsent_packet_callback()" is also ++ * The example callback "rte_eth_tx_buffer_count_callback()" is also + * provided as reference. + * + * @param buffer @@ -4177,10 +4177,7 @@ int rte_eth_fec_get_capability(uint16_t port_id, * @param port_id * The port identifier of the Ethernet device. @@ -34166,9 +50969,27 @@ index c129ca1eaf..5f187131e2 100644 int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr); diff --git a/dpdk/lib/ethdev/rte_flow.c b/dpdk/lib/ethdev/rte_flow.c -index 7d0c24366c..1a67a987f5 100644 +index 7d0c24366c..ae22755ee6 100644 --- a/dpdk/lib/ethdev/rte_flow.c +++ b/dpdk/lib/ethdev/rte_flow.c +@@ -654,7 +654,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, + if (src.rss->key_len && src.rss->key) { + off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key)); + tmp = sizeof(*src.rss->key) * src.rss->key_len; +- if (size >= off + tmp) ++ if (size >= (uint64_t)off + (uint64_t)tmp) + dst.rss->key = rte_memcpy + ((void *)((uintptr_t)dst.rss + off), + src.rss->key, tmp); +@@ -663,7 +663,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, + if (src.rss->queue_num) { + off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue)); + tmp = sizeof(*src.rss->queue) * src.rss->queue_num; +- if (size >= off + tmp) ++ if (size >= (uint64_t)off + (uint64_t)tmp) + dst.rss->queue = rte_memcpy + ((void *)((uintptr_t)dst.rss + off), + src.rss->queue, tmp); @@ -855,7 +855,15 @@ rte_flow_conv_actions(struct rte_flow_action *dst, src -= num; dst -= num; @@ -34186,11 +51007,63 @@ index 7d0c24366c..1a67a987f5 100644 off = RTE_ALIGN_CEIL(off, sizeof(double)); ret = rte_flow_conv_action_conf ((void *)(data + off), +@@ -1879,6 +1887,8 @@ rte_flow_async_action_handle_query(uint16_t port_id, + const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); + int ret; + ++ if (unlikely(!ops)) ++ return -rte_errno; + ret = ops->async_action_handle_query(dev, queue_id, op_attr, + action_handle, data, user_data, error); + return flow_err(port_id, ret, error); +diff --git a/dpdk/lib/eventdev/eventdev_pmd_vdev.h b/dpdk/lib/eventdev/eventdev_pmd_vdev.h +index 5fa9d699ac..bb433ba955 100644 +--- a/dpdk/lib/eventdev/eventdev_pmd_vdev.h ++++ b/dpdk/lib/eventdev/eventdev_pmd_vdev.h +@@ -45,7 +45,7 @@ extern "C" { + __rte_internal + static inline struct rte_eventdev * + rte_event_pmd_vdev_init(const char *name, size_t dev_private_size, +- int socket_id) ++ int socket_id, struct rte_vdev_device *vdev) + { + + struct rte_eventdev *eventdev; +@@ -67,6 +67,7 @@ rte_event_pmd_vdev_init(const char *name, size_t dev_private_size, + rte_panic("Cannot allocate memzone for private device" + " data"); + } ++ eventdev->dev = &vdev->device; + + return eventdev; + } diff --git a/dpdk/lib/eventdev/rte_event_crypto_adapter.c b/dpdk/lib/eventdev/rte_event_crypto_adapter.c -index 3c585d7b0d..4e1dbefb8e 100644 +index 3c585d7b0d..ea50e405a8 100644 --- a/dpdk/lib/eventdev/rte_event_crypto_adapter.c +++ b/dpdk/lib/eventdev/rte_event_crypto_adapter.c -@@ -497,6 +497,9 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, +@@ -240,9 +240,18 @@ eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp, + n = *tailp - *headp; + else if (*tailp < *headp) + n = bufp->size - *headp; +- else { +- *nb_ops_flushed = 0; +- return 0; /* buffer empty */ ++ else { /* head == tail case */ ++ /* when head == tail, ++ * circ buff is either full(tail pointer roll over) or empty ++ */ ++ if (bufp->count 
!= 0) { ++ /* circ buffer is full */ ++ n = bufp->count; ++ } else { ++ /* circ buffer is empty */ ++ *nb_ops_flushed = 0; ++ return 0; /* buffer empty */ ++ } + } + + *nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id, +@@ -497,6 +506,9 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, cdev_id, qp_id, &nb_enqueued); @@ -34200,7 +51073,7 @@ index 3c585d7b0d..4e1dbefb8e 100644 /** * If some crypto ops failed to flush to cdev and * space for another batch is not available, stop -@@ -507,9 +510,6 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, +@@ -507,9 +519,6 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, &qp_info->cbuf))) adapter->stop_enq_to_cryptodev = true; } @@ -34210,7 +51083,7 @@ index 3c585d7b0d..4e1dbefb8e 100644 } return n; -@@ -585,14 +585,15 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter, +@@ -585,14 +594,15 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter, if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) return 0; @@ -34232,7 +51105,7 @@ index 3c585d7b0d..4e1dbefb8e 100644 stats->event_poll_count++; n = rte_event_dequeue_burst(event_dev_id, event_port_id, ev, BATCH_SIZE, 0); -@@ -603,8 +604,6 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter, +@@ -603,8 +613,6 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter, nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n); } @@ -34241,7 +51114,7 @@ index 3c585d7b0d..4e1dbefb8e 100644 if ((++adapter->transmit_loop_count & (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) { nb_enqueued += eca_crypto_enq_flush(adapter); -@@ -681,7 +680,7 @@ eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter, +@@ -681,7 +689,7 @@ eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter, else return 0; /* buffer empty */ @@ -34250,7 +51123,7 @@ index 3c585d7b0d..4e1dbefb8e 100644 bufp->count -= nb_ops_flushed; if (!bufp->count) { *headp = 0; -@@ -766,7 +765,7 @@ eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter, +@@ -766,7 +774,7 @@ eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter, for (i = nb_enqueued; i < n; i++) eca_circular_buffer_add( &adapter->ebuf, @@ -34282,9 +51155,37 @@ index 83d154a6ce..2a69290097 100644 uint8_t dev_id, uint16_t cdev_id, struct rte_event_crypto_adapter_vector_limits *limits); diff --git a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c -index cf7bbd4d69..170823a03c 100644 +index cf7bbd4d69..6636128378 100644 --- a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c +@@ -1906,6 +1906,13 @@ rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id) + if (rx_adapter->service_inited) + return 0; + ++ if (rte_mbuf_dyn_rx_timestamp_register( ++ &event_eth_rx_timestamp_dynfield_offset, ++ &event_eth_rx_timestamp_dynflag) != 0) { ++ RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n"); ++ return -rte_errno; ++ } ++ + memset(&service, 0, sizeof(service)); + snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN, + "rte_event_eth_rx_adapter_%d", id); +@@ -2468,13 +2475,6 @@ rxa_create(uint8_t id, uint8_t dev_id, + if (conf_cb == rxa_default_conf_cb) + rx_adapter->default_cb_arg = 1; + +- if (rte_mbuf_dyn_rx_timestamp_register( +- &event_eth_rx_timestamp_dynfield_offset, +- &event_eth_rx_timestamp_dynflag) != 0) { +- RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n"); +- return 
-rte_errno; +- } +- + rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, + conf_arg); + return 0; @@ -3415,14 +3415,10 @@ rte_event_eth_rx_adapter_instance_get(uint16_t eth_dev_id, if (!rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id, eth_dev_id, @@ -34483,10 +51384,19 @@ index a0f14bf861..a13ddce627 100644 type, lcore_id, NULL, evtims[i]); if (ret < 0) { diff --git a/dpdk/lib/eventdev/rte_eventdev.c b/dpdk/lib/eventdev/rte_eventdev.c -index b0414206d9..78336faa6a 100644 +index b0414206d9..04eeb76d4f 100644 --- a/dpdk/lib/eventdev/rte_eventdev.c +++ b/dpdk/lib/eventdev/rte_eventdev.c -@@ -1678,7 +1678,7 @@ eventdev_build_telemetry_data(int dev_id, +@@ -99,6 +99,8 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info) + dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns; + + dev_info->dev = dev->dev; ++ if (dev->dev != NULL && dev->dev->driver != NULL) ++ dev_info->driver_name = dev->dev->driver->name; + return 0; + } + +@@ -1678,7 +1680,7 @@ eventdev_build_telemetry_data(int dev_id, if (xstat_names == NULL) return -1; @@ -34496,10 +51406,18 @@ index b0414206d9..78336faa6a 100644 free(xstat_names); return -1; diff --git a/dpdk/lib/eventdev/version.map b/dpdk/lib/eventdev/version.map -index dd63ec6f68..c155af6d50 100644 +index dd63ec6f68..56000271a4 100644 --- a/dpdk/lib/eventdev/version.map +++ b/dpdk/lib/eventdev/version.map -@@ -110,6 +110,7 @@ EXPERIMENTAL { +@@ -101,6 +101,7 @@ EXPERIMENTAL { + global: + + # added in 21.11 ++ __rte_eventdev_trace_maintain; + rte_event_eth_rx_adapter_create_with_params; + rte_event_eth_rx_adapter_queue_conf_get; + rte_event_eth_rx_adapter_queue_stats_get; +@@ -110,6 +111,7 @@ EXPERIMENTAL { rte_event_eth_rx_adapter_event_port_get; # added in 22.07 @@ -34508,11 +51426,19 @@ index dd63ec6f68..c155af6d50 100644 rte_event_queue_attr_set; diff --git a/dpdk/lib/fib/dir24_8.c b/dpdk/lib/fib/dir24_8.c -index a8ba4f64ca..3efdcb533c 100644 +index a8ba4f64ca..5f73b8a7f0 100644 --- a/dpdk/lib/fib/dir24_8.c +++ b/dpdk/lib/fib/dir24_8.c -@@ -390,7 +390,7 @@ modify_fib(struct dir24_8_tbl *dp, struct rte_rib *rib, uint32_t ip, +@@ -388,9 +388,15 @@ modify_fib(struct dir24_8_tbl *dp, struct rte_rib *rib, uint32_t ip, + return ret; + ledge = redge + (uint32_t)(1ULL << (32 - tmp_depth)); ++ /* ++ * we got to the end of address space ++ * and wrapped around ++ */ ++ if (ledge == 0) ++ break; } else { redge = ip + (uint32_t)(1ULL << (32 - depth)); - if (ledge == redge) @@ -34520,6 +51446,46 @@ index a8ba4f64ca..3efdcb533c 100644 break; ret = install_to_fib(dp, ledge, redge, next_hop); +diff --git a/dpdk/lib/fib/trie.c b/dpdk/lib/fib/trie.c +index 3e780afdaf..09470e7287 100644 +--- a/dpdk/lib/fib/trie.c ++++ b/dpdk/lib/fib/trie.c +@@ -451,6 +451,14 @@ get_nxt_net(uint8_t *ip, uint8_t depth) + } + } + ++static int ++v6_addr_is_zero(const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE]) ++{ ++ uint8_t ip_addr[RTE_FIB6_IPV6_ADDR_SIZE] = {0}; ++ ++ return rte_rib6_is_equal(ip, ip_addr); ++} ++ + static int + modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib, + const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE], +@@ -484,11 +492,19 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib, + return ret; + get_nxt_net(redge, tmp_depth); + rte_rib6_copy_addr(ledge, redge); ++ /* ++ * we got to the end of address space ++ * and wrapped around ++ */ ++ if (v6_addr_is_zero(ledge)) ++ break; + } else { + rte_rib6_copy_addr(redge, ip); + get_nxt_net(redge, depth); +- if (rte_rib6_is_equal(ledge, redge)) ++ if (rte_rib6_is_equal(ledge, 
redge) && ++ !v6_addr_is_zero(ledge)) + break; ++ + ret = install_to_dp(dp, ledge, redge, + next_hop); + if (ret != 0) diff --git a/dpdk/lib/gpudev/gpudev.c b/dpdk/lib/gpudev/gpudev.c index 805719d00c..8f12abef23 100644 --- a/dpdk/lib/gpudev/gpudev.c @@ -34607,6 +51573,26 @@ index fc6345de07..149414dcd9 100644 return rc; } +diff --git a/dpdk/lib/hash/rte_cuckoo_hash.c b/dpdk/lib/hash/rte_cuckoo_hash.c +index 829b79c89a..a08b5dd875 100644 +--- a/dpdk/lib/hash/rte_cuckoo_hash.c ++++ b/dpdk/lib/hash/rte_cuckoo_hash.c +@@ -1860,11 +1860,15 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, + _mm_load_si128( + (__m128i const *)prim_bkt->sig_current), + _mm_set1_epi16(sig))); ++ /* Extract the even-index bits only */ ++ *prim_hash_matches &= 0x5555; + /* Compare all signatures in the bucket */ + *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16( + _mm_load_si128( + (__m128i const *)sec_bkt->sig_current), + _mm_set1_epi16(sig))); ++ /* Extract the even-index bits only */ ++ *sec_hash_matches &= 0x5555; + break; + #elif defined(__ARM_NEON) + case RTE_HASH_COMPARE_NEON: { diff --git a/dpdk/lib/hash/rte_thash.c b/dpdk/lib/hash/rte_thash.c index 0249883b8d..2228af576b 100644 --- a/dpdk/lib/hash/rte_thash.c @@ -34751,6 +51737,23 @@ index a30e1e0eaf..3ab7be49fa 100644 } hash; /**< hash information */ }; +diff --git a/dpdk/lib/mbuf/rte_mbuf_ptype.h b/dpdk/lib/mbuf/rte_mbuf_ptype.h +index 17a2dd3576..f2276e2909 100644 +--- a/dpdk/lib/mbuf/rte_mbuf_ptype.h ++++ b/dpdk/lib/mbuf/rte_mbuf_ptype.h +@@ -419,10 +419,10 @@ extern "C" { + * + * Packet format: + * <'ether type'=0x0800 +- * | 'version'=4, 'protocol'=51> ++ * | 'version'=4, 'protocol'=50> + * or, + * <'ether type'=0x86DD +- * | 'version'=6, 'next header'=51> ++ * | 'version'=6, 'next header'=50> + */ + #define RTE_PTYPE_TUNNEL_ESP 0x00009000 + /** diff --git a/dpdk/lib/member/rte_member.h b/dpdk/lib/member/rte_member.h index 072a253c89..d08b143e51 100644 --- a/dpdk/lib/member/rte_member.h @@ -34776,10 +51779,64 @@ index 524ba77620..d5f35aabe9 100644 ss->hash_seeds[i] = rte_rand(); diff --git a/dpdk/lib/mempool/rte_mempool.c b/dpdk/lib/mempool/rte_mempool.c -index f33f455790..950d01ffac 100644 +index f33f455790..3de857abf5 100644 --- a/dpdk/lib/mempool/rte_mempool.c +++ b/dpdk/lib/mempool/rte_mempool.c -@@ -1500,27 +1500,27 @@ mempool_info_cb(struct rte_mempool *mp, void *arg) +@@ -915,6 +915,22 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, + STAILQ_INIT(&mp->elt_list); + STAILQ_INIT(&mp->mem_list); + ++ /* ++ * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to ++ * set the correct index into the table of ops structs. ++ */ ++ if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET)) ++ ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); ++ else if (flags & RTE_MEMPOOL_F_SP_PUT) ++ ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); ++ else if (flags & RTE_MEMPOOL_F_SC_GET) ++ ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); ++ else ++ ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); ++ ++ if (ret) ++ goto exit_unlock; ++ + /* + * local_cache pointer is set even if cache_size is zero. + * The local_cache points to just past the elt_pa[] array. 
+@@ -955,7 +971,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags) + { +- int ret; + struct rte_mempool *mp; + + mp = rte_mempool_create_empty(name, n, elt_size, cache_size, +@@ -963,22 +978,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + if (mp == NULL) + return NULL; + +- /* +- * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to +- * set the correct index into the table of ops structs. +- */ +- if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET)) +- ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); +- else if (flags & RTE_MEMPOOL_F_SP_PUT) +- ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); +- else if (flags & RTE_MEMPOOL_F_SC_GET) +- ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); +- else +- ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); +- +- if (ret) +- goto fail; +- + /* call the mempool priv initializer */ + if (mp_init) + mp_init(mp, mp_init_arg); +@@ -1500,27 +1499,27 @@ mempool_info_cb(struct rte_mempool *mp, void *arg) return; rte_tel_data_add_dict_string(info->d, "name", mp->name); @@ -34819,6 +51876,85 @@ index f33f455790..950d01ffac 100644 } static int +diff --git a/dpdk/lib/mempool/rte_mempool.h b/dpdk/lib/mempool/rte_mempool.h +index 9f530db24b..4a8a2d5dcb 100644 +--- a/dpdk/lib/mempool/rte_mempool.h ++++ b/dpdk/lib/mempool/rte_mempool.h +@@ -465,13 +465,19 @@ typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp); + typedef void (*rte_mempool_free_t)(struct rte_mempool *mp); + + /** +- * Enqueue an object into the external pool. ++ * Enqueue 'n' objects into the external pool. ++ * @return ++ * - 0: Success ++ * - <0: Error + */ + typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp, + void * const *obj_table, unsigned int n); + + /** +- * Dequeue an object from the external pool. ++ * Dequeue 'n' objects from the external pool. ++ * @return ++ * - 0: Success ++ * - <0: Error + */ + typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp, + void **obj_table, unsigned int n); +@@ -1484,7 +1490,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj) + * @param cache + * A pointer to a mempool cache structure. May be NULL if not needed. + * @return +- * - >=0: Success; number of objects supplied. ++ * - 0: Success. + * - <0: Error; code of driver dequeue function. 
+ */ + static __rte_always_inline int +diff --git a/dpdk/lib/meter/rte_meter.h b/dpdk/lib/meter/rte_meter.h +index 0932645d0a..35e2675028 100644 +--- a/dpdk/lib/meter/rte_meter.h ++++ b/dpdk/lib/meter/rte_meter.h +@@ -127,9 +127,6 @@ int + rte_meter_trtcm_profile_config(struct rte_meter_trtcm_profile *p, + struct rte_meter_trtcm_params *params); + /** +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice +- * + * trTCM RFC 4115 profile configuration + * + * @param p +@@ -173,9 +170,6 @@ rte_meter_trtcm_config(struct rte_meter_trtcm *m, + struct rte_meter_trtcm_profile *p); + + /** +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice +- * + * trTCM RFC 4115 configuration per metered traffic flow + * + * @param m +@@ -276,9 +270,6 @@ rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, + enum rte_color pkt_color); + + /** +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice +- * + * trTCM RFC4115 color blind traffic metering + * + * @param m +@@ -300,9 +291,6 @@ rte_meter_trtcm_rfc4115_color_blind_check( + uint32_t pkt_len); + + /** +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice +- * + * trTCM RFC4115 color aware traffic metering + * + * @param m diff --git a/dpdk/lib/net/rte_ip.h b/dpdk/lib/net/rte_ip.h index 9c8e8206f0..0cafb980ef 100644 --- a/dpdk/lib/net/rte_ip.h @@ -34833,10 +51969,28 @@ index 9c8e8206f0..0cafb980ef 100644 const struct rte_ipv4_hdr *ipv4_hdr, uint16_t l4_off) diff --git a/dpdk/lib/pci/rte_pci.h b/dpdk/lib/pci/rte_pci.h -index 5088157e74..aab761b918 100644 +index 5088157e74..9876c3fb9d 100644 --- a/dpdk/lib/pci/rte_pci.h +++ b/dpdk/lib/pci/rte_pci.h -@@ -104,8 +104,7 @@ struct rte_pci_addr { +@@ -45,6 +45,7 @@ extern "C" { + #define RTE_PCI_EXT_CAP_ID_ERR 0x01 /* Advanced Error Reporting */ + #define RTE_PCI_EXT_CAP_ID_DSN 0x03 /* Device Serial Number */ + #define RTE_PCI_EXT_CAP_ID_SRIOV 0x10 /* SR-IOV*/ ++#define RTE_PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */ + + /* Single Root I/O Virtualization */ + #define RTE_PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */ +@@ -58,6 +59,9 @@ extern "C" { + #define RTE_PCI_SRIOV_VF_DID 0x1a /* VF Device ID */ + #define RTE_PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */ + ++ ++/* Process Address Space ID (RTE_PCI_EXT_CAP_ID_PASID) */ ++#define RTE_PCI_PASID_CTRL 0x06 /* PASID control register */ + /** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */ + #define PCI_PRI_FMT "%.4" PRIx32 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8 + #define PCI_PRI_STR_SIZE sizeof("XXXXXXXX:XX:XX.X") +@@ -104,8 +108,7 @@ struct rte_pci_addr { /** * Utility function to write a pci device name, this device name can later be @@ -34847,7 +52001,7 @@ index 5088157e74..aab761b918 100644 * @param addr * The PCI Bus-Device-Function address diff --git a/dpdk/lib/pdump/rte_pdump.c b/dpdk/lib/pdump/rte_pdump.c -index a81544cb57..4b7a4b3483 100644 +index a81544cb57..63835a1a67 100644 --- a/dpdk/lib/pdump/rte_pdump.c +++ b/dpdk/lib/pdump/rte_pdump.c @@ -134,7 +134,7 @@ pdump_copy(uint16_t port_id, uint16_t queue, @@ -34859,6 +52013,19 @@ index a81544cb57..4b7a4b3483 100644 if (unlikely(ring_enq < d_pkts)) { unsigned int drops = d_pkts - ring_enq; +@@ -564,9 +564,10 @@ pdump_prepare_client_request(const char *device, uint16_t queue, + if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) { + mp_rep = &mp_reply.msgs[0]; + resp = (struct pdump_response *)mp_rep->param; +- rte_errno = resp->err_value; +- if (!resp->err_value) ++ if 
(resp->err_value == 0) + ret = 0; ++ else ++ rte_errno = -resp->err_value; + free(mp_reply.msgs); + } + diff --git a/dpdk/lib/pipeline/rte_swx_pipeline.c b/dpdk/lib/pipeline/rte_swx_pipeline.c index 0e631dea2b..084c614639 100644 --- a/dpdk/lib/pipeline/rte_swx_pipeline.c @@ -34879,6 +52046,20 @@ index 0e631dea2b..084c614639 100644 } } +diff --git a/dpdk/lib/rawdev/rte_rawdev.c b/dpdk/lib/rawdev/rte_rawdev.c +index 5fbdb94229..891e79dcd7 100644 +--- a/dpdk/lib/rawdev/rte_rawdev.c ++++ b/dpdk/lib/rawdev/rte_rawdev.c +@@ -505,8 +505,7 @@ rte_rawdev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id) + uint16_t dev_id; + + if (rte_rawdev_pmd_get_named_dev(name) != NULL) { +- RTE_RDEV_ERR("Event device with name %s already allocated!", +- name); ++ RTE_RDEV_ERR("Raw device with name %s already allocated!", name); + return NULL; + } + diff --git a/dpdk/lib/reorder/rte_reorder.c b/dpdk/lib/reorder/rte_reorder.c index 385ee479da..bc85b83b14 100644 --- a/dpdk/lib/reorder/rte_reorder.c @@ -35154,6 +52335,50 @@ index c91697131d..19768d8c38 100644 } __rte_cache_aligned; struct rte_sched_port { +diff --git a/dpdk/lib/security/rte_security.h b/dpdk/lib/security/rte_security.h +index 4bacf9fcd9..fd7013a23d 100644 +--- a/dpdk/lib/security/rte_security.h ++++ b/dpdk/lib/security/rte_security.h +@@ -618,6 +618,7 @@ struct rte_security_docsis_xform { + /** + * Security session action type. + */ ++/* Enumeration of rte_security_session_action_type 8<*/ + enum rte_security_session_action_type { + RTE_SECURITY_ACTION_TYPE_NONE, + /**< No security actions */ +@@ -638,8 +639,10 @@ enum rte_security_session_action_type { + * protocol is processed synchronously by a CPU. + */ + }; ++/* >8 End enumeration of rte_security_session_action_type. */ + + /** Security session protocol definition */ ++/* Enumeration of rte_security_session_protocol 8<*/ + enum rte_security_session_protocol { + RTE_SECURITY_PROTOCOL_IPSEC = 1, + /**< IPsec Protocol */ +@@ -650,10 +653,12 @@ enum rte_security_session_protocol { + RTE_SECURITY_PROTOCOL_DOCSIS, + /**< DOCSIS Protocol */ + }; ++/* >8 End enumeration of rte_security_session_protocol. */ + + /** + * Security session configuration + */ ++/* Structure rte_security_session_conf 8< */ + struct rte_security_session_conf { + enum rte_security_session_action_type action_type; + /**< Type of action to be performed on the session */ +@@ -672,6 +677,7 @@ struct rte_security_session_conf { + void *userdata; + /**< Application specific userdata to be saved with session */ + }; ++/* >8 End of structure rte_security_session_conf. 
*/ + + /** + * Create security session as specified by the session configuration diff --git a/dpdk/lib/table/rte_swx_table_selector.c b/dpdk/lib/table/rte_swx_table_selector.c index ad99f18453..18e021fe6f 100644 --- a/dpdk/lib/table/rte_swx_table_selector.c @@ -35286,6 +52511,254 @@ index 863a6f6d52..669c322e12 100644 for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL; cmsg = CMSG_NXTHDR(&msgh, cmsg)) { +diff --git a/dpdk/lib/vhost/vhost.c b/dpdk/lib/vhost/vhost.c +index 19c7b92c32..9e28198528 100644 +--- a/dpdk/lib/vhost/vhost.c ++++ b/dpdk/lib/vhost/vhost.c +@@ -1294,6 +1294,7 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx) + { + struct virtio_net *dev; + struct vhost_virtqueue *vq; ++ int ret = 0; + + dev = get_device(vid); + if (!dev) +@@ -1308,14 +1309,20 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx) + + rte_spinlock_lock(&vq->access_lock); + ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + if (vq_is_packed(dev)) + vhost_vring_call_packed(dev, vq); + else + vhost_vring_call_split(dev, vq); + ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + +- return 0; ++ return ret; + } + + int +@@ -1323,6 +1330,7 @@ rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx) + { + struct virtio_net *dev; + struct vhost_virtqueue *vq; ++ int ret = 0; + + dev = get_device(vid); + if (!dev) +@@ -1338,14 +1346,20 @@ rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx) + if (!rte_spinlock_trylock(&vq->access_lock)) + return -EAGAIN; + ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + if (vq_is_packed(dev)) + vhost_vring_call_packed(dev, vq); + else + vhost_vring_call_split(dev, vq); + ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + +- return 0; ++ return ret; + } + + uint16_t +@@ -1368,7 +1382,10 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id) + + rte_spinlock_lock(&vq->access_lock); + +- if (unlikely(!vq->enabled || vq->avail == NULL)) ++ if (unlikely(!vq->access_ok)) ++ goto out; ++ ++ if (unlikely(!vq->enabled)) + goto out; + + ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx; +@@ -1460,9 +1477,15 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable) + + rte_spinlock_lock(&vq->access_lock); + ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + vq->notif_enable = enable; + ret = vhost_enable_guest_notification(dev, vq, enable); + ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + + return ret; +@@ -1523,7 +1546,10 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid) + + rte_spinlock_lock(&vq->access_lock); + +- if (unlikely(!vq->enabled || vq->avail == NULL)) ++ if (unlikely(!vq->access_ok)) ++ goto out; ++ ++ if (unlikely(!vq->enabled)) + goto out; + + ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx; +@@ -1758,7 +1784,15 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id) + return -1; + + rte_spinlock_lock(&vq->access_lock); ++ ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + ret = async_channel_register(dev, vq); ++ ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + + return ret; +@@ -1814,6 +1848,11 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id) + return ret; + } + ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + if (!vq->async) { + ret = 0; + } else if (vq->async->pkts_inflight_n) { +@@ -1825,6 +1864,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id) + ret = 0; + } + ++out_unlock: + 
rte_spinlock_unlock(&vq->access_lock); + + return ret; +@@ -1968,9 +2008,15 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id) + return ret; + } + ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + if (vq->async) + ret = vq->async->pkts_inflight_n; + ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + + return ret; +@@ -2014,6 +2060,7 @@ rte_vhost_get_monitor_addr(int vid, uint16_t queue_id, + { + struct virtio_net *dev = get_device(vid); + struct vhost_virtqueue *vq; ++ int ret = 0; + + if (dev == NULL) + return -1; +@@ -2024,6 +2071,13 @@ rte_vhost_get_monitor_addr(int vid, uint16_t queue_id, + if (vq == NULL) + return -1; + ++ rte_spinlock_lock(&vq->access_lock); ++ ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + if (vq_is_packed(dev)) { + struct vring_packed_desc *desc; + desc = vq->desc_packed; +@@ -2043,7 +2097,10 @@ rte_vhost_get_monitor_addr(int vid, uint16_t queue_id, + pmc->match = 0; + } + +- return 0; ++out_unlock: ++ rte_spinlock_unlock(&vq->access_lock); ++ ++ return ret; + } + + +@@ -2081,6 +2138,7 @@ rte_vhost_vring_stats_get(int vid, uint16_t queue_id, + struct virtio_net *dev = get_device(vid); + struct vhost_virtqueue *vq; + unsigned int i; ++ int ret = VHOST_NB_VQ_STATS; + + if (dev == NULL) + return -1; +@@ -2097,20 +2155,29 @@ rte_vhost_vring_stats_get(int vid, uint16_t queue_id, + vq = dev->virtqueue[queue_id]; + + rte_spinlock_lock(&vq->access_lock); ++ ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + for (i = 0; i < VHOST_NB_VQ_STATS; i++) { + stats[i].value = + *(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset); + stats[i].id = i; + } ++ ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + +- return VHOST_NB_VQ_STATS; ++ return ret; + } + + int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id) + { + struct virtio_net *dev = get_device(vid); + struct vhost_virtqueue *vq; ++ int ret = 0; + + if (dev == NULL) + return -1; +@@ -2124,10 +2191,17 @@ int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id) + vq = dev->virtqueue[queue_id]; + + rte_spinlock_lock(&vq->access_lock); ++ ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } + memset(&vq->stats, 0, sizeof(vq->stats)); ++ ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + +- return 0; ++ return ret; + } + + int diff --git a/dpdk/lib/vhost/vhost.h b/dpdk/lib/vhost/vhost.h index ef211ed519..63e2f3f577 100644 --- a/dpdk/lib/vhost/vhost.h @@ -35451,3 +52924,16 @@ index 9abf752f30..26f184f8b2 100644 break; } +diff --git a/dpdk/usertools/dpdk-pmdinfo.py b/dpdk/usertools/dpdk-pmdinfo.py +index 67d023a047..2c728de7b8 100755 +--- a/dpdk/usertools/dpdk-pmdinfo.py ++++ b/dpdk/usertools/dpdk-pmdinfo.py +@@ -23,7 +23,7 @@ Get only the required kernel modules for a given driver: + Get only the required kernel modules for a given device: + + %(prog)s dpdk-testpmd | \ +- jq '.[] | select(.devices[] | .vendor_id == "15b3" and .device_id == "1013").kmod' ++ jq '.[] | select(.pci_ids[]? | .vendor == "15b3" and .device == "1013").kmod' + """ + + import argparse diff --git a/SPECS/openvswitch3.2.spec b/SPECS/openvswitch3.2.spec index 12aa803..85c7a87 100644 --- a/SPECS/openvswitch3.2.spec +++ b/SPECS/openvswitch3.2.spec @@ -57,7 +57,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 3.2.0 -Release: 56%{?dist} +Release: 63%{?dist} # Nearly all of openvswitch is ASL 2.0. 
The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -761,6 +761,330 @@ exit 0 %endif %changelog +* Tue Mar 12 2024 Open vSwitch CI - 3.2.0-63 +- Merging upstream branch-3.2 [RH git: 31cd5f6845] + Commit list: + 5ef1e26cbb github: Reduce ASLR entropy to be compatible with asan in llvm 14. + + +* Fri Mar 08 2024 Open vSwitch CI - 3.2.0-62 +- Merging upstream branch-3.2 [RH git: e46db9d824] + Commit list: + 60d2254773 netdev-dpdk: Dump packets that fail Tx preparation. + 999eaf3e93 bfd: Improve state change log message. (#2258496) + a2b0dbbf2e tests: Fix "SSL db: Implementation" test with openssl > 3.2.0. + + +* Fri Mar 08 2024 Open vSwitch CI - 3.2.0-61 +- Merging upstream branch-3.2 [RH git: 84002436e5] + Commit list: + 41a8b58aa9 conntrack: Fix flush not flushing all elements. + ad92b0da17 conntrack: Remove nat_conn introducing key directionality. + + +* Fri Mar 01 2024 Open vSwitch CI - 3.2.0-60 +- Merging upstream branch-3.2 [RH git: 03d7e8565b] + Commit list: + b5f2ed2a21 bond: Reset stats when deleting post recirc rule. + 3bcff01e40 ofproto-dpif-trace: Fix infinite recirculation tracing. + 3efb5ad810 ofproto-dpif-xlate: Fix ignoring IPv6 local_ip for native tunnels. + f4a4416b38 netdev-dummy: Add local route entries for IP addresses. + bfc212deed tests: Move the non-local port as tunnel endpoint test. + dce943f5a8 rstp: Fix deadlock with patch ports. + + +* Fri Mar 01 2024 Open vSwitch CI - 3.2.0-59 +- Merging dpdk subtree [RH git: e25c4b667f] + Commit list: + 319a57029a Revert "build: add libarchive to optional external dependencies" + cb88423913 Merge tag 'v22.11.4' into 22.11 + 076a05a022 Reapply "net/iavf: fix abnormal disable HW interrupt" + caf0f5d395 version: 22.11.4 + 7d6f1cc05f Revert "net/iavf: fix abnormal disable HW interrupt" + dc9c799c7d vhost: fix missing spinlock unlock + 4307659a90 net/mlx5: fix LACP redirection in Rx domain + 6ef77f2a5e net/gve: fix RX buffer size alignment + ee2197cbd4 crypto/openssl: fix memory leaks in asym session + 02cb6cdbcd net/nfp: fix reconfigure logic of set MAC address + 9651485a36 net/nfp: fix link status interrupt + 6350131153 app/test: disable graph auto test for windows + 97cdb0db6d Revert "eventdev: fix alignment padding" + 6c688dec84 event/dlb2: fix disable PASID + 6471372564 net/hns3: fix VF reset handler interruption + 28e7716ee3 net/hns3: fix reset event status + c5d0485861 net/hns3: fix ignored reset event + da708a5b4e ethdev: fix ESP packet type description + 3b67c07511 mempool: clarify enqueue/dequeue ops documentation + 9c68d8a845 mempool: fix get function documentation + 9f0f54a72e doc: remove number of commands in vDPA guide + e1aa1c2ce0 doc: fix some ordered lists + cabe6f3bae doc: remove restriction on ixgbe vector support + de38896353 app/pipeline: add sigint handler + 22a1b5067c test/hash: fix creation error log + 122f600818 usertools/pmdinfo: fix usage typos + 0a742f1ec0 examples/ethtool: fix pause configuration + 3ca387345f app/dumpcap: allow multiple invocations + 578b87a955 pdump: fix error number on IPC response + 364faab59a eal/windows: fix build with recent MinGW + 82dd3dde96 config: fix RISC-V native build + 2a720ab686 examples/ipsec-secgw: fix partial overflow + b5913482ec net/mlx5: fix offset size in conntrack flow action + 434c2fc890 doc: update versions recommendations for i40e and ice + ce75c85b4a doc: fix RSS flow description in hns3 guide + fb6560fcfc doc: update features in hns3 guide + 6a44194cc1 doc: fix hns3 build option about max queue number + 085a72d8f8 net/txgbe: fix out of bound 
access + e0fa003972 eal/riscv: fix vector type alignment + d3741774d0 net/mlx5: fix MPRQ stride size check + 07a861e472 net/mlx5: zero UDP checksum over IPv4 in encapsulation + c9e75ba878 net/mlx5: fix shared Rx queue list management + 4699a174ea net/mlx5: fix multi-segment Tx inline data length + 14a722ae6b net/mlx5: fix hairpin queue states + ab46753245 net/mlx5: fix hairpin queue unbind + 480df9f41e net/mlx5: fix use after free on Rx queue start + d32e9e689b net/mlx5: fix destroying external representor flow + ca79cce293 net/mlx5: fix missing flow rules for external SQ + 97b9c4dca3 net/mlx5: fix counter query during port close + 49bd7912ee net/mlx5: fix validation of sample encap flow action + f00e3b40fd net/mlx5: fix unlock mismatch + 5ff3454aac app/testpmd: fix tunnel TSO configuration + 2959baf71f app/testpmd: add explicit check for tunnel TSO + 6f1c35e7a8 app/testpmd: fix tunnel TSO capability check + b3009db2e4 net/hns3: fix mailbox sync + be77f806f2 test/bonding: fix uninitialized RSS configuration + 5b8da03c1d ethdev: account for smaller MTU when setting default + 5508c2d500 app/testpmd: remove useless check in TSO command + 28f7dd30ee net/sfc: remove null dereference in log + c38b876902 meter: fix RFC4115 trTCM API Doxygen + 71876ad11a event/dlb2: fix missing queue ordering capability flag + a998d657f5 common/cnxk: fix SDP channel mask + 0d7ea280fe net/ice: fix crash on closing representor ports + 2dfcdb87a5 test/bbdev: assert failed test for queue configure + 7b8504aee3 test/bbdev: fix Python script subprocess + e42af2946a baseband/acc: fix TB mode on VRB1 + 2e2cc882eb event/dlb2: fix name check in self-test + 2cd4ce8518 app/dumpcap: fix mbuf pool ring type + ac8ca59223 event/dlb2: disable PASID + 5586a7be43 bus/pci: add PASID control + dec4b39b20 build: add libarchive to optional external dependencies + cb225e1ae9 config/arm: fix aarch32 build with GCC 13 + e787872a59 net/ice: fix Tx preparation + 8dc1b42e31 net/iavf: fix Tx preparation + c70a8fa66e crypto/nitrox: fix panic with high number of segments + f089d80b08 doc: update kernel module entry in QAT guide + d7b738d6dc net/ice: fix DCF port statistics + c7270eb8cd net/iavf: fix Tx offload flags check + 84c1679b72 net/iavf: fix indent in Tx path + c1c417228a net/iavf: fix Tx offload mask + ae7a0f6233 net/ice: fix L1 check interval + f2ab72ed3d common/mlx5: fix controller index parsing + 72ab4bf42f net/mlx5: fix flow workspace double free in Windows + f0bc006230 net/mlx5/hws: fix integrity bits level + 815a8ce067 net/mlx5: fix flow thread safety flag for HWS + b57dd69e61 net/mlx5: fix E-Switch mirror flow rule validation + a908d0cb5b net/mlx5: fix NIC flow capability query + 74b594dfd3 net/mlx5: fix decap action checking in sample flow + 81410056f5 net/tap: fix RSS for fragmented packets + 85b95b48ce net/nfp: fix DMA error after abnormal exit + 5b470b0567 app/procinfo: adjust format of RSS info + df4470f94c app/procinfo: fix RSS info + 69d6a23bb1 net/tap: fix IPv4 checksum offloading + db03b8a934 net/tap: fix L4 checksum offloading + de2edad82b net/hns3: fix uninitialized hash algo value + 085f8342f5 net/hns3: keep set/get algo key functions local + 555126492b net/hns3: fix some error logs + 84b217ed2c net/hns3: fix some return values + 36f408b7d8 net/hns3: fix LRO offload to report + 518bc13327 net/hns3: fix setting DCB capability + 4f039a69e9 net/enic: avoid extra unlock in MTU set + 73440e9fd3 ethdev: fix 32-bit build with GCC 13 + 14a4c7b3e8 net/ngbe: add proper memory barriers in Rx + 33f8a0ce2c net/txgbe: add proper 
memory barriers in Rx + 22b7f9edf9 net/bonding: fix possible overrun + d3f778e077 test/bonding: add missing check + 7989293c66 test/bonding: remove unreachable statement + 8d45a7ecac net/nfp: fix reconfigure logic in VF initialization + cb30eb9a11 net/nfp: fix reconfigure logic in PF initialization + 8dcc1b4552 net/hns3: refactor interrupt state query + 9a62d06e64 net/hns3: fix IMP or global reset + 65231cf278 net/hns3: fix multiple reset detected log + 290166c239 net/hns3: remove reset log in secondary + c4bf1adb63 net/hns3: fix double stats for IMP and global reset + 70c868a155 net/hns3: fix unchecked Rx free threshold + b9195e016e net/hns3: fix typo in function name + 976cefa803 app/testpmd: fix early exit from signal + 8f4eb60951 net/gve: update max Rx packet length to be based on MTU + 6f78b589c5 ethdev: fix function name in comment + e671505da1 net/nfp: fix crash on close + 4dfec51b97 net/cnxk: fix data offset in vector Tx + 780be398dc common/cnxk: fix pool buffer size in opaque mode + 8c291d8778 vhost: fix checking virtqueue access in stats API + a07736eb68 vhost: fix missing lock protection in power monitor API + adae353b36 vhost: fix check on virtqueue access in in-flight getter + 7f80528fbd vhost: fix check on virtqueue access in async registration + 2dc5b2aadd vhost: fix missing check on virtqueue access + ac1162d97a vhost: fix missing vring call check on virtqueue access + c1001c18e6 net/virtio: fix link state interrupt vector setting + b485fae237 net/virtio: fix missing next flag in Tx packed ring + 51205657f1 eventdev: fix missing driver names in info struct + 0beed895ef eventdev: fix device pointer for vdev-based devices + 4f5e8c612f malloc: remove return from void functions + f2f948863b bus/ifpga: fix driver header dependency + f42884a20c app/procinfo: remove unnecessary rte_malloc + c20753f52f ethdev: add check in async flow action query + b33ea9e6de net/bonding: fix link status callback stop + d316924b1f app/testpmd: fix primary process not polling all queues + 0cece838b9 net/vmxnet3: fix Rx and Tx queue state + bdaff48945 net/virtio: fix Rx and Tx queue state + b1dfb750b5 net/vhost: fix Rx and Tx queue state + dc77b5d082 net/txgbe: fix Rx and Tx queue state + beb475c1c4 net/softnic: fix Rx and Tx queue state + 8ab4f361b0 net/sfc: fix Rx and Tx queue state + 564958b988 net/ring: fix Rx and Tx queue state + f2edd9823d net/pfe: fix Rx and Tx queue state + 63a8198b22 net/octeontx: fix Rx and Tx queue state + f6ec84e5e5 net/octeon_ep: fix Rx and Tx queue state + eb53c49bad net/null: fix Rx and Tx queue state + df52eadf8f net/ngbe: fix Rx and Tx queue state + 4e28c37ff5 net/nfp: fix Rx and Tx queue state + e2e72d04cb net/mvpp2: fix Rx and Tx queue state + 5116feb88f net/mvneta: fix Rx and Tx queue state + accdf4594b net/mlx4: fix Rx and Tx queue state + 26fe1d02e5 net/memif: fix Rx and Tx queue state + 1f38a20d2e net/ipn3ke: fix Rx and Tx queue state + 0d4ab569a1 net/hinic: fix Rx and Tx queue state + 2492c41642 net/enic: fix Rx and Tx queue state + 3df3bf7e5c net/enetc: fix Rx and Tx queue state + 1efb8fcf47 net/ena: fix Rx and Tx queue state + 36e418d789 net/e1000: fix Rx and Tx queue state + ef3b3501b8 net/dpaa2: fix Rx and Tx queue state + b97544f35a net/dpaa: fix Rx and Tx queue state + 606a11a71c net/cxgbe: fix Rx and Tx queue state + 50177d5bf1 net/bonding: fix Rx and Tx queue state + 5f75982239 net/bnxt: fix Rx and Tx queue state + ad55189740 net/bnx2x: fix Rx and Tx queue state + c0001c9115 net/avp: fix Rx and Tx queue state + b4078f8a6b net/af_xdp: fix Rx and Tx 
queue state + 6c8f427c57 net/af_packet: fix Rx and Tx queue state + a8fd5060ac net/mana: add missing new line to data path logs + c6b2c85ca4 fib6: fix adding default route as first route + e4649e8d78 fib: fix adding default route overwriting entire table + 461a81717f net/mlx5/hws: fix field copy bind + 1c9bab50cc net/mlx5: fix jump ipool entry size + 62a937220c common/mlx5: replace use of PMD log type + 8115fe3345 net/mlx5: fix leak in sysfs port name translation + 06cd13b57e net/nfp: fix initialization of physical representors + e054f121ef net/ark: support single function with multiple port + b796460bce net/axgbe: identify CPU with cpuid + a7ff0ba143 net/ice: fix TSO with big segments + a2799805b6 net/ice: remove log from Tx prepare function + 50e27677ba net/iavf: fix TSO with big segments + 235b473bf9 net/iavf: remove log from Tx prepare function + eec9ac43a7 net/iavf: fix Tx debug + 101faaba08 net/ice: fix initial link status + b64cc84bb5 net/iavf: fix ESN session update + efdea6d6f4 net/iavf: unregister interrupt handler before FD close + 948b21bd59 net/iavf: fix port stats clearing + 4467e32db9 net/ice: fix TM configuration clearing + 36e302e98b net/i40e: fix buffer leak on Rx reconfiguration + baaa298893 net/iavf: fix checksum offloading + 491e9d37c1 net/iavf: fix VLAN offload strip flag + 52c903949a net/ice: write timestamp to first segment in scattered Rx + d4cd714640 net/i40e: fix FDIR queue receives broadcast packets + 202402218c app/bbdev: fix link with NXP LA12XX + 923d7a21ab baseband/acc: fix ACC100 HARQ input alignment + 9e7a4f889b vdpa/mlx5: fix unregister kick handler order + 529b747a92 common/cnxk: replace direct API usage in REE + 513e507ee4 common/cnxk: remove dead Meson code + 257c5a049a common/cnxk: fix RSS key configuration + 5dc2babce4 common/cnxk: fix aura disable handling + 1ef99ed9dd mempool/cnxk: fix alloc from non-EAL threads + 0da4d859cd mempool/cnxk: fix free from non-EAL threads + 2b29a7975b dma/cnxk: fix chunk buffer failure return code + 46d25f17a6 dma/cnxk: fix device reconfigure + 915fbdb681 dma/cnxk: fix device state + ab0aa8b245 common/cnxk: fix DPI memzone name + 0259cb16f7 net/cnxk: fix uninitialized variable + e38988edfd common/cnxk: fix incorrect aura ID + 0e8848d299 common/cnxk: fix different size bit operations + bc87fece07 common/cnxk: fix xstats for different packet sizes + 8c360e4392 common/cnxk: fix default flow action setting + 1b7fb134d9 event/sw: fix ordering corruption with op release + e9da79d973 eventdev/eth_rx: fix timestamp field register in mbuf + 6d49fdbd99 event/cnxk: fix context flush in port cleanup + 22d4975573 event/sw: remove obsolete comment + bb60e98212 test/event: fix crypto null device creation + 6e9061bd77 event/cnxk: fix return values for capability API + ba2ec17a2f eventdev/crypto: fix circular buffer full case + 15b1bc964b event/cnxk: fix CASP usage for clang + 148478f997 event/cnxk: fix getwork mode devargs parsing + d854ba2422 eventdev: fix alignment padding + 73fa336da5 bus/pci: fix device ID log + 5a03754492 eventdev: fix symbol export for port maintenance + f150a45b65 crypto/ipsec_mb: add dependency check for cross build + 39b817dfee crypto/cnxk: fix IPsec CCM and GCM capabilities + 40ea03b5ea crypto/qat: fix raw API null algorithm digest + 31f6839de6 test/crypto: fix typo in asym tests + 486fb2f97c test/crypto: fix return value for GMAC case + 8506f6d67c test/crypto: skip some synchronous tests with CPU crypto + 5f940557cf test/crypto: fix IV in some vectors + e4cba3bb19 test/security: fix IPv6 next header field + 
7004929e29 doc: replace code blocks with includes in security guide + 3d75e696dc crypto/dpaa_sec: fix debug prints + 00819a704d crypto/dpaa2_sec: fix debug prints + 8af3c9994e cryptodev: add missing doc for security context + 1a318a87a3 test: fix named test macro + 086e0e529e app/testpmd: fix help string + 8ef9e184ca net/gve: fix max MTU limit + 6754edbeb5 net/tap: use MAC address parse API instead of local parser + 367755a8dc net/ngbe: check process type in close operation + a340716239 net/txgbe: check process type in close operation + d9b0b9b3e2 net/ngbe: keep link down after device close + adb1b60601 net/txgbe: keep link down after device close + 46e19fcf10 net/ngbe: reconfigure MAC Rx when link update + 768e6a3d83 net/txgbe: reconfigure MAC Rx when link update + 12662c8d63 net/ngbe: prevent NIC from slowing down link speed + 401b94367f net/ngbe: fix flow control + ae0ac0ba92 net/txgbe: fix GRE tunnel packet checksum + 2c62e36805 net/txgbe: add Tx queue maximum limit + 64d7c1df88 net/netvsc: increase VSP response timeout to 60 seconds + af391d2427 hash: align SSE lookup to scalar implementation + 421c47495c mem: fix deadlock with multiprocess + 01d3e3c456 gpu/cuda: fix build with external GDRCopy + e55de889b7 bus/dpaa: fix build with asserts for GCC 13 + 517bb40874 random: initialize state for unregistered non-EAL threads + 4e986000b1 net/hns3: fix order in NEON Rx + 0e82ee1363 net/mana: add 32-bit short doorbell + e9ead33c9d net/mana: enable 32-bit build + b6200f6581 app/test: fix reference to master in bonding test + 2dec2783a0 net/hns3: fix traffic management dump text alignment + 6961856e84 net/hns3: fix traffic management thread safety + 15b43a21b2 net/hns3: fix flushing multicast MAC address + 71c9f50273 net/hns3: fix error code for multicast resource + 25c73bdc49 net/hns3: fix VF default MAC modified when set failed + ce57bf585c net/nfp: fix control message packets + de47856936 net/sfc: add missing error code indication to MAE init path + 563e7c87df net/sfc: account for data offset on Tx + 37863f1caa net/sfc: set max Rx packet length for representors + 38b7efe568 net/bonding: fix header for C++ + f4b30c5e2a rawdev: fix device class in log message + f76fc5ef15 eal/unix: fix firmware reading with external xz helper + 36c07ef565 mempool: fix default ops for an empty mempool + 36001100ff ci: fix race on container image name + + +* Fri Feb 16 2024 Timothy Redaelli - 3.2.0-58 +- Remove .gitmodules [RH git: 110deeacdf] + + +* Wed Feb 14 2024 Open vSwitch CI - 3.2.0-57 +- Merging upstream branch-3.2 [RH git: c3fd52fe9b] + Commit list: + a64c208146 netdev-linux: Avoid deadlock in netdev_get_speed. + 46d1f2a536 ofproto-dpif-monitor: Remove unneeded calls to clear packets. + 1e70185e9a bfd: Set proper offsets and flags in BFD packets. + + * Fri Feb 09 2024 Open vSwitch CI - 3.2.0-56 - Merging upstream branch-3.2 [RH git: 45ac6932dd] Commit list: