From 744ed63539bf722b80f47c779552b16f08229cd5 Mon Sep 17 00:00:00 2001 From: Jenkins Automation Server Date: Feb 12 2021 21:43:40 +0000 Subject: Import openvswitch2.13-2.13.0-79 from Fast DataPath --- diff --git a/SOURCES/openvswitch-2.13.0.patch b/SOURCES/openvswitch-2.13.0.patch index 2a07c43..afa2297 100644 --- a/SOURCES/openvswitch-2.13.0.patch +++ b/SOURCES/openvswitch-2.13.0.patch @@ -1,63 +1,176 @@ -diff --git a/.cirrus.yml b/.cirrus.yml -index 1b32f55d65..263c2cd7ed 100644 ---- a/.cirrus.yml -+++ b/.cirrus.yml -@@ -3,7 +3,7 @@ freebsd_build_task: - freebsd_instance: - matrix: - image_family: freebsd-12-1-snap -- image_family: freebsd-11-3-snap -+ image_family: freebsd-11-4-snap - cpu: 4 - memory: 8G - -@@ -16,6 +16,7 @@ freebsd_build_task: - - prepare_script: - - sysctl -w kern.coredump=0 -+ - pkg update -f - - pkg install -y ${DEPENDENCIES} - - configure_script: -diff --git a/.travis.yml b/.travis.yml -index abd2a9117a..1dca2045ce 100644 ---- a/.travis.yml -+++ b/.travis.yml -@@ -51,6 +51,15 @@ matrix: - - os: osx - compiler: clang - env: OPTS="--disable-ssl" -+ - env: DEB_PACKAGE=1 -+ addons: -+ apt: -+ packages: -+ - linux-headers-$(uname -r) -+ - build-essential -+ - fakeroot -+ - devscripts -+ - equivs - - script: ./.travis/${TRAVIS_OS_NAME}-build.sh $OPTS - -diff --git a/.travis/linux-build.sh b/.travis/linux-build.sh -index bb47b3ee19..6ad2894f53 100755 ---- a/.travis/linux-build.sh -+++ b/.travis/linux-build.sh -@@ -35,7 +35,9 @@ function install_kernel() - - url="${base_url}/linux-${version}.tar.xz" - # Download kernel sources. Try direct link on CDN failure. -- wget ${url} || wget ${url} || wget ${url/cdn/www} +diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh +new file mode 100755 +index 0000000000..ec0b40e472 +--- /dev/null ++++ b/.ci/linux-build.sh +@@ -0,0 +1,244 @@ ++#!/bin/bash ++ ++set -o errexit ++set -x ++ ++CFLAGS_FOR_OVS="-g -O2" ++SPARSE_FLAGS="" ++EXTRA_OPTS="--enable-Werror" ++TARGET="x86_64-native-linuxapp-gcc" ++ ++function install_kernel() ++{ ++ if [[ "$1" =~ ^5.* ]]; then ++ PREFIX="v5.x" ++ elif [[ "$1" =~ ^4.* ]]; then ++ PREFIX="v4.x" ++ elif [[ "$1" =~ ^3.* ]]; then ++ PREFIX="v3.x" ++ else ++ PREFIX="v2.6/longterm/v2.6.32" ++ fi ++ ++ base_url="https://cdn.kernel.org/pub/linux/kernel/${PREFIX}" ++ # Download page with list of all available kernel versions. ++ wget ${base_url}/ ++ # Uncompress in case server returned gzipped page. ++ (file index* | grep ASCII) || (mv index* index.new.gz && gunzip index*) ++ # Get version of the latest stable release. ++ hi_ver=$(echo ${1} | sed 's/\./\\\./') ++ lo_ver=$(cat ./index* | grep -P -o "${hi_ver}\.[0-9]+" | \ ++ sed 's/.*\..*\.\(.*\)/\1/' | sort -h | tail -1) ++ version="${1}.${lo_ver}" ++ ++ rm -rf index* linux-* ++ ++ url="${base_url}/linux-${version}.tar.xz" ++ # Download kernel sources. Try direct link on CDN failure. + wget ${url} || + (rm -f linux-${version}.tar.xz && wget ${url}) || + (rm -f linux-${version}.tar.xz && wget ${url/cdn/www}) - - tar xvf linux-${version}.tar.xz > /dev/null - pushd linux-${version} -@@ -159,13 +161,24 @@ function build_ovs() - fi - } - ++ ++ tar xvf linux-${version}.tar.xz > /dev/null ++ pushd linux-${version} ++ make allmodconfig ++ ++ # Cannot use CONFIG_KCOV: -fsanitize-coverage=trace-pc is not supported by compiler ++ sed -i 's/CONFIG_KCOV=y/CONFIG_KCOV=n/' .config ++ ++ # stack validation depends on tools/objtool, but objtool does not compile on travis. ++ # It is giving following error. 
++ # >>> GEN arch/x86/insn/inat-tables.c ++ # >>> Semantic error at 40: Unknown imm opnd: AL ++ # So for now disable stack-validation for the build. ++ ++ sed -i 's/CONFIG_STACK_VALIDATION=y/CONFIG_STACK_VALIDATION=n/' .config ++ make oldconfig ++ ++ # Older kernels do not include openvswitch ++ if [ -d "net/openvswitch" ]; then ++ make net/openvswitch/ ++ else ++ make net/bridge/ ++ fi ++ ++ if [ "$AFXDP" ]; then ++ sudo make headers_install INSTALL_HDR_PATH=/usr ++ pushd tools/lib/bpf/ ++ # Bulding with gcc because there are some issues in make files ++ # that breaks building libbpf with clang on Travis. ++ CC=gcc sudo make install ++ CC=gcc sudo make install_headers ++ sudo ldconfig ++ popd ++ # The Linux kernel defines __always_inline in stddef.h (283d7573), and ++ # sys/cdefs.h tries to re-define it. Older libc-dev package in xenial ++ # doesn't have a fix for this issue. Applying it manually. ++ sudo sed -i '/^# define __always_inline .*/i # undef __always_inline' \ ++ /usr/include/x86_64-linux-gnu/sys/cdefs.h || true ++ EXTRA_OPTS="${EXTRA_OPTS} --enable-afxdp" ++ else ++ EXTRA_OPTS="${EXTRA_OPTS} --with-linux=$(pwd)" ++ echo "Installed kernel source in $(pwd)" ++ fi ++ popd ++} ++ ++function install_dpdk() ++{ ++ local DPDK_VER=$1 ++ local VERSION_FILE="dpdk-dir/travis-dpdk-cache-version" ++ ++ if [ "${DPDK_VER##refs/*/}" != "${DPDK_VER}" ]; then ++ # Avoid using cache for git tree build. ++ rm -rf dpdk-dir ++ ++ DPDK_GIT=${DPDK_GIT:-https://dpdk.org/git/dpdk} ++ git clone --single-branch $DPDK_GIT dpdk-dir -b "${DPDK_VER##refs/*/}" ++ pushd dpdk-dir ++ git log -1 --oneline ++ else ++ if [ -f "${VERSION_FILE}" ]; then ++ VER=$(cat ${VERSION_FILE}) ++ if [ "${VER}" = "${DPDK_VER}" ]; then ++ EXTRA_OPTS="${EXTRA_OPTS} --with-dpdk=$(pwd)/dpdk-dir/build" ++ echo "Found cached DPDK ${VER} build in $(pwd)/dpdk-dir" ++ return ++ fi ++ fi ++ # No cache or version mismatch. ++ rm -rf dpdk-dir ++ wget https://fast.dpdk.org/rel/dpdk-$1.tar.xz ++ tar xvf dpdk-$1.tar.xz > /dev/null ++ DIR_NAME=$(tar -tf dpdk-$1.tar.xz | head -1 | cut -f1 -d"/") ++ mv ${DIR_NAME} dpdk-dir ++ pushd dpdk-dir ++ fi ++ ++ make config CC=gcc T=$TARGET ++ ++ if [ "$DPDK_SHARED" ]; then ++ sed -i '/CONFIG_RTE_BUILD_SHARED_LIB=n/s/=n/=y/' build/.config ++ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/$TARGET/lib ++ fi ++ ++ # Disable building DPDK kernel modules. Not needed for OVS build or tests. ++ sed -i '/CONFIG_RTE_EAL_IGB_UIO=y/s/=y/=n/' build/.config ++ sed -i '/CONFIG_RTE_KNI_KMOD=y/s/=y/=n/' build/.config ++ ++ # Enable pdump support in DPDK. ++ sed -i '/CONFIG_RTE_LIBRTE_PMD_PCAP=n/s/=n/=y/' build/.config ++ sed -i '/CONFIG_RTE_LIBRTE_PDUMP=n/s/=n/=y/' build/.config ++ ++ # Switching to 'default' machine to make dpdk-dir cache usable on different ++ # CPUs. We can't be sure that all CI machines are exactly same. ++ sed -i '/CONFIG_RTE_MACHINE="native"/s/="native"/="default"/' build/.config ++ ++ make -j4 CC=gcc EXTRA_CFLAGS='-fPIC' ++ EXTRA_OPTS="$EXTRA_OPTS --with-dpdk=$(pwd)/build" ++ echo "Installed DPDK source in $(pwd)" ++ popd ++ echo "${DPDK_VER}" > ${VERSION_FILE} ++} ++ ++function configure_ovs() ++{ ++ ./boot.sh ++ ./configure CFLAGS="${CFLAGS_FOR_OVS}" $* || { cat config.log; exit 1; } ++} ++ ++function build_ovs() ++{ ++ local KERNEL=$1 ++ ++ configure_ovs $OPTS ++ make selinux-policy ++ ++ # Only build datapath if we are testing kernel w/o running testsuite and ++ # AF_XDP support. ++ if [ "${KERNEL}" ] && ! 
[ "$AFXDP" ]; then ++ pushd datapath ++ make -j4 ++ popd ++ else ++ make -j4 || { cat config.log; exit 1; } ++ fi ++} ++ +if [ "$DEB_PACKAGE" ]; then + mk-build-deps --install --root-cmd sudo --remove debian/control + dpkg-checkbuilddeps @@ -69,45 +182,399 @@ index bb47b3ee19..6ad2894f53 100755 + exit 0 +fi + - if [ "$KERNEL" ]; then - install_kernel $KERNEL - fi - - if [ "$DPDK" ] || [ "$DPDK_SHARED" ]; then - if [ -z "$DPDK_VER" ]; then -- DPDK_VER="19.11" ++if [ "$KERNEL" ]; then ++ install_kernel $KERNEL ++fi ++ ++if [ "$DPDK" ] || [ "$DPDK_SHARED" ]; then ++ if [ -z "$DPDK_VER" ]; then + DPDK_VER="19.11.2" - fi - install_dpdk $DPDK_VER - # Enable pdump support in OVS. -diff --git a/.travis/linux-prepare.sh b/.travis/linux-prepare.sh -index fda13e7d21..71eb347e89 100755 ---- a/.travis/linux-prepare.sh -+++ b/.travis/linux-prepare.sh -@@ -2,14 +2,22 @@ - - set -ev - ++ fi ++ install_dpdk $DPDK_VER ++ # Enable pdump support in OVS. ++ EXTRA_OPTS="${EXTRA_OPTS} --enable-dpdk-pdump" ++ if [ "$CC" = "clang" ]; then ++ # Disregard cast alignment errors until DPDK is fixed ++ CFLAGS_FOR_OVS="${CFLAGS_FOR_OVS} -Wno-cast-align" ++ fi ++fi ++ ++if [ "$CC" = "clang" ]; then ++ CFLAGS_FOR_OVS="${CFLAGS_FOR_OVS} -Wno-error=unused-command-line-argument" ++elif [ "$M32" ]; then ++ # Not using sparse for 32bit builds on 64bit machine. ++ # Adding m32 flag directly to CC to avoid any posiible issues with API/ABI ++ # difference on 'configure' and 'make' stages. ++ export CC="$CC -m32" ++else ++ OPTS="--enable-sparse" ++ if [ "$AFXDP" ]; then ++ # netdev-afxdp uses memset for 64M for umem initialization. ++ SPARSE_FLAGS="${SPARSE_FLAGS} -Wno-memcpy-max-count" ++ fi ++ CFLAGS_FOR_OVS="${CFLAGS_FOR_OVS} ${SPARSE_FLAGS}" ++fi ++ ++save_OPTS="${OPTS} $*" ++OPTS="${EXTRA_OPTS} ${save_OPTS}" ++ ++if [ "$TESTSUITE" ]; then ++ # 'distcheck' will reconfigure with required options. ++ # Now we only need to prepare the Makefile without sparse-wrapped CC. ++ configure_ovs ++ ++ export DISTCHECK_CONFIGURE_FLAGS="$OPTS" ++ if ! make distcheck CFLAGS="${CFLAGS_FOR_OVS}" \ ++ TESTSUITEFLAGS=-j4 RECHECK=yes; then ++ # testsuite.log is necessary for debugging. ++ cat */_build/sub/tests/testsuite.log ++ exit 1 ++ fi ++else ++ if [ -z "${KERNEL_LIST}" ]; then build_ovs ${KERNEL}; ++ else ++ save_EXTRA_OPTS="${EXTRA_OPTS}" ++ for KERNEL in ${KERNEL_LIST}; do ++ echo "==============================" ++ echo "Building with kernel ${KERNEL}" ++ echo "==============================" ++ EXTRA_OPTS="${save_EXTRA_OPTS}" ++ install_kernel ${KERNEL} ++ OPTS="${EXTRA_OPTS} ${save_OPTS}" ++ build_ovs ${KERNEL} ++ make distclean ++ done ++ fi ++fi ++ ++exit 0 +diff --git a/.ci/linux-prepare.sh b/.ci/linux-prepare.sh +new file mode 100755 +index 0000000000..fea905a830 +--- /dev/null ++++ b/.ci/linux-prepare.sh +@@ -0,0 +1,42 @@ ++#!/bin/bash ++ ++set -ev ++ +if [ "$DEB_PACKAGE" ]; then + # We're not using sparse for debian packages, tests are skipped and + # all extra dependencies tracked by mk-build-deps. + exit 0 +fi + - # Build and install sparse. - # - # Explicitly disable sparse support for llvm because some travis - # environments claim to have LLVM (llvm-config exists and works) but - # linking against it fails. ++# Build and install sparse. ++# ++# Explicitly disable sparse support for llvm because some travis ++# environments claim to have LLVM (llvm-config exists and works) but ++# linking against it fails. +# Disabling sqlite support because sindex build fails and we don't +# really need this utility being installed. 
- git clone git://git.kernel.org/pub/scm/devel/sparse/sparse.git - cd sparse --make -j4 HAVE_LLVM= install ++git clone git://git.kernel.org/pub/scm/devel/sparse/sparse.git ++cd sparse +make -j4 HAVE_LLVM= HAVE_SQLITE= install - cd .. ++cd .. ++ ++pip3 install --disable-pip-version-check --user flake8 hacking ++pip3 install --user --upgrade docutils ++ ++if [ "$M32" ]; then ++ # Installing 32-bit libraries. ++ pkgs="gcc-multilib" ++ if [ -z "$GITHUB_WORKFLOW" ]; then ++ # 32-bit and 64-bit libunwind can not be installed at the same time. ++ # This will remove the 64-bit libunwind and install 32-bit version. ++ # GitHub Actions doesn't have 32-bit versions of these libs. ++ pkgs=$pkgs" libunwind-dev:i386 libunbound-dev:i386" ++ fi ++ ++ sudo apt-get install -y $pkgs ++fi ++ ++# IPv6 is supported by kernel but disabled in TravisCI images: ++# https://github.com/travis-ci/travis-ci/issues/8891 ++# Enable it to avoid skipping of IPv6 related tests. ++sudo sysctl -w net.ipv6.conf.all.disable_ipv6=0 +diff --git a/.ci/osx-build.sh b/.ci/osx-build.sh +new file mode 100755 +index 0000000000..bf2c13fa3c +--- /dev/null ++++ b/.ci/osx-build.sh +@@ -0,0 +1,32 @@ ++#!/bin/bash ++ ++set -o errexit ++ ++CFLAGS="-Werror $CFLAGS" ++EXTRA_OPTS="" ++ ++function configure_ovs() ++{ ++ ./boot.sh && ./configure $* ++} ++ ++configure_ovs $EXTRA_OPTS $* ++ ++if [ "$CC" = "clang" ]; then ++ set make CFLAGS="$CFLAGS -Wno-error=unused-command-line-argument" ++else ++ set make CFLAGS="$CFLAGS $BUILD_ENV" ++fi ++if ! "$@"; then ++ cat config.log ++ exit 1 ++fi ++if [ "$TESTSUITE" ] && [ "$CC" != "clang" ]; then ++ if ! make distcheck RECHECK=yes; then ++ # testsuite.log is necessary for debugging. ++ cat */_build/sub/tests/testsuite.log ++ exit 1 ++ fi ++fi ++ ++exit 0 +diff --git a/.ci/osx-prepare.sh b/.ci/osx-prepare.sh +new file mode 100755 +index 0000000000..b6447aba1b +--- /dev/null ++++ b/.ci/osx-prepare.sh +@@ -0,0 +1,3 @@ ++#!/bin/bash ++set -ev ++pip3 install --user --upgrade docutils +diff --git a/.cirrus.yml b/.cirrus.yml +index 1b32f55d65..263c2cd7ed 100644 +--- a/.cirrus.yml ++++ b/.cirrus.yml +@@ -3,7 +3,7 @@ freebsd_build_task: + freebsd_instance: + matrix: + image_family: freebsd-12-1-snap +- image_family: freebsd-11-3-snap ++ image_family: freebsd-11-4-snap + cpu: 4 + memory: 8G + +@@ -16,6 +16,7 @@ freebsd_build_task: + + prepare_script: + - sysctl -w kern.coredump=0 ++ - pkg update -f + - pkg install -y ${DEPENDENCIES} - pip3 install --disable-pip-version-check --user flake8 hacking + configure_script: +diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml +new file mode 100644 +index 0000000000..fe76a866ea +--- /dev/null ++++ b/.github/workflows/build-and-test.yml +@@ -0,0 +1,205 @@ ++name: Build and Test ++ ++on: [push, pull_request] ++ ++jobs: ++ build-linux: ++ env: ++ dependencies: | ++ automake libtool gcc bc libjemalloc1 libjemalloc-dev \ ++ libssl-dev llvm-dev libelf-dev libnuma-dev libpcap-dev \ ++ python3-openssl python3-pip python3-sphinx \ ++ selinux-policy-dev ++ deb_dependencies: | ++ linux-headers-$(uname -r) build-essential fakeroot devscripts equivs ++ AFXDP: ${{ matrix.afxdp }} ++ CC: ${{ matrix.compiler }} ++ DEB_PACKAGE: ${{ matrix.deb_package }} ++ DPDK: ${{ matrix.dpdk }} ++ DPDK_SHARED: ${{ matrix.dpdk_shared }} ++ KERNEL: ${{ matrix.kernel }} ++ KERNEL_LIST: ${{ matrix.kernel_list }} ++ LIBS: ${{ matrix.libs }} ++ M32: ${{ matrix.m32 }} ++ OPTS: ${{ matrix.opts }} ++ TESTSUITE: ${{ matrix.testsuite }} ++ ++ name: linux ${{ join(matrix.*, ' ') 
}} ++ runs-on: ubuntu-18.04 ++ timeout-minutes: 30 ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ include: ++ - compiler: gcc ++ opts: --disable-ssl ++ - compiler: clang ++ opts: --disable-ssl ++ ++ - compiler: gcc ++ testsuite: test ++ kernel: 3.16 ++ - compiler: clang ++ testsuite: test ++ kernel: 3.16 ++ ++ - compiler: gcc ++ testsuite: test ++ opts: --enable-shared ++ - compiler: clang ++ testsuite: test ++ opts: --enable-shared ++ ++ - compiler: gcc ++ testsuite: test ++ dpdk: dpdk ++ - compiler: clang ++ testsuite: test ++ dpdk: dpdk ++ ++ - compiler: gcc ++ testsuite: test ++ libs: -ljemalloc ++ - compiler: clang ++ testsuite: test ++ libs: -ljemalloc ++ ++ - compiler: gcc ++ kernel_list: 5.0 4.20 4.19 4.18 4.17 4.16 ++ - compiler: clang ++ kernel_list: 5.0 4.20 4.19 4.18 4.17 4.16 ++ ++ - compiler: gcc ++ kernel_list: 4.15 4.14 4.9 4.4 3.16 ++ - compiler: clang ++ kernel_list: 4.15 4.14 4.9 4.4 3.16 ++ ++ - compiler: gcc ++ afxdp: afxdp ++ kernel: 5.3 ++ - compiler: clang ++ afxdp: afxdp ++ kernel: 5.3 ++ ++ - compiler: gcc ++ dpdk: dpdk ++ opts: --enable-shared ++ - compiler: clang ++ dpdk: dpdk ++ opts: --enable-shared ++ ++ - compiler: gcc ++ dpdk_shared: dpdk-shared ++ - compiler: clang ++ dpdk_shared: dpdk-shared ++ ++ - compiler: gcc ++ dpdk_shared: dpdk-shared ++ opts: --enable-shared ++ - compiler: clang ++ dpdk_shared: dpdk-shared ++ opts: --enable-shared ++ ++ - compiler: gcc ++ m32: m32 ++ opts: --disable-ssl ++ ++ - compiler: gcc ++ deb_package: deb ++ ++ steps: ++ - name: checkout ++ uses: actions/checkout@v2 ++ ++ - name: create ci signature file for the dpdk cache key ++ if: matrix.dpdk != '' || matrix.dpdk_shared != '' ++ # This will collect most of DPDK related lines, so hash will be different ++ # if something changed in a way we're building DPDK including DPDK_VER. ++ # This also allows us to use cache from any branch as long as version ++ # and a way we're building DPDK stays the same. ++ run: | ++ grep -irE 'RTE_|DPDK|meson|ninja' -r .ci/ > dpdk-ci-signature ++ cat dpdk-ci-signature ++ ++ - name: cache ++ if: matrix.dpdk != '' || matrix.dpdk_shared != '' ++ uses: actions/cache@v2 ++ env: ++ matrix_key: ${{ matrix.dpdk }}${{ matrix.dpdk_shared }} ++ ci_key: ${{ hashFiles('dpdk-ci-signature') }} ++ with: ++ path: dpdk-dir ++ key: ${{ env.matrix_key }}-${{ env.ci_key }} ++ ++ - name: update APT cache ++ run: sudo apt update ++ - name: install common dependencies ++ if: matrix.deb_package == '' ++ run: sudo apt install -y ${{ env.dependencies }} ++ - name: install dependencies for debian packages ++ if: matrix.deb_package != '' ++ run: sudo apt install -y ${{ env.deb_dependencies }} ++ - name: install libunbound libunwind ++ if: matrix.m32 == '' ++ run: sudo apt install -y libunbound-dev libunwind-dev ++ ++ - name: prepare ++ run: ./.ci/linux-prepare.sh ++ ++ - name: build ++ run: PATH="$PATH:$HOME/bin" ./.ci/linux-build.sh ++ ++ - name: upload deb packages ++ if: matrix.deb_package != '' ++ uses: actions/upload-artifact@v2 ++ with: ++ name: deb-packages ++ path: '/home/runner/work/ovs/*.deb' ++ ++ - name: copy logs on failure ++ if: failure() || cancelled() ++ run: | ++ # upload-artifact@v2 throws exceptions if it tries to upload socket ++ # files and we could have some socket files in testsuite.dir. ++ # Also, upload-artifact@v2 doesn't work well enough with wildcards. ++ # So, we're just archiving everything here to avoid any issues. 
++ mkdir logs ++ cp config.log ./logs/ ++ cp -r ./*/_build/sub/tests/testsuite.* ./logs/ || true ++ tar -czvf logs.tgz logs/ ++ ++ - name: upload logs on failure ++ if: failure() || cancelled() ++ uses: actions/upload-artifact@v2 ++ with: ++ name: logs-linux-${{ join(matrix.*, '-') }} ++ path: logs.tgz ++ ++ build-osx: ++ env: ++ CC: clang ++ OPTS: --disable-ssl ++ ++ name: osx clang --disable-ssl ++ runs-on: macos-latest ++ timeout-minutes: 30 ++ ++ strategy: ++ fail-fast: false ++ ++ steps: ++ - name: checkout ++ uses: actions/checkout@v2 ++ - name: install dependencies ++ run: brew install automake libtool ++ - name: prepare ++ run: ./.ci/osx-prepare.sh ++ - name: build ++ run: PATH="$PATH:$HOME/bin" ./.ci/osx-build.sh ++ - name: upload logs on failure ++ if: failure() ++ uses: actions/upload-artifact@v2 ++ with: ++ name: logs-osx-clang---disable-ssl ++ path: config.log diff --git a/AUTHORS.rst b/AUTHORS.rst index fe3935fca2..4c8772f63a 100644 --- a/AUTHORS.rst @@ -203,6 +670,25 @@ index 6702c58a2b..41e1315a4c 100644 Q: Are all the DPDK releases that OVS versions work with maintained? +diff --git a/Documentation/internals/contributing/submitting-patches.rst b/Documentation/internals/contributing/submitting-patches.rst +index 5a314cc60a..f2039595e7 100644 +--- a/Documentation/internals/contributing/submitting-patches.rst ++++ b/Documentation/internals/contributing/submitting-patches.rst +@@ -68,11 +68,9 @@ Testing is also important: + feature. A bug fix patch should preferably add a test that would + fail if the bug recurs. + +-If you are using GitHub, then you may utilize the travis-ci.org CI build system +-by linking your GitHub repository to it. This will run some of the above tests +-automatically when you push changes to your repository. See the "Continuous +-Integration with Travis-CI" in :doc:`/topics/testing` for details on how to set +-it up. ++If you are using GitHub, then you may utilize the GitHub Actions CI build ++system. It will run some of the above tests automatically when you push ++changes to your repository. + + Email Subject + ------------- diff --git a/Documentation/intro/install/dpdk.rst b/Documentation/intro/install/dpdk.rst index dbf88ec43f..86ee19d4c4 100644 --- a/Documentation/intro/install/dpdk.rst @@ -275,6 +761,64 @@ index c6c6fd8bde..4bc5aef59d 100644 $ export DPDK_TARGET=x86_64-native-linuxapp-gcc $ export DPDK_BUILD=$DPDK_DIR/$DPDK_TARGET $ cd $DPDK_DIR +diff --git a/Documentation/topics/testing.rst b/Documentation/topics/testing.rst +index 161e9d442e..fb1cbdf25e 100644 +--- a/Documentation/topics/testing.rst ++++ b/Documentation/topics/testing.rst +@@ -405,45 +405,17 @@ You should invoke scan-view to view analysis results. The last line of output + from ``clang-analyze`` will list the command (containing results directory) + that you should invoke to view the results on a browser. + +-Continuous Integration with Travis CI +-------------------------------------- ++Continuous Integration with GitHub Actions ++------------------------------------------ + +-A .travis.yml file is provided to automatically build Open vSwitch with various +-build configurations and run the testsuite using Travis CI. Builds will be +-performed with gcc, sparse and clang with the -Werror compiler flag included, +-therefore the build will fail if a new warning has been introduced. ++A ``.github/workflows/*.yml`` files provided to automatically build ++Open vSwitch with various build configurations and run the testsuite using ++GitHub Actions. 
Builds will be performed with gcc, sparse and clang with the ++-Werror compiler flag included, therefore the build will fail if a new warning ++has been introduced. + + The CI build is triggered via git push (regardless of the specific branch) or +-pull request against any Open vSwitch GitHub repository that is linked to +-travis-ci. +- +-Instructions to setup travis-ci for your GitHub repository: +- +-1. Go to https://travis-ci.org/ and sign in using your GitHub ID. +-2. Go to the "Repositories" tab and enable the ovs repository. You may disable +- builds for pushes or pull requests. +-3. In order to avoid forks sending build failures to the upstream mailing list, +- the notification email recipient is encrypted. If you want to receive email +- notification for build failures, replace the encrypted string: +- +- 1. Install the travis-ci CLI (Requires ruby >=2.0): gem install travis +- 2. In your Open vSwitch repository: travis encrypt mylist@mydomain.org +- 3. Add/replace the notifications section in .travis.yml and fill in the +- secure string as returned by travis encrypt:: +- +- notifications: +- email: +- recipients: +- - secure: "....." +- +- .. note:: +- You may remove/omit the notifications section to fall back to default +- notification behaviour which is to send an email directly to the author and +- committer of the failing commit. Note that the email is only sent if the +- author/committer have commit rights for the particular GitHub repository. +- +-4. Pushing a commit to the repository which breaks the build or the +- testsuite will now trigger a email sent to mylist@mydomain.org ++pull request against any Open vSwitch GitHub repository. + + vsperf + ------ diff --git a/Documentation/topics/userspace-tso.rst b/Documentation/topics/userspace-tso.rst index 94eddc0b2f..f7b6b2639a 100644 --- a/Documentation/topics/userspace-tso.rst @@ -312,8 +856,20 @@ index 94eddc0b2f..f7b6b2639a 100644 ~~~~~~~~~~~~~~~~~~ Performance Tuning +diff --git a/Documentation/tutorials/ipsec.rst b/Documentation/tutorials/ipsec.rst +index b4c3235132..d7c56d5fcf 100644 +--- a/Documentation/tutorials/ipsec.rst ++++ b/Documentation/tutorials/ipsec.rst +@@ -298,6 +298,7 @@ For example:: + Otherwise, error message will + be provided + Tunnel Type: gre ++ Local IP: %defaultroute + Remote IP: 2.2.2.2 + SKB mark: None + Local cert: None diff --git a/Makefile.am b/Makefile.am -index b279303d18..27ef9e4b48 100644 +index b279303d18..b3b56cd50e 100644 --- a/Makefile.am +++ b/Makefile.am @@ -46,7 +46,7 @@ AM_CPPFLAGS += -DNDEBUG @@ -325,15 +881,37 @@ index b279303d18..27ef9e4b48 100644 if WIN32 psep=";" +@@ -76,12 +76,12 @@ EXTRA_DIST = \ + MAINTAINERS.rst \ + README.rst \ + NOTICE \ ++ .ci/linux-build.sh \ ++ .ci/linux-prepare.sh \ ++ .ci/osx-build.sh \ ++ .ci/osx-prepare.sh \ + .cirrus.yml \ +- .travis.yml \ +- .travis/linux-build.sh \ +- .travis/linux-prepare.sh \ +- .travis/osx-build.sh \ +- .travis/osx-prepare.sh \ ++ .github/workflows/build-and-test.yml \ + appveyor.yml \ + boot.sh \ + poc/builders/Vagrantfile \ diff --git a/NEWS b/NEWS -index dab94e924d..e0f5fa9717 100644 +index dab94e924d..86d6e0a3d8 100644 --- a/NEWS +++ b/NEWS -@@ -1,3 +1,26 @@ +@@ -1,3 +1,30 @@ +v2.13.2 - xx xxx xxxx +--------------------- + - IPsec: -+ * Fixed support of strongswan 5.7+ in ovs-ipsec-monitor. ++ * Fixed support of strongswan 5.7+ in ovs-monitor-ipsec. ++ * Add option '--no-cleanup' to allow ovs-monitor-ipsec to stop without ++ tearing down IPsec tunnels. 
++ * Add option '--no-restart-ike-daemon' to allow ovs-monitor-ipsec to start ++ without restarting ipsec daemon. + - OVSDB: + * New unixctl command 'ovsdb-server/memory-trim-on-compaction on|off'. + If turned on, ovsdb-server will try to reclaim all the unused memory @@ -356,6 +934,41 @@ index dab94e924d..e0f5fa9717 100644 v2.13.0 - 14 Feb 2020 --------------------- - OVN: +@@ -43,6 +70,9 @@ v2.13.0 - 14 Feb 2020 + - 'ovs-appctl dpctl/dump-flows' can now show offloaded=partial for + partially offloaded flows, dp:dpdk for fully offloaded by dpdk, and + type filter supports new filters: "dpdk" and "partially-offloaded". ++ - Add new argument '--offload-stats' for command ++ 'ovs-appctl bridge/dump-flows', ++ so it can display offloaded packets statistics. + + v2.12.0 - 03 Sep 2019 + --------------------- +@@ -117,9 +147,6 @@ v2.12.0 - 03 Sep 2019 + * Add support for conntrack zone-based timeout policy. + - 'ovs-dpctl dump-flows' is no longer suitable for dumping offloaded flows. + 'ovs-appctl dpctl/dump-flows' should be used instead. +- - Add new argument '--offload-stats' for command +- 'ovs-appctl bridge/dump-flows', +- so it can display offloaded packets statistics. + - Add L2 GRE tunnel over IPv6 support. + + v2.11.0 - 19 Feb 2019 +diff --git a/README.rst b/README.rst +index e06ddf2671..8e64d74aee 100644 +--- a/README.rst ++++ b/README.rst +@@ -6,8 +6,8 @@ + Open vSwitch + ============ + +-.. image:: https://travis-ci.org/openvswitch/ovs.png +- :target: https://travis-ci.org/openvswitch/ovs ++.. image:: https://github.com/openvswitch/ovs/workflows/Build%20and%20Test/badge.svg ++ :target: https://github.com/openvswitch/ovs/actions + .. image:: https://ci.appveyor.com/api/projects/status/github/openvswitch/ovs?branch=master&svg=true&retina=true + :target: https://ci.appveyor.com/project/blp/ovs/history + .. 
image:: https://api.cirrus-ci.com/github/openvswitch/ovs.svg diff --git a/acinclude.m4 b/acinclude.m4 index c1470ccc6b..12fd6c4a51 100644 --- a/acinclude.m4 @@ -717,6 +1330,36 @@ index bc6580d708..b0932186af 100644 } CT_UPDATE_RES; /* Metadata mark for masked write to conntrack mark */ +diff --git a/datapath/conntrack.c b/datapath/conntrack.c +index 838cf63c90..67e0628703 100644 +--- a/datapath/conntrack.c ++++ b/datapath/conntrack.c +@@ -1972,7 +1972,8 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net) + struct hlist_head *head = &info->limits[i]; + struct ovs_ct_limit *ct_limit; + +- hlist_for_each_entry_rcu(ct_limit, head, hlist_node) ++ hlist_for_each_entry_rcu(ct_limit, head, hlist_node, ++ lockdep_ovsl_is_held()) + kfree_rcu(ct_limit, rcu); + } + kfree(ovs_net->ct_limit_info->limits); +diff --git a/datapath/datapath.c b/datapath/datapath.c +index 853bfb5af1..3dbdb5b59c 100644 +--- a/datapath/datapath.c ++++ b/datapath/datapath.c +@@ -2437,8 +2437,10 @@ static void __net_exit ovs_exit_net(struct net *dnet) + + ovs_netns_frags6_exit(dnet); + ovs_netns_frags_exit(dnet); +- ovs_ct_exit(dnet); + ovs_lock(); ++ ++ ovs_ct_exit(dnet); ++ + list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) + __dp_destroy(dp); + diff --git a/datapath/linux/compat/geneve.c b/datapath/linux/compat/geneve.c index c044b14896..bf995aa83a 100644 --- a/datapath/linux/compat/geneve.c @@ -769,6 +1412,41 @@ index 7c346aa31a..a039142e22 100644 #if !defined this_cpu_read #define this_cpu_read(ptr) percpu_read(ptr) #endif +diff --git a/datapath/linux/compat/include/linux/rculist.h b/datapath/linux/compat/include/linux/rculist.h +index 8df8ad8a27..40fd5e1710 100644 +--- a/datapath/linux/compat/include/linux/rculist.h ++++ b/datapath/linux/compat/include/linux/rculist.h +@@ -9,9 +9,28 @@ + #define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) + #endif + ++/* ++ * Check during list traversal that we are within an RCU reader ++ */ ++ ++#define check_arg_count_one(dummy) ++ ++#ifdef CONFIG_PROVE_RCU_LIST ++#define __list_check_rcu(dummy, cond, extra...) \ ++ ({ \ ++ check_arg_count_one(extra); \ ++ RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \ ++ "RCU-list traversed in non-reader section!"); \ ++ }) ++#else ++#define __list_check_rcu(dummy, cond, extra...) \ ++ ({ check_arg_count_one(extra); }) ++#endif ++ + #undef hlist_for_each_entry_rcu +-#define hlist_for_each_entry_rcu(pos, head, member) \ +- for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ ++#define hlist_for_each_entry_rcu(pos, head, member, cond...) \ ++ for (__list_check_rcu(dummy, ## cond, 0), \ ++ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ + typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ diff --git a/datapath/linux/compat/include/linux/skbuff.h b/datapath/linux/compat/include/linux/skbuff.h index 63972891bf..f276898b20 100644 --- a/datapath/linux/compat/include/linux/skbuff.h @@ -38441,7 +39119,7 @@ index 5289a70f6e..cf009f8264 100644 #define OVS_REQ_RDLOCK(...) __attribute__((shared_locks_required(__VA_ARGS__))) #define OVS_ACQ_RDLOCK(...) 
__attribute__((shared_lock_function(__VA_ARGS__))) diff --git a/ipsec/ovs-monitor-ipsec.in b/ipsec/ovs-monitor-ipsec.in -index 37e3703245..b84608a55d 100755 +index 37e3703245..64111768b3 100755 --- a/ipsec/ovs-monitor-ipsec.in +++ b/ipsec/ovs-monitor-ipsec.in @@ -101,7 +101,7 @@ class XFRM(object): @@ -38503,6 +39181,31 @@ index 37e3703245..b84608a55d 100755 tunnel = monitor.tunnels.get(ifname) for conn in conns: # IPsec "connection" names that we choose in strongswan +@@ -408,19 +416,19 @@ conn prevent_unencrypted_vxlan + """ + + auth_tmpl = {"psk": Template("""\ +- left=%defaultroute ++ left=$local_ip + right=$remote_ip + authby=secret"""), + "pki_remote": Template("""\ +- left=%defaultroute ++ left=$local_ip + right=$remote_ip + leftid=@$local_name + rightid=@$remote_name +- leftcert="$local_name" +- rightcert="$remote_name" ++ leftcert="ovs_certkey_$local_name" ++ rightcert="ovs_cert_$remote_name" + leftrsasigkey=%cert"""), + "pki_ca": Template("""\ +- left=%defaultroute ++ left=$local_ip + right=$remote_ip + leftid=@$local_name + rightid=@$remote_name @@ -536,7 +544,7 @@ conn prevent_unencrypted_vxlan # Delete old connections @@ -38521,7 +39224,83 @@ index 37e3703245..b84608a55d 100755 if line == '': break -@@ -989,7 +997,7 @@ class IPsecMonitor(object): +@@ -617,7 +625,10 @@ conn prevent_unencrypted_vxlan + continue + + conn = m.group(1) +- m = re.match(r"(.*)(-in-\d+|-out-\d+|-\d+)", conn) ++ m = re.match(r"(.*)(-in-\d+|-out-\d+)", conn) ++ if not m: ++ # GRE connections have format - ++ m = re.match(r"(.*)(-\d+)", conn) + if not m: + continue + +@@ -649,7 +660,8 @@ conn prevent_unencrypted_vxlan + proc = subprocess.Popen(['certutil', '-L', '-d', + 'sql:/etc/ipsec.d/'], + stdout=subprocess.PIPE, +- stderr=subprocess.PIPE) ++ stderr=subprocess.PIPE, ++ universal_newlines=True) + lines = proc.stdout.readlines() + + for line in lines: +@@ -678,7 +690,7 @@ conn prevent_unencrypted_vxlan + if proc.returncode: + raise Exception(proc.stderr.read()) + except Exception as e: +- vlog.err("Failed to import ceretificate into NSS.\n" + str(e)) ++ vlog.err("Failed to import certificate into NSS.\n" + str(e)) + + def _nss_delete_cert(self, name): + try: +@@ -690,7 +702,7 @@ conn prevent_unencrypted_vxlan + if proc.returncode: + raise Exception(proc.stderr.read()) + except Exception as e: +- vlog.err("Failed to delete ceretificate from NSS.\n" + str(e)) ++ vlog.err("Failed to delete certificate from NSS.\n" + str(e)) + + def _nss_import_cert_and_key(self, cert, key, name): + try: +@@ -742,6 +754,7 @@ class IPsecTunnel(object): + + unixctl_config_tmpl = Template("""\ + Tunnel Type: $tunnel_type ++ Local IP: $local_ip + Remote IP: $remote_ip + SKB mark: $skb_mark + Local cert: $certificate +@@ -782,6 +795,7 @@ class IPsecTunnel(object): + new_conf = { + "ifname": self.name, + "tunnel_type": row.type, ++ "local_ip": options.get("local_ip", "%defaultroute"), + "remote_ip": options.get("remote_ip"), + "skb_mark": monitor.conf["skb_mark"], + "certificate": monitor.conf["pki"]["certificate"], +@@ -911,7 +925,7 @@ class IPsecTunnel(object): + class IPsecMonitor(object): + """This class monitors and configures IPsec tunnels""" + +- def __init__(self, root_prefix, ike_daemon): ++ def __init__(self, root_prefix, ike_daemon, restart): + self.IPSEC = root_prefix + "/usr/sbin/ipsec" + self.tunnels = {} + +@@ -941,7 +955,9 @@ class IPsecMonitor(object): + not os.access(self.IPSEC, os.X_OK): + vlog.err("IKE daemon is not installed in the system.") + +- self.ike_helper.restart_ike_daemon() ++ if restart: ++ 
vlog.info("Restarting IKE daemon") ++ self.ike_helper.restart_ike_daemon() + + def is_tunneling_type_supported(self, tunnel_type): + """Returns True if we know how to configure IPsec for these +@@ -989,7 +1005,7 @@ class IPsecMonitor(object): skb_mark = None is_valid = False @@ -38530,7 +39309,7 @@ index 37e3703245..b84608a55d 100755 pki[0] = row.other_config.get("certificate") pki[1] = row.other_config.get("private_key") pki[2] = row.other_config.get("ca_cert") -@@ -1016,7 +1024,7 @@ class IPsecMonitor(object): +@@ -1016,7 +1032,7 @@ class IPsecMonitor(object): table.""" ifaces = set() @@ -38539,7 +39318,7 @@ index 37e3703245..b84608a55d 100755 if not self.is_tunneling_type_supported(row.type): continue if not self.is_ipsec_required(row.options): -@@ -1047,7 +1055,7 @@ class IPsecMonitor(object): +@@ -1047,7 +1063,7 @@ class IPsecMonitor(object): return s = "" conns = self.ike_helper.get_active_conns() @@ -38548,7 +39327,7 @@ index 37e3703245..b84608a55d 100755 s += tunnel.show(policies, securities, conns) unix_conn.reply(s) -@@ -1064,7 +1072,7 @@ class IPsecMonitor(object): +@@ -1064,7 +1080,7 @@ class IPsecMonitor(object): if self.ike_helper.config_global(self): needs_refresh = True @@ -38557,7 +39336,7 @@ index 37e3703245..b84608a55d 100755 if tunnel.last_refreshed_version != tunnel.version: tunnel.last_refreshed_version = tunnel.version needs_refresh = True -@@ -1094,7 +1102,7 @@ class IPsecMonitor(object): +@@ -1094,7 +1110,7 @@ class IPsecMonitor(object): proc.wait() if proc.returncode: raise Exception(proc.stderr.read()) @@ -38566,6 +39345,74 @@ index 37e3703245..b84608a55d 100755 if not m: raise Exception("No CN in the certificate subject.") except Exception as e: +@@ -1136,19 +1152,30 @@ def unixctl_refresh(conn, unused_argv, unused_aux): + conn.reply(None) + + +-def unixctl_exit(conn, unused_argv, unused_aux): ++def unixctl_exit(conn, argv, unused_aux): + global monitor + global exiting ++ ret = None + exiting = True ++ cleanup = True + +- # Make sure persistent global states are cleared +- monitor.update_conf([None, None, None, None], None) +- # Make sure persistent tunnel states are cleared +- for tunnel in monitor.tunnels.keys(): +- monitor.del_tunnel(tunnel) +- monitor.run() ++ for arg in argv: ++ if arg == "--no-cleanup": ++ cleanup = False ++ else: ++ cleanup = False ++ exiting = False ++ ret = str("unrecognized parameter: %s" % arg) ++ ++ if cleanup: ++ # Make sure persistent global states are cleared ++ monitor.update_conf([None, None, None, None], None) ++ # Make sure persistent tunnel states are cleared ++ for tunnel in monitor.tunnels.keys(): ++ monitor.del_tunnel(tunnel) ++ monitor.run() + +- conn.reply(None) ++ conn.reply(ret) + + + def main(): +@@ -1161,6 +1188,8 @@ def main(): + parser.add_argument("--ike-daemon", metavar="IKE-DAEMON", + help="The IKE daemon used for IPsec tunnels" + " (either libreswan or strongswan).") ++ parser.add_argument("--no-restart-ike-daemon", action='store_true', ++ help="Don't restart the IKE daemon on startup.") + + ovs.vlog.add_args(parser) + ovs.daemon.add_args(parser) +@@ -1173,7 +1202,8 @@ def main(): + + root_prefix = args.root_prefix if args.root_prefix else "" + xfrm = XFRM(root_prefix) +- monitor = IPsecMonitor(root_prefix, args.ike_daemon) ++ monitor = IPsecMonitor(root_prefix, args.ike_daemon, ++ not args.no_restart_ike_daemon) + + remote = args.database + schema_helper = ovs.db.idl.SchemaHelper() +@@ -1194,7 +1224,8 @@ def main(): + ovs.unixctl.command_register("tunnels/show", "", 0, 0, + unixctl_show, None) + 
ovs.unixctl.command_register("refresh", "", 0, 0, unixctl_refresh, None) +- ovs.unixctl.command_register("exit", "", 0, 0, unixctl_exit, None) ++ ovs.unixctl.command_register("exit", "[--no-cleanup]", 0, 1, ++ unixctl_exit, None) + + error, unixctl_server = ovs.unixctl.server.UnixctlServer.create(None) + if error: diff --git a/lib/classifier.c b/lib/classifier.c index 0fad953213..2a1d155dad 100644 --- a/lib/classifier.c @@ -39040,7 +39887,7 @@ index 5b5c96d727..f9c732886f 100644 if (!del_err) { /* Delete from hw success, so old flow was offloaded. diff --git a/lib/jsonrpc.c b/lib/jsonrpc.c -index ed748dbde7..9b297a38ce 100644 +index ed748dbde7..e74771e2bc 100644 --- a/lib/jsonrpc.c +++ b/lib/jsonrpc.c @@ -50,6 +50,10 @@ struct jsonrpc { @@ -39146,7 +39993,25 @@ index ed748dbde7..9b297a38ce 100644 s->stream = NULL; s->seqno++; } else if (error != EAGAIN) { -@@ -1248,3 +1291,18 @@ jsonrpc_session_set_dscp(struct jsonrpc_session *s, uint8_t dscp) +@@ -1110,13 +1153,16 @@ jsonrpc_session_recv(struct jsonrpc_session *s) + + received_bytes = jsonrpc_get_received_bytes(s->rpc); + jsonrpc_recv(s->rpc, &msg); ++ ++ long long int now = time_msec(); ++ reconnect_receive_attempted(s->reconnect, now); + if (received_bytes != jsonrpc_get_received_bytes(s->rpc)) { + /* Data was successfully received. + * + * Previously we only counted receiving a full message as activity, + * but with large messages or a slow connection that policy could + * time out the session mid-message. */ +- reconnect_activity(s->reconnect, time_msec()); ++ reconnect_activity(s->reconnect, now); + } + + if (msg) { +@@ -1248,3 +1294,18 @@ jsonrpc_session_set_dscp(struct jsonrpc_session *s, uint8_t dscp) jsonrpc_session_force_reconnect(s); } } @@ -39866,6 +40731,25 @@ index 22f4cde333..6f509424bc 100644 }; /* A network device (e.g. an Ethernet device). +diff --git a/lib/netdev-vport.c b/lib/netdev-vport.c +index b57d21ff8d..be5ed099fd 100644 +--- a/lib/netdev-vport.c ++++ b/lib/netdev-vport.c +@@ -736,6 +736,14 @@ set_tunnel_config(struct netdev *dev_, const struct smap *args, char **errp) + goto out; + } + } ++ } else if (!strcmp(node->key, "remote_cert") || ++ !strcmp(node->key, "remote_name") || ++ !strcmp(node->key, "psk")) { ++ /* When configuring OVS for IPsec, these keys may be set in the ++ tunnel port's 'options' column. 'ovs-vswitchd' does not directly ++ use them, but they are read by 'ovs-monitor-ipsec'. In order to ++ suppress the "unknown %s argument" warning message below, we ++ handle them here by ignoring them. 
*/ + } else { + ds_put_format(&errors, "%s: unknown %s argument '%s'\n", name, + type, node->key); diff --git a/lib/netdev.c b/lib/netdev.c index f95b19af4d..8c44eee8e9 100644 --- a/lib/netdev.c @@ -39962,10 +40846,37 @@ index 42d3335f0f..97320a4dba 100644 } else { a = attrs[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]; diff --git a/lib/odp-util.c b/lib/odp-util.c -index 746d1e97d4..9109c75dc5 100644 +index 746d1e97d4..41cbac0c97 100644 --- a/lib/odp-util.c +++ b/lib/odp-util.c -@@ -5428,13 +5428,16 @@ erspan_to_attr(struct ofpbuf *a, const void *data_) +@@ -1441,14 +1441,20 @@ parse_odp_userspace_action(const char *s, struct ofpbuf *actions) + int n1 = -1; + if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n", + &tunnel_out_port, &n1)) { +- odp_put_userspace_action(pid, user_data, user_data_size, +- tunnel_out_port, include_actions, actions); +- res = n + n1; ++ res = odp_put_userspace_action(pid, user_data, user_data_size, ++ tunnel_out_port, include_actions, ++ actions, NULL); ++ if (!res) { ++ res = n + n1; ++ } + goto out; + } else if (s[n] == ')') { +- odp_put_userspace_action(pid, user_data, user_data_size, +- ODPP_NONE, include_actions, actions); +- res = n + 1; ++ res = odp_put_userspace_action(pid, user_data, user_data_size, ++ ODPP_NONE, include_actions, ++ actions, NULL); ++ if (!res) { ++ res = n + 1; ++ } + goto out; + } + } +@@ -5428,13 +5434,16 @@ erspan_to_attr(struct ofpbuf *a, const void *data_) do { \ len = 0; @@ -39989,7 +40900,7 @@ index 746d1e97d4..9109c75dc5 100644 } #define SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, FUNC) \ -@@ -6225,7 +6228,9 @@ odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms, +@@ -6225,7 +6234,9 @@ odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms, struct ovs_key_nd_extensions *nd_ext_key; if (data->igmp_group_ip4 != 0 || data->tcp_flags != 0) { @@ -40000,7 +40911,7 @@ index 746d1e97d4..9109c75dc5 100644 OVS_KEY_ATTR_ND_EXTENSIONS, sizeof *nd_ext_key); nd_ext_key->nd_reserved = data->igmp_group_ip4; -@@ -6275,6 +6280,10 @@ odp_key_from_dp_packet(struct ofpbuf *buf, const struct dp_packet *packet) +@@ -6275,6 +6286,10 @@ odp_key_from_dp_packet(struct ofpbuf *buf, const struct dp_packet *packet) nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority); @@ -40011,7 +40922,59 @@ index 746d1e97d4..9109c75dc5 100644 if (flow_tnl_dst_is_set(&md->tunnel)) { tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL, NULL); } -@@ -7565,6 +7574,28 @@ struct offsetof_sizeof { +@@ -7416,15 +7431,18 @@ odp_key_fitness_to_string(enum odp_key_fitness fitness) + + /* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies + * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute +- * whose contents are the 'userdata_size' bytes at 'userdata' and returns the +- * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is +- * null, then the return value is not meaningful.) */ +-size_t ++ * whose contents are the 'userdata_size' bytes at 'userdata' and sets ++ * 'odp_actions_ofs' if nonnull with the offset within 'odp_actions' of the ++ * start of the cookie. (If 'userdata' is null, then the 'odp_actions_ofs' ++ * value is not meaningful.) ++ * ++ * Returns negative error code on failure. 
*/ ++int + odp_put_userspace_action(uint32_t pid, + const void *userdata, size_t userdata_size, + odp_port_t tunnel_out_port, + bool include_actions, +- struct ofpbuf *odp_actions) ++ struct ofpbuf *odp_actions, size_t *odp_actions_ofs) + { + size_t userdata_ofs; + size_t offset; +@@ -7432,6 +7450,9 @@ odp_put_userspace_action(uint32_t pid, + offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE); + nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid); + if (userdata) { ++ if (nl_attr_oversized(userdata_size)) { ++ return -E2BIG; ++ } + userdata_ofs = odp_actions->size + NLA_HDRLEN; + + /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel +@@ -7457,9 +7478,16 @@ odp_put_userspace_action(uint32_t pid, + if (include_actions) { + nl_msg_put_flag(odp_actions, OVS_USERSPACE_ATTR_ACTIONS); + } ++ if (nl_attr_oversized(odp_actions->size - offset - NLA_HDRLEN)) { ++ return -E2BIG; ++ } + nl_msg_end_nested(odp_actions, offset); + +- return userdata_ofs; ++ if (odp_actions_ofs) { ++ *odp_actions_ofs = userdata_ofs; ++ } ++ ++ return 0; + } + + void +@@ -7565,6 +7593,28 @@ struct offsetof_sizeof { int size; }; @@ -40040,7 +41003,7 @@ index 746d1e97d4..9109c75dc5 100644 /* Compares each of the fields in 'key0' and 'key1'. The fields are specified * in 'offsetof_sizeof_arr', which is an array terminated by a 0-size field. * Returns true if all of the fields are equal, false if at least one differs. -@@ -7643,9 +7674,10 @@ commit_set_ether_action(const struct flow *flow, struct flow *base_flow, +@@ -7643,9 +7693,10 @@ commit_set_ether_action(const struct flow *flow, struct flow *base_flow, struct flow_wildcards *wc, bool use_masked) { @@ -40052,7 +41015,7 @@ index 746d1e97d4..9109c75dc5 100644 if (flow->packet_type != htonl(PT_ETH)) { return; } -@@ -7653,11 +7685,13 @@ commit_set_ether_action(const struct flow *flow, struct flow *base_flow, +@@ -7653,11 +7704,13 @@ commit_set_ether_action(const struct flow *flow, struct flow *base_flow, get_ethernet_key(flow, &key); get_ethernet_key(base_flow, &base); get_ethernet_key(&wc->masks, &mask); @@ -40066,7 +41029,7 @@ index 746d1e97d4..9109c75dc5 100644 put_ethernet_key(&mask, &wc->masks); } } -@@ -7781,7 +7815,7 @@ commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow, +@@ -7781,7 +7834,7 @@ commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow, struct ofpbuf *odp_actions, struct flow_wildcards *wc, bool use_masked) { @@ -40075,7 +41038,7 @@ index 746d1e97d4..9109c75dc5 100644 struct offsetof_sizeof ovs_key_ipv4_offsetof_sizeof_arr[] = OVS_KEY_IPV4_OFFSETOF_SIZEOF_ARR; -@@ -7792,6 +7826,7 @@ commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow, +@@ -7792,6 +7845,7 @@ commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow, get_ipv4_key(flow, &key, false); get_ipv4_key(base_flow, &base, false); get_ipv4_key(&wc->masks, &mask, true); @@ -40083,7 +41046,7 @@ index 746d1e97d4..9109c75dc5 100644 mask.ipv4_proto = 0; /* Not writeable. */ mask.ipv4_frag = 0; /* Not writable. 
*/ -@@ -7803,9 +7838,8 @@ commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow, +@@ -7803,9 +7857,8 @@ commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow, if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key, ovs_key_ipv4_offsetof_sizeof_arr, odp_actions)) { put_ipv4_key(&base, base_flow, false); @@ -40095,7 +41058,7 @@ index 746d1e97d4..9109c75dc5 100644 } } -@@ -7838,7 +7872,7 @@ commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow, +@@ -7838,7 +7891,7 @@ commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow, struct ofpbuf *odp_actions, struct flow_wildcards *wc, bool use_masked) { @@ -40104,7 +41067,7 @@ index 746d1e97d4..9109c75dc5 100644 struct offsetof_sizeof ovs_key_ipv6_offsetof_sizeof_arr[] = OVS_KEY_IPV6_OFFSETOF_SIZEOF_ARR; -@@ -7849,6 +7883,7 @@ commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow, +@@ -7849,6 +7902,7 @@ commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow, get_ipv6_key(flow, &key, false); get_ipv6_key(base_flow, &base, false); get_ipv6_key(&wc->masks, &mask, true); @@ -40112,7 +41075,7 @@ index 746d1e97d4..9109c75dc5 100644 mask.ipv6_proto = 0; /* Not writeable. */ mask.ipv6_frag = 0; /* Not writable. */ mask.ipv6_label &= htonl(IPV6_LABEL_MASK); /* Not writable. */ -@@ -7861,9 +7896,8 @@ commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow, +@@ -7861,9 +7915,8 @@ commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow, if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key, ovs_key_ipv6_offsetof_sizeof_arr, odp_actions)) { put_ipv6_key(&base, base_flow, false); @@ -40124,7 +41087,7 @@ index 746d1e97d4..9109c75dc5 100644 } } -@@ -7894,17 +7928,19 @@ static enum slow_path_reason +@@ -7894,17 +7947,19 @@ static enum slow_path_reason commit_set_arp_action(const struct flow *flow, struct flow *base_flow, struct ofpbuf *odp_actions, struct flow_wildcards *wc) { @@ -40145,7 +41108,7 @@ index 746d1e97d4..9109c75dc5 100644 put_arp_key(&mask, &wc->masks); return SLOW_ACTION; } -@@ -7931,7 +7967,7 @@ static enum slow_path_reason +@@ -7931,7 +7986,7 @@ static enum slow_path_reason commit_set_icmp_action(const struct flow *flow, struct flow *base_flow, struct ofpbuf *odp_actions, struct flow_wildcards *wc) { @@ -40154,7 +41117,7 @@ index 746d1e97d4..9109c75dc5 100644 struct offsetof_sizeof ovs_key_icmp_offsetof_sizeof_arr[] = OVS_KEY_ICMP_OFFSETOF_SIZEOF_ARR; enum ovs_key_attr attr; -@@ -7947,10 +7983,12 @@ commit_set_icmp_action(const struct flow *flow, struct flow *base_flow, +@@ -7947,10 +8002,12 @@ commit_set_icmp_action(const struct flow *flow, struct flow *base_flow, get_icmp_key(flow, &key); get_icmp_key(base_flow, &base); get_icmp_key(&wc->masks, &mask); @@ -40167,7 +41130,7 @@ index 746d1e97d4..9109c75dc5 100644 put_icmp_key(&mask, &wc->masks); return SLOW_ACTION; } -@@ -7998,17 +8036,19 @@ commit_set_nd_action(const struct flow *flow, struct flow *base_flow, +@@ -7998,17 +8055,19 @@ commit_set_nd_action(const struct flow *flow, struct flow *base_flow, struct ofpbuf *odp_actions, struct flow_wildcards *wc, bool use_masked) { @@ -40188,7 +41151,7 @@ index 746d1e97d4..9109c75dc5 100644 put_nd_key(&mask, &wc->masks); return SLOW_ACTION; } -@@ -8022,18 +8062,20 @@ commit_set_nd_extensions_action(const struct flow *flow, +@@ -8022,18 +8081,20 @@ commit_set_nd_extensions_action(const struct flow *flow, struct ofpbuf *odp_actions, struct flow_wildcards *wc, bool use_masked) { @@ -40210,7 
+41173,7 @@ index 746d1e97d4..9109c75dc5 100644 put_nd_extensions_key(&mask, &wc->masks); return SLOW_ACTION; } -@@ -8248,7 +8290,7 @@ commit_set_port_action(const struct flow *flow, struct flow *base_flow, +@@ -8248,7 +8309,7 @@ commit_set_port_action(const struct flow *flow, struct flow *base_flow, bool use_masked) { enum ovs_key_attr key_type; @@ -40219,7 +41182,7 @@ index 746d1e97d4..9109c75dc5 100644 struct offsetof_sizeof ovs_key_tp_offsetof_sizeof_arr[] = OVS_KEY_TCP_OFFSETOF_SIZEOF_ARR; -@@ -8274,10 +8316,12 @@ commit_set_port_action(const struct flow *flow, struct flow *base_flow, +@@ -8274,10 +8335,12 @@ commit_set_port_action(const struct flow *flow, struct flow *base_flow, get_tp_key(flow, &key); get_tp_key(base_flow, &base); get_tp_key(&wc->masks, &mask); @@ -40232,7 +41195,7 @@ index 746d1e97d4..9109c75dc5 100644 put_tp_key(&mask, &wc->masks); } } -@@ -8301,7 +8345,7 @@ commit_set_priority_action(const struct flow *flow, struct flow *base_flow, +@@ -8301,7 +8364,7 @@ commit_set_priority_action(const struct flow *flow, struct flow *base_flow, if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask, sizeof key, ovs_key_prio_offsetof_sizeof_arr, odp_actions)) { base_flow->skb_priority = base; @@ -40241,7 +41204,7 @@ index 746d1e97d4..9109c75dc5 100644 } } -@@ -8325,7 +8369,7 @@ commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow, +@@ -8325,7 +8388,7 @@ commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow, sizeof key, ovs_key_pkt_mark_offsetof_sizeof_arr, odp_actions)) { base_flow->pkt_mark = base; @@ -40250,6 +41213,28 @@ index 746d1e97d4..9109c75dc5 100644 } } +diff --git a/lib/odp-util.h b/lib/odp-util.h +index 4ecce1aac5..936fadf175 100644 +--- a/lib/odp-util.h ++++ b/lib/odp-util.h +@@ -356,11 +356,12 @@ struct user_action_cookie { + }; + BUILD_ASSERT_DECL(sizeof(struct user_action_cookie) == 48); + +-size_t odp_put_userspace_action(uint32_t pid, +- const void *userdata, size_t userdata_size, +- odp_port_t tunnel_out_port, +- bool include_actions, +- struct ofpbuf *odp_actions); ++int odp_put_userspace_action(uint32_t pid, ++ const void *userdata, size_t userdata_size, ++ odp_port_t tunnel_out_port, ++ bool include_actions, ++ struct ofpbuf *odp_actions, ++ size_t *odp_actions_ofs); + void odp_put_tunnel_action(const struct flow_tnl *tunnel, + struct ofpbuf *odp_actions, + const char *tnl_type); diff --git a/lib/ofp-actions.c b/lib/ofp-actions.c index ddef3b0c87..ef8b2b4527 100644 --- a/lib/ofp-actions.c @@ -40371,9 +41356,18 @@ index 30d1d08eba..00497d940c 100644 struct ovsdb_idl_class { diff --git a/lib/ovsdb-idl.c b/lib/ovsdb-idl.c -index 190143f363..1bc6f574db 100644 +index 190143f363..1ff9b2bb75 100644 --- a/lib/ovsdb-idl.c +++ b/lib/ovsdb-idl.c +@@ -221,7 +221,7 @@ struct ovsdb_idl_db { + struct uuid last_id; + }; + +-static void ovsdb_idl_db_track_clear(struct ovsdb_idl_db *); ++static void ovsdb_idl_db_track_clear(struct ovsdb_idl_db *, bool flush_all); + static void ovsdb_idl_db_add_column(struct ovsdb_idl_db *, + const struct ovsdb_idl_column *); + static void ovsdb_idl_db_omit(struct ovsdb_idl_db *, @@ -240,6 +240,10 @@ static void ovsdb_idl_send_monitor_request(struct ovsdb_idl *, struct ovsdb_idl_db *, enum ovsdb_idl_monitor_method); @@ -40425,14 +41419,17 @@ index 190143f363..1bc6f574db 100644 if (hmap_is_empty(&table->rows)) { continue; } -@@ -634,7 +642,6 @@ ovsdb_idl_db_clear(struct ovsdb_idl_db *db) +@@ -634,9 +642,8 @@ ovsdb_idl_db_clear(struct ovsdb_idl_db *db) } ovsdb_idl_row_destroy_postprocess(db); 
- db->cond_changed = false; db->cond_seqno = 0; - ovsdb_idl_db_track_clear(db); +- ovsdb_idl_db_track_clear(db); ++ ovsdb_idl_db_track_clear(db, true); + if (changed) { + db->change_seqno++; @@ -692,6 +699,12 @@ ovsdb_idl_send_request(struct ovsdb_idl *idl, struct jsonrpc_msg *request) static void ovsdb_idl_restart_fsm(struct ovsdb_idl *idl) @@ -40457,7 +41454,7 @@ index 190143f363..1bc6f574db 100644 ovsdb_idl_send_cond_change(idl); idl->data.cond_seqno++; break; -@@ -1495,17 +1510,34 @@ ovsdb_idl_condition_equals(const struct ovsdb_idl_condition *a, +@@ -1495,35 +1510,66 @@ ovsdb_idl_condition_equals(const struct ovsdb_idl_condition *a, } static void @@ -40480,9 +41477,9 @@ index 190143f363..1bc6f574db 100644 HMAP_FOR_EACH (clause, hmap_node, &src->clauses) { - ovsdb_idl_condition_add_clause__(dst, clause, clause->hmap_node.hash); + ovsdb_idl_condition_add_clause__(*dst, clause, clause->hmap_node.hash); -+ } -+} -+ + } + } + +static void +ovsdb_idl_condition_move(struct ovsdb_idl_condition **dst, + struct ovsdb_idl_condition **src) @@ -40490,13 +41487,13 @@ index 190143f363..1bc6f574db 100644 + if (*dst) { + ovsdb_idl_condition_destroy(*dst); + free(*dst); - } ++ } + *dst = *src; + *src = NULL; - } - ++} ++ static unsigned int -@@ -1513,17 +1545,33 @@ ovsdb_idl_db_set_condition(struct ovsdb_idl_db *db, + ovsdb_idl_db_set_condition(struct ovsdb_idl_db *db, const struct ovsdb_idl_table_class *tc, const struct ovsdb_idl_condition *condition) { @@ -40507,6 +41504,7 @@ index 190143f363..1bc6f574db 100644 - ovsdb_idl_condition_destroy(&table->condition); - ovsdb_idl_condition_clone(&table->condition, condition); - db->cond_changed = table->cond_changed = true; ++ unsigned int curr_seqno = db->cond_seqno; + + /* Compare the new condition to the last known condition which can be + * either "new" (not sent yet), "requested" or "acked", in this order. @@ -40525,19 +41523,16 @@ index 190143f363..1bc6f574db 100644 + db->cond_changed = true; poll_immediate_wake(); - return seqno + 1; -+ return db->cond_seqno + 1; -+ } else if (table_cond != table->ack_cond) { -+ /* 'condition' was already set but has not been "acked" yet. The IDL -+ * will be up to date when db->cond_seqno gets incremented. */ -+ return db->cond_seqno + 1; } - return seqno; -+ return db->cond_seqno; ++ /* Conditions will be up to date when we receive replies for already ++ * requested and new conditions, if any. */ ++ return curr_seqno + (table->new_cond ? 1 : 0) + (table->req_cond ? 
1 : 0); } /* Sets the replication condition for 'tc' in 'idl' to 'condition' and -@@ -1563,9 +1611,8 @@ ovsdb_idl_condition_to_json(const struct ovsdb_idl_condition *cnd) +@@ -1563,9 +1609,8 @@ ovsdb_idl_condition_to_json(const struct ovsdb_idl_condition *cnd) } static struct json * @@ -40548,7 +41543,7 @@ index 190143f363..1bc6f574db 100644 struct json *monitor_cond_change_request = json_object_create(); struct json *cond_json = ovsdb_idl_condition_to_json(cond); -@@ -1585,8 +1632,12 @@ ovsdb_idl_db_compose_cond_change(struct ovsdb_idl_db *db) +@@ -1585,8 +1630,12 @@ ovsdb_idl_db_compose_cond_change(struct ovsdb_idl_db *db) for (size_t i = 0; i < db->class_->n_tables; i++) { struct ovsdb_idl_table *table = &db->tables[i]; @@ -40563,7 +41558,7 @@ index 190143f363..1bc6f574db 100644 if (req) { if (!monitor_cond_change_requests) { monitor_cond_change_requests = json_object_create(); -@@ -1595,7 +1646,11 @@ ovsdb_idl_db_compose_cond_change(struct ovsdb_idl_db *db) +@@ -1595,7 +1644,11 @@ ovsdb_idl_db_compose_cond_change(struct ovsdb_idl_db *db) table->class_->name, json_array_create_1(req)); } @@ -40576,7 +41571,7 @@ index 190143f363..1bc6f574db 100644 } } -@@ -1610,6 +1665,73 @@ ovsdb_idl_db_compose_cond_change(struct ovsdb_idl_db *db) +@@ -1610,6 +1663,73 @@ ovsdb_idl_db_compose_cond_change(struct ovsdb_idl_db *db) return jsonrpc_create_request("monitor_cond_change", params, NULL); } @@ -40650,7 +41645,61 @@ index 190143f363..1bc6f574db 100644 static void ovsdb_idl_send_cond_change(struct ovsdb_idl *idl) { -@@ -1823,6 +1945,11 @@ ovsdb_idl_db_track_clear(struct ovsdb_idl_db *db) +@@ -1753,29 +1873,37 @@ ovsdb_idl_track_is_set(struct ovsdb_idl_table *table) + } + + /* Returns the first tracked row in table with class 'table_class' +- * for the specified 'idl'. Returns NULL if there are no tracked rows */ ++ * for the specified 'idl'. Returns NULL if there are no tracked rows. ++ * Pure orphan rows, i.e. rows that never had any datum, are skipped. */ + const struct ovsdb_idl_row * + ovsdb_idl_track_get_first(const struct ovsdb_idl *idl, + const struct ovsdb_idl_table_class *table_class) + { + struct ovsdb_idl_table *table + = ovsdb_idl_db_table_from_class(&idl->data, table_class); ++ struct ovsdb_idl_row *row; + +- if (!ovs_list_is_empty(&table->track_list)) { +- return CONTAINER_OF(ovs_list_front(&table->track_list), struct ovsdb_idl_row, track_node); ++ LIST_FOR_EACH (row, track_node, &table->track_list) { ++ if (!ovsdb_idl_row_is_orphan(row) || row->tracked_old_datum) { ++ return row; ++ } + } + return NULL; + } + + /* Returns the next tracked row in table after the specified 'row' +- * (in no particular order). Returns NULL if there are no tracked rows */ ++ * (in no particular order). Returns NULL if there are no tracked rows. ++ * Pure orphan rows, i.e. rows that never had any datum, are skipped.*/ + const struct ovsdb_idl_row * + ovsdb_idl_track_get_next(const struct ovsdb_idl_row *row) + { +- if (row->track_node.next != &row->table->track_list) { +- return CONTAINER_OF(row->track_node.next, struct ovsdb_idl_row, track_node); +- } ++ struct ovsdb_idl_table *table = row->table; + ++ LIST_FOR_EACH_CONTINUE (row, track_node, &table->track_list) { ++ if (!ovsdb_idl_row_is_orphan(row) || row->tracked_old_datum) { ++ return row; ++ } ++ } + return NULL; + } + +@@ -1808,7 +1936,7 @@ ovsdb_idl_track_is_updated(const struct ovsdb_idl_row *row, + * loop when it is ready to do ovsdb_idl_run() again. 
+ */ + static void +-ovsdb_idl_db_track_clear(struct ovsdb_idl_db *db) ++ovsdb_idl_db_track_clear(struct ovsdb_idl_db *db, bool flush_all) + { + size_t i; + +@@ -1823,19 +1951,39 @@ ovsdb_idl_db_track_clear(struct ovsdb_idl_db *db) free(row->updated); row->updated = NULL; } @@ -40661,8 +41710,54 @@ index 190143f363..1bc6f574db 100644 + ovs_list_remove(&row->track_node); ovs_list_init(&row->track_node); - if (ovsdb_idl_row_is_orphan(row) && row->tracked_old_datum) { -@@ -2064,13 +2191,15 @@ ovsdb_idl_send_monitor_request(struct ovsdb_idl *idl, struct ovsdb_idl_db *db, +- if (ovsdb_idl_row_is_orphan(row) && row->tracked_old_datum) { ++ if (ovsdb_idl_row_is_orphan(row)) { + ovsdb_idl_row_unparse(row); +- const struct ovsdb_idl_table_class *class = +- row->table->class_; +- for (size_t c = 0; c < class->n_columns; c++) { +- ovsdb_datum_destroy(&row->tracked_old_datum[c], +- &class->columns[c].type); ++ if (row->tracked_old_datum) { ++ const struct ovsdb_idl_table_class *class = ++ row->table->class_; ++ for (size_t c = 0; c < class->n_columns; c++) { ++ ovsdb_datum_destroy(&row->tracked_old_datum[c], ++ &class->columns[c].type); ++ } ++ free(row->tracked_old_datum); ++ row->tracked_old_datum = NULL; ++ } ++ ++ /* Rows that were reused as orphan after being processed ++ * for deletion are still in the table hmap and will be ++ * cleaned up when their src arcs are removed. These rows ++ * will not be reported anymore as "deleted" to IDL ++ * clients. ++ * ++ * The exception is when 'destroy' is explicitly set to ++ * 'true' which usually happens when the complete IDL ++ * contents are being flushed. ++ */ ++ if (flush_all || ovs_list_is_empty(&row->dst_arcs)) { ++ free(row); + } +- free(row->tracked_old_datum); +- row->tracked_old_datum = NULL; +- free(row); + } + } + } +@@ -1850,7 +1998,7 @@ ovsdb_idl_db_track_clear(struct ovsdb_idl_db *db) + void + ovsdb_idl_track_clear(struct ovsdb_idl *idl) + { +- ovsdb_idl_db_track_clear(&idl->data); ++ ovsdb_idl_db_track_clear(&idl->data, false); + } + + static void +@@ -2064,13 +2212,15 @@ ovsdb_idl_send_monitor_request(struct ovsdb_idl *idl, struct ovsdb_idl_db *db, monitor_request = json_object_create(); json_object_put(monitor_request, "columns", columns); @@ -40681,7 +41776,7 @@ index 190143f363..1bc6f574db 100644 } json_object_put(monitor_requests, tc->name, json_array_create_1(monitor_request)); -@@ -2078,8 +2207,6 @@ ovsdb_idl_send_monitor_request(struct ovsdb_idl *idl, struct ovsdb_idl_db *db, +@@ -2078,8 +2228,6 @@ ovsdb_idl_send_monitor_request(struct ovsdb_idl *idl, struct ovsdb_idl_db *db, } free_schema(schema); @@ -40690,7 +41785,7 @@ index 190143f363..1bc6f574db 100644 struct json *params = json_array_create_3( json_string_create(db->class_->database), json_clone(db->monitor_id), -@@ -2504,22 +2631,25 @@ ovsdb_idl_process_update2(struct ovsdb_idl_table *table, +@@ -2504,22 +2652,25 @@ ovsdb_idl_process_update2(struct ovsdb_idl_table *table, return true; } @@ -40729,7 +41824,7 @@ index 190143f363..1bc6f574db 100644 } } } -@@ -2587,7 +2717,14 @@ ovsdb_idl_row_change__(struct ovsdb_idl_row *row, const struct json *row_json, +@@ -2587,7 +2738,14 @@ ovsdb_idl_row_change__(struct ovsdb_idl_row *row, const struct json *row_json, row->change_seqno[change] = row->table->change_seqno[change] = row->table->db->change_seqno + 1; @@ -40744,7 +41839,16 @@ index 190143f363..1bc6f574db 100644 add_tracked_change_for_references(row); if (!row->updated) { row->updated = bitmap_allocate(class->n_columns); -@@ -4663,6 +4800,7 @@ ovsdb_idl_txn_insert(struct 
ovsdb_idl_txn *txn, +@@ -3021,7 +3179,7 @@ ovsdb_idl_row_clear_old(struct ovsdb_idl_row *row) + { + ovs_assert(row->old_datum == row->new_datum); + if (!ovsdb_idl_row_is_orphan(row)) { +- if (ovsdb_idl_track_is_set(row->table)) { ++ if (ovsdb_idl_track_is_set(row->table) && !row->tracked_old_datum) { + row->tracked_old_datum = row->old_datum; + } else { + const struct ovsdb_idl_table_class *class = row->table->class_; +@@ -4663,6 +4821,7 @@ ovsdb_idl_txn_insert(struct ovsdb_idl_txn *txn, hmap_insert(&row->table->rows, &row->hmap_node, uuid_hash(&row->uuid)); hmap_insert(&txn->txn_rows, &row->txn_node, uuid_hash(&row->uuid)); ovsdb_idl_add_to_indexes(row); @@ -40829,6 +41933,80 @@ index b990ed9d59..0d3290dc37 100644 cursor.vector = impl->vector; cursor.entry_idx = -1; +diff --git a/lib/reconnect.c b/lib/reconnect.c +index c89abab889..a929ddfd2d 100644 +--- a/lib/reconnect.c ++++ b/lib/reconnect.c +@@ -61,6 +61,7 @@ struct reconnect { + long long int last_activity; + long long int last_connected; + long long int last_disconnected; ++ long long int last_receive_attempt; + unsigned int max_tries; + unsigned int backoff_free_tries; + +@@ -109,6 +110,7 @@ reconnect_create(long long int now) + fsm->last_activity = now; + fsm->last_connected = LLONG_MAX; + fsm->last_disconnected = LLONG_MAX; ++ fsm->last_receive_attempt = now; + fsm->max_tries = UINT_MAX; + fsm->creation_time = now; + +@@ -501,6 +503,19 @@ reconnect_activity(struct reconnect *fsm, long long int now) + fsm->last_activity = now; + } + ++/* Tell 'fsm' that some attempt to receive data on the connection was made at ++ * 'now'. The FSM only allows probe interval timer to expire when some attempt ++ * to receive data on the connection was received after the time when it should ++ * have expired. This helps in the case where there's a long delay in the poll ++ * loop and then reconnect_run() executes before the code to try to receive ++ * anything from the remote runs. (To disable this feature, just call ++ * reconnect_receive_attempted(fsm, LLONG_MAX).) 
*/ ++void ++reconnect_receive_attempted(struct reconnect *fsm, long long int now) ++{ ++ fsm->last_receive_attempt = now; ++} ++ + static void + reconnect_transition__(struct reconnect *fsm, long long int now, + enum state state) +@@ -541,13 +556,19 @@ reconnect_deadline__(const struct reconnect *fsm) + case S_ACTIVE: + if (fsm->probe_interval) { + long long int base = MAX(fsm->last_activity, fsm->state_entered); +- return base + fsm->probe_interval; ++ long long int expiration = base + fsm->probe_interval; ++ if (fsm->last_receive_attempt >= expiration) { ++ return expiration; ++ } + } + return LLONG_MAX; + + case S_IDLE: + if (fsm->probe_interval) { +- return fsm->state_entered + fsm->probe_interval; ++ long long int expiration = fsm->state_entered + fsm->probe_interval; ++ if (fsm->last_receive_attempt >= expiration) { ++ return expiration; ++ } + } + return LLONG_MAX; + +diff --git a/lib/reconnect.h b/lib/reconnect.h +index 9f2d469e2d..40cc569c42 100644 +--- a/lib/reconnect.h ++++ b/lib/reconnect.h +@@ -83,6 +83,7 @@ void reconnect_connected(struct reconnect *, long long int now); + void reconnect_connect_failed(struct reconnect *, long long int now, + int error); + void reconnect_activity(struct reconnect *, long long int now); ++void reconnect_receive_attempted(struct reconnect *, long long int now); + + enum reconnect_action { + RECONNECT_CONNECT = 1, diff --git a/lib/sha1.c b/lib/sha1.c index 4f48ef2102..87360d9cd0 100644 --- a/lib/sha1.c @@ -40868,10 +42046,31 @@ index eda265dfc5..a635ff7689 100644 #define SHA1_FMT \ "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" \ diff --git a/lib/tc.c b/lib/tc.c -index 12af0192b6..cc8c2d849e 100644 +index 12af0192b6..e772aa515c 100644 --- a/lib/tc.c +++ b/lib/tc.c -@@ -1647,8 +1647,10 @@ nl_parse_single_action(struct nlattr *action, struct tc_flower *flower) +@@ -934,6 +934,7 @@ nl_parse_act_pedit(struct nlattr *options, struct tc_flower *flower) + int flower_off = m->flower_offset; + int sz = m->size; + int mf = m->offset; ++ int ef = ROUND_UP(mf, 4); + + if (m->htype != type) { + continue; +@@ -941,9 +942,10 @@ nl_parse_act_pedit(struct nlattr *options, struct tc_flower *flower) + + /* check overlap between current pedit key, which is always + * 4 bytes (range [off, off + 3]), and a map entry in +- * flower_pedit_map (range [mf, mf + sz - 1]) */ ++ * flower_pedit_map sf = ROUND_DOWN(mf, 4) ++ * (range [sf|mf, (mf + sz - 1)|ef]) */ + if ((keys->off >= mf && keys->off < mf + sz) +- || (keys->off + 3 >= mf && keys->off + 3 < mf + sz)) { ++ || (keys->off + 3 >= mf && keys->off + 3 < ef)) { + int diff = flower_off + (keys->off - mf); + ovs_be32 *dst = (void *) (rewrite_key + diff); + ovs_be32 *dst_m = (void *) (rewrite_mask + diff); +@@ -1647,8 +1649,10 @@ nl_parse_single_action(struct nlattr *action, struct tc_flower *flower) } bs = nl_attr_get_unspec(stats_attrs[TCA_STATS_BASIC], sizeof *bs); @@ -41048,9 +42247,18 @@ index 147ef9c333..97699cb905 100644 memcpy(flow->regs, md->regs, sizeof flow->regs); flow->in_port.ofp_port = md->in_port; diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c -index 409286ab15..3a290e4918 100644 +index 409286ab15..cb197e9010 100644 --- a/ofproto/ofproto-dpif-upcall.c +++ b/ofproto/ofproto-dpif-upcall.c +@@ -1093,7 +1093,7 @@ compose_slow_path(struct udpif *udpif, struct xlate_out *xout, + } + + odp_put_userspace_action(pid, &cookie, sizeof cookie, +- ODPP_NONE, false, buf); ++ ODPP_NONE, false, buf, NULL); + + if (meter_id != UINT32_MAX) { + nl_msg_end_nested(buf, ac_offset); @@ -1545,7 +1545,8 @@ 
process_upcall(struct udpif *udpif, struct upcall *upcall, flow_clear_conntrack(&frozen_flow); } @@ -41062,7 +42270,7 @@ index 409286ab15..3a290e4918 100644 ofproto_dpif_send_async_msg(upcall->ofproto, am); diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c -index 4407f9c97a..dd89cb47c0 100644 +index 4407f9c97a..04a75e12d3 100644 --- a/ofproto/ofproto-dpif-xlate.c +++ b/ofproto/ofproto-dpif-xlate.c @@ -1516,15 +1516,32 @@ xlate_lookup_ofproto_(const struct dpif_backer *backer, @@ -41102,6 +42310,41 @@ index 4407f9c97a..dd89cb47c0 100644 } } +@@ -3076,6 +3093,7 @@ xlate_normal(struct xlate_ctx *ctx) + xlate_report(ctx, OFT_DETAIL, "MLD query, flooding"); + xlate_normal_flood(ctx, in_xbundle, &xvlan); + } ++ return; + } else { + if (is_ip_local_multicast(flow, wc)) { + /* RFC4541: section 2.1.2, item 2: Packets with a dst IP +@@ -3198,12 +3216,11 @@ compose_sample_action(struct xlate_ctx *ctx, + odp_port_t odp_port = ofp_port_to_odp_port( + ctx->xbridge, ctx->xin->flow.in_port.ofp_port); + uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port); +- size_t cookie_offset = odp_put_userspace_action(pid, cookie, +- sizeof *cookie, +- tunnel_out_port, +- include_actions, +- ctx->odp_actions); +- ++ size_t cookie_offset; ++ int res = odp_put_userspace_action(pid, cookie, sizeof *cookie, ++ tunnel_out_port, include_actions, ++ ctx->odp_actions, &cookie_offset); ++ ovs_assert(res == 0); + if (is_sample) { + nl_msg_end_nested(ctx->odp_actions, actions_offset); + nl_msg_end_nested(ctx->odp_actions, sample_offset); +@@ -4796,7 +4813,7 @@ put_controller_user_action(struct xlate_ctx *ctx, + ctx->xin->flow.in_port.ofp_port); + uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port); + odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE, +- false, ctx->odp_actions); ++ false, ctx->odp_actions, NULL); + } + + static void @@ -7519,7 +7536,8 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) /* Restore pipeline metadata. May change flow's in_port and other @@ -41143,6 +42386,19 @@ index 08830d8371..8594afad4a 100644 connmgr_send_flow_removed(connmgr, &fr); ovs_mutex_unlock(&ofproto_mutex); } +diff --git a/ovsdb/automake.mk b/ovsdb/automake.mk +index b895f42925..d60f3f4ec8 100644 +--- a/ovsdb/automake.mk ++++ b/ovsdb/automake.mk +@@ -106,7 +106,7 @@ CLEANFILES += $(OVSIDL_BUILT) + # However, current versions of Automake seem to output all variable + # assignments before any targets, so it doesn't seem to be a problem, + # at least for now. +-$(OVSIDL_BUILT): ovsdb/ovsdb-idlc.in ++$(OVSIDL_BUILT): ovsdb/ovsdb-idlc.in python/ovs/dirs.py + + # ovsdb-doc + EXTRA_DIST += ovsdb/ovsdb-doc diff --git a/ovsdb/dot2pic b/ovsdb/dot2pic index de67261ac6..2f858e19d5 100755 --- a/ovsdb/dot2pic @@ -41409,6 +42665,18 @@ index b6957d7300..54ed7c4283 100644 /* "ovsdb-server/reconnect": makes ovsdb-server drop all of its JSON-RPC * connections and reconnect. 
*/ static void +diff --git a/ovsdb/ovsdb-tool.c b/ovsdb/ovsdb-tool.c +index 91662cab84..938a99d914 100644 +--- a/ovsdb/ovsdb-tool.c ++++ b/ovsdb/ovsdb-tool.c +@@ -720,6 +720,7 @@ print_db_changes(struct shash *tables, struct smap *names, + ds_init(&s); + ovsdb_datum_to_string(&datum, type, &s); + value_string = ds_steal_cstr(&s); ++ ovsdb_datum_destroy(&datum, type); + } else { + ovsdb_error_destroy(error); + } diff --git a/ovsdb/ovsdb.c b/ovsdb/ovsdb.c index cfc96b32f8..9042658fa8 100644 --- a/ovsdb/ovsdb.c @@ -41951,6 +43219,49 @@ index 7e62e90ae3..0372302af4 100644 ovsdb_error_destroy(error); } else { /* Permanent error. Transition to "completed" state to report +diff --git a/python/automake.mk b/python/automake.mk +index 2f08c77014..767512f175 100644 +--- a/python/automake.mk ++++ b/python/automake.mk +@@ -74,12 +74,12 @@ ovs-install-data-local: + $(MKDIR_P) python/ovs + sed \ + -e '/^##/d' \ +- -e 's,[@]pkgdatadir[@],$(pkgdatadir),g' \ +- -e 's,[@]RUNDIR[@],$(RUNDIR),g' \ +- -e 's,[@]LOGDIR[@],$(LOGDIR),g' \ +- -e 's,[@]bindir[@],$(bindir),g' \ +- -e 's,[@]sysconfdir[@],$(sysconfdir),g' \ +- -e 's,[@]DBDIR[@],$(DBDIR),g' \ ++ -e 's,[@]pkgdatadir[@],$(pkgdatadir),g' \ ++ -e 's,[@]RUNDIR[@],$(RUNDIR),g' \ ++ -e 's,[@]LOGDIR[@],$(LOGDIR),g' \ ++ -e 's,[@]bindir[@],$(bindir),g' \ ++ -e 's,[@]sysconfdir[@],$(sysconfdir),g' \ ++ -e 's,[@]DBDIR[@],$(DBDIR),g' \ + < $(srcdir)/python/ovs/dirs.py.template \ + > python/ovs/dirs.py.tmp + $(MKDIR_P) $(DESTDIR)$(pkgdatadir)/python/ovs +@@ -107,12 +107,13 @@ ALL_LOCAL += $(srcdir)/python/ovs/dirs.py + $(srcdir)/python/ovs/dirs.py: python/ovs/dirs.py.template + $(AM_V_GEN)sed \ + -e '/^##/d' \ +- -e 's,[@]pkgdatadir[@],/usr/local/share/openvswitch,g' \ +- -e 's,[@]RUNDIR[@],/var/run,g' \ +- -e 's,[@]LOGDIR[@],/usr/local/var/log,g' \ +- -e 's,[@]bindir[@],/usr/local/bin,g' \ +- -e 's,[@]sysconfdir[@],/usr/local/etc,g' \ +- -e 's,[@]DBDIR[@],/usr/local/etc/openvswitch,g' \ ++ -e 's,[@]pkgdatadir[@],$(pkgdatadir),g' \ ++ -e 's,[@]RUNDIR[@],$(RUNDIR),g' \ ++ -e 's,[@]LOGDIR[@],$(LOGDIR),g' \ ++ -e 's,[@]bindir[@],$(bindir),g' \ ++ -e 's,[@]sysconfdir[@],$(sysconfdir),g' \ ++ -e 's,[@]DBDIR[@],$(sysconfdir)/openvswitch,g' \ + < $? > $@.tmp && \ + mv $@.tmp $@ + EXTRA_DIST += python/ovs/dirs.py.template ++CLEANFILES += python/ovs/dirs.py diff --git a/python/build/soutil.py b/python/build/soutil.py index b8027af863..a658823028 100755 --- a/python/build/soutil.py @@ -41964,6 +43275,66 @@ index b8027af863..a658823028 100755 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
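Note on the python/automake.mk change above: ovs/dirs.py is now produced at build time from python/ovs/dirs.py.template, dropping the "##" header lines and replacing each @var@ token with the configured directory. The sketch below mimics that sed pipeline in Python; the directory values and file paths are placeholders for illustration, not the values configure would actually substitute.

    # Minimal sketch of the @var@ substitution done by the automake sed rules.
    # Values here are hypothetical; the build supplies $(pkgdatadir), $(RUNDIR),
    # $(LOGDIR), $(bindir), $(sysconfdir) and $(DBDIR).
    subs = {
        "pkgdatadir": "/usr/share/openvswitch",
        "RUNDIR": "/var/run/openvswitch",
        "LOGDIR": "/var/log/openvswitch",
        "bindir": "/usr/bin",
        "sysconfdir": "/etc",
        "DBDIR": "/etc/openvswitch",
    }

    with open("python/ovs/dirs.py.template") as src:
        # Drop the '##' comment header, as the '/^##/d' sed expression does.
        text = "".join(line for line in src if not line.startswith("##"))
    for name, value in subs.items():
        text = text.replace("@%s@" % name, value)  # s,[@]name[@],value,g
    with open("python/ovs/dirs.py", "w") as dst:
        dst.write(text)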
+diff --git a/python/ovs/.gitignore b/python/ovs/.gitignore +index 9852786466..8bbcd824f4 100644 +--- a/python/ovs/.gitignore ++++ b/python/ovs/.gitignore +@@ -1 +1,2 @@ + version.py ++dirs.py +diff --git a/python/ovs/db/idl.py b/python/ovs/db/idl.py +index 020291d486..5850ac7abf 100644 +--- a/python/ovs/db/idl.py ++++ b/python/ovs/db/idl.py +@@ -1567,10 +1567,9 @@ class Transaction(object): + for col, val in row._mutations['_inserts'].items(): + column = row._table.columns[col] + if column.type.is_map(): +- opdat = ["map"] + datum = data.Datum.from_python(column.type, val, + _row_to_uuid) +- opdat.append(datum.as_list()) ++ opdat = self._substitute_uuids(datum.to_json()) + else: + opdat = ["set"] + inner_opdat = [] +diff --git a/python/ovs/dirs.py b/python/ovs/dirs.py +deleted file mode 100644 +index c67aecbb46..0000000000 +--- a/python/ovs/dirs.py ++++ /dev/null +@@ -1,31 +0,0 @@ +-# Licensed under the Apache License, Version 2.0 (the "License"); +-# you may not use this file except in compliance with the License. +-# You may obtain a copy of the License at: +-# +-# http://www.apache.org/licenses/LICENSE-2.0 +-# +-# Unless required by applicable law or agreed to in writing, software +-# distributed under the License is distributed on an "AS IS" BASIS, +-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-# See the License for the specific language governing permissions and +-# limitations under the License. +- +-# The @variables@ in this file are replaced by default directories for +-# use in python/ovs/dirs.py in the source directory and replaced by the +-# configured directories for use in the installed python/ovs/dirs.py. +-# +-import os +- +-# Note that the use of """ is to aid in dealing with paths with quotes in them. +-PKGDATADIR = os.environ.get("OVS_PKGDATADIR", """/usr/local/share/openvswitch""") +-RUNDIR = os.environ.get("OVS_RUNDIR", """/var/run""") +-LOGDIR = os.environ.get("OVS_LOGDIR", """/usr/local/var/log""") +-BINDIR = os.environ.get("OVS_BINDIR", """/usr/local/bin""") +- +-DBDIR = os.environ.get("OVS_DBDIR") +-if not DBDIR: +- sysconfdir = os.environ.get("OVS_SYSCONFDIR") +- if sysconfdir: +- DBDIR = "%s/openvswitch" % sysconfdir +- else: +- DBDIR = """/usr/local/etc/openvswitch""" diff --git a/python/ovs/stream.py b/python/ovs/stream.py index e9bb0c8548..f5a520862c 100644 --- a/python/ovs/stream.py @@ -42011,6 +43382,26 @@ index e9bb0c8548..f5a520862c 100644 @staticmethod def needs_probes(): return True +diff --git a/python/setup.py b/python/setup.py +index b7252800c1..d385d83722 100644 +--- a/python/setup.py ++++ b/python/setup.py +@@ -30,6 +30,15 @@ except IOError: + file=sys.stderr) + sys.exit(-1) + ++try: ++ # Try to open generated ovs/dirs.py. However, in this case we ++ # don't need to exec() ++ open("ovs/dirs.py") ++except IOError: ++ print("Ensure dirs.py is created by running make python/ovs/dirs.py", ++ file=sys.stderr) ++ sys.exit(-1) ++ + ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) + if sys.platform == 'win32': + ext_errors += (IOError, ValueError) diff --git a/rhel/openvswitch-fedora.spec.in b/rhel/openvswitch-fedora.spec.in index 7bc8c34b80..f6ec347c15 100644 --- a/rhel/openvswitch-fedora.spec.in @@ -42252,7 +43643,7 @@ index 0aeb4e788f..1651e02d29 100644 # Check that ip address and udp port were correctly modified in output packets. 
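For context on the python/ovs/db/idl.py change above: building the map datum through Datum.from_python()/to_json() and then _substitute_uuids() lets a map value that refers to another row be encoded as a proper OVSDB uuid atom. The sketch below is only illustrative: it hand-builds roughly the wire-level "mutate" operation that a setkey() call like the one in the new partialmapmutateirefmap test is expected to produce; the UUIDs are placeholders.

    import json

    row3_uuid = "11111111-2222-3333-4444-555555555555"  # placeholder simple3 row
    row5_uuid = "66666666-7777-8888-9999-aaaaaaaaaaaa"  # placeholder simple5 row

    # row5.setkey('irefmap', 1, row3.uuid) becomes a single "insert" mutation
    # whose map value carries an OVSDB ["uuid", ...] atom for the referenced row.
    op = {
        "op": "mutate",
        "table": "simple5",
        "where": [["_uuid", "==", ["uuid", row5_uuid]]],
        "mutations": [
            ["irefmap", "insert", ["map", [[1, ["uuid", row3_uuid]]]]],
        ],
    }
    print(json.dumps(["idltest", op]))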
diff --git a/tests/idltest.ovsschema b/tests/idltest.ovsschema -index bee79fc50f..d08f7e7ead 100644 +index bee79fc50f..3ddb612b0c 100644 --- a/tests/idltest.ovsschema +++ b/tests/idltest.ovsschema @@ -54,6 +54,15 @@ @@ -42271,6 +43662,105 @@ index bee79fc50f..d08f7e7ead 100644 "simple": { "columns": { "b": { +@@ -171,6 +180,36 @@ + }, + "isRoot" : false + }, ++ "simple5": { ++ "columns" : { ++ "name": {"type": "string"}, ++ "irefmap": { ++ "type": { ++ "key": {"type": "integer"}, ++ "value": {"type": "uuid", ++ "refTable": "simple3"}, ++ "min": 0, ++ "max": "unlimited" ++ } ++ } ++ }, ++ "isRoot": true ++ }, ++ "simple6": { ++ "columns" : { ++ "name": {"type": "string"}, ++ "weak_ref": { ++ "type": { ++ "key": {"type": "uuid", ++ "refTable": "simple", ++ "refType": "weak"}, ++ "min": 0, ++ "max": "unlimited" ++ } ++ } ++ }, ++ "isRoot": true ++ }, + "singleton" : { + "columns" : { + "name" : { +diff --git a/tests/library.at b/tests/library.at +index ac4ea4abf2..1702b7556b 100644 +--- a/tests/library.at ++++ b/tests/library.at +@@ -53,7 +53,8 @@ AT_CHECK([ovstest test-packets]) + AT_CLEANUP + + AT_SETUP([SHA-1]) +-AT_CHECK([ovstest test-sha1], [0], [......... ++AT_KEYWORDS([sha1]) ++AT_CHECK([ovstest test-sha1], [0], [.......... + ]) + AT_CLEANUP + +diff --git a/tests/odp.at b/tests/odp.at +index 3ab9ad62dd..516527f9cc 100644 +--- a/tests/odp.at ++++ b/tests/odp.at +@@ -397,6 +397,43 @@ odp_actions_from_string: error + ]) + AT_CLEANUP + ++AT_SETUP([OVS datapath actions parsing and formatting - userdata overflow]) ++dnl Userdata should fit in a single netlink message, i.e. should be less than ++dnl UINT16_MAX - NLA_HDRLEN = 65535 - 4 = 65531 bytes. OVS should not accept ++dnl larger userdata. OTOH, userdata is part of a nested netlink message, that ++dnl should not be oversized too. 'pid' takes NLA_HDRLEN + 4 = 8 bytes. ++dnl Plus NLA_HDRLEN for the nested header. 'actions' flag takes NLA_HDRLEN = 4 ++dnl and 'tunnel_out_port' takes NLA_HDRLEN + 4 = 8 bytes. ++dnl So, for the variant with 'actions' maximum length of userdata should be: ++dnl UINT16_MAX - NLA_HDRLEN - (NLA_HDRLEN + 4) - NLA_HDRLEN - NLA_HDRLEN ++dnl total max nested header pid actions userdata ++dnl Result: 65515 bytes for the actual userdata. ++dnl For the case with 'tunnel_out_port': 65511 ++dnl Size of userdata will be rounded up to be multiple of 4, so highest ++dnl acceptable sizes are 65512 and 65508. ++ ++dnl String with length 65512 * 2 = 131024 is valid, while 131026 is not. ++data_valid=$( printf '%*s' 131024 | tr ' ' "a") ++data_invalid=$(printf '%*s' 131026 | tr ' ' "a") ++ ++echo "userspace(pid=1234567,userdata(${data_valid}),actions)" > actions.txt ++echo "userspace(pid=1234567,userdata(${data_invalid}),actions)" >> actions.txt ++ ++dnl String with length 65508 * 2 = 131016 is valid, while 131018 is not. 
++data_valid=$( printf '%*s' 131016 | tr ' ' "a") ++data_invalid=$(printf '%*s' 131018 | tr ' ' "a") ++ ++echo "userspace(pid=1234567,userdata(${data_valid}),tunnel_out_port=10)" >> actions.txt ++echo "userspace(pid=1234567,userdata(${data_invalid}),tunnel_out_port=10)" >> actions.txt ++ ++AT_CHECK_UNQUOTED([ovstest test-odp parse-actions < actions.txt], [0], [dnl ++`cat actions.txt | head -1` ++odp_actions_from_string: error ++`cat actions.txt | head -3 | tail -1` ++odp_actions_from_string: error ++]) ++AT_CLEANUP ++ + AT_SETUP([OVS datapath keys parsing and formatting - 33 nested encap ]) + AT_DATA([odp-in.txt], [dnl + encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap(encap())))))))))))))))))))))))))))))))) diff --git a/tests/ofp-actions.at b/tests/ofp-actions.at index 4893280a99..7cb09498e6 100644 --- a/tests/ofp-actions.at @@ -42475,7 +43965,7 @@ index 55c7a6e179..c8babe3612 100644 AT_CHECK([RUN_OVS_VSCTL([list-dp-cap system])], [0], [recirc=true ]) diff --git a/tests/ovsdb-cluster.at b/tests/ovsdb-cluster.at -index 3a0bd4579e..e0758e954c 100644 +index 3a0bd4579e..92aa427093 100644 --- a/tests/ovsdb-cluster.at +++ b/tests/ovsdb-cluster.at @@ -179,6 +179,41 @@ AT_KEYWORDS([ovsdb server negative unix cluster disconnect]) @@ -42671,8 +44161,17 @@ index 3a0bd4579e..e0758e954c 100644 AT_BANNER([OVSDB - cluster tests]) +@@ -529,7 +701,7 @@ ovsdb|WARN|schema: changed 30 columns in 'Open_vSwitch' database from ephemeral + # Use file instead of var because code inside "while" runs in a subshell. + echo 0 > phase + i=0 +- (while :; do echo; sleep 0.1; done) | while read REPLY; do ++ (while :; do echo || exit 0; sleep 0.1; done) | while read REPLY; do + printf "t=%2d s:" $i + done=0 + for j in $(seq 0 $(expr $n1 - 1)); do diff --git a/tests/ovsdb-idl.at b/tests/ovsdb-idl.at -index cc38d69c10..c12896c587 100644 +index cc38d69c10..8a28dfe4ca 100644 --- a/tests/ovsdb-idl.at +++ b/tests/ovsdb-idl.at @@ -12,25 +12,6 @@ ovsdb_start_idltest () { @@ -42737,15 +44236,18 @@ index cc38d69c10..c12896c587 100644 # OVSDB_CHECK_IDL_C(TITLE, [PRE-IDL-TXN], TRANSACTIONS, OUTPUT, [KEYWORDS], # [FILTER]) # -@@ -954,6 +964,7 @@ AT_CHECK([sort stdout | uuidfilt], [0], +@@ -954,7 +964,10 @@ AT_CHECK([sort stdout | uuidfilt], [0], # Check that ovsdb-idl figured out that table link2 and column l2 are missing. AT_CHECK([grep ovsdb_idl stderr | sort], [0], [dnl +test-ovsdb|ovsdb_idl|idltest database lacks indexed table (database needs upgrade?) test-ovsdb|ovsdb_idl|idltest database lacks link2 table (database needs upgrade?) ++test-ovsdb|ovsdb_idl|idltest database lacks simple5 table (database needs upgrade?) ++test-ovsdb|ovsdb_idl|idltest database lacks simple6 table (database needs upgrade?) test-ovsdb|ovsdb_idl|idltest database lacks singleton table (database needs upgrade?) test-ovsdb|ovsdb_idl|link1 table in idltest database lacks l2 column (database needs upgrade?) 
-@@ -1150,6 +1161,7 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated], + ]) +@@ -1150,6 +1163,7 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated], "where": [], "row": {"b": true}}]']], [[000: i=1 r=2 b=true s=mystring u=<0> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<1> <2>] uuid=<3> @@ -42753,7 +44255,119 @@ index cc38d69c10..c12896c587 100644 000: updated columns: b ba i ia r ra s sa u ua 001: {"error":null,"result":[{"count":2}]} 002: i=0 r=0 b=true s= u=<4> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<5> -@@ -1212,6 +1224,7 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially empty, various ops], +@@ -1158,6 +1172,111 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated], + 003: done + ]]) + ++dnl This test creates database with weak references and checks that orphan ++dnl rows created for weak references are not available for iteration via ++dnl list of tracked changes. ++OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, orphan weak references], ++ [['["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row0_s"}, ++ "uuid-name": "weak_row0"}, ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row1_s"}, ++ "uuid-name": "weak_row1"}, ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row2_s"}, ++ "uuid-name": "weak_row2"}, ++ {"op": "insert", ++ "table": "simple6", ++ "row": {"name": "first_row", ++ "weak_ref": ["set", ++ [["named-uuid", "weak_row0"], ++ ["named-uuid", "weak_row1"], ++ ["named-uuid", "weak_row2"]] ++ ]}}]']], ++ [['condition simple []' \ ++ 'condition simple [["s","==","row1_s"]]' \ ++ '["idltest", ++ {"op": "update", ++ "table": "simple6", ++ "where": [], ++ "row": {"name": "new_name"}}]' \ ++ '["idltest", ++ {"op": "delete", ++ "table": "simple6", ++ "where": []}]']], ++ [[000: change conditions ++001: inserted row: uuid=<0> ++001: name=first_row weak_ref=[] uuid=<0> ++001: updated columns: name weak_ref ++002: change conditions ++003: i=0 r=0 b=false s=row1_s u=<1> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<2> ++003: inserted row: uuid=<2> ++003: name=first_row weak_ref=[<2>] uuid=<0> ++003: updated columns: s ++004: {"error":null,"result":[{"count":1}]} ++005: name=new_name weak_ref=[<2>] uuid=<0> ++005: updated columns: name ++006: {"error":null,"result":[{"count":1}]} ++007: i=0 r=0 b=false s=row1_s u=<1> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<2> ++008: done ++]]) ++ ++dnl This test creates database with weak references and checks that the ++dnl content of orphaned rows created for weak references after monitor ++dnl condition change are not leaked when the row is reinserted and deleted. 
++OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, orphan rows, conditional], ++ [['["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row0_s"}, ++ "uuid-name": "weak_row0"}, ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row1_s"}, ++ "uuid-name": "weak_row1"}, ++ {"op": "insert", ++ "table": "simple6", ++ "row": {"name": "first_row", ++ "weak_ref": ["set", ++ [["named-uuid", "weak_row0"]] ++ ]}}]']], ++ [['condition simple []' \ ++ 'condition simple [["s","==","row0_s"]]' \ ++ 'condition simple [["s","==","row1_s"]]' \ ++ 'condition simple [["s","==","row0_s"]]' \ ++ '["idltest", ++ {"op": "delete", ++ "table": "simple6", ++ "where": []}]']], ++ [[000: change conditions ++001: inserted row: uuid=<0> ++001: name=first_row weak_ref=[] uuid=<0> ++001: updated columns: name weak_ref ++002: change conditions ++003: i=0 r=0 b=false s=row0_s u=<1> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<2> ++003: inserted row: uuid=<2> ++003: name=first_row weak_ref=[<2>] uuid=<0> ++003: updated columns: s ++004: change conditions ++005: i=0 r=0 b=false s=row1_s u=<1> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<3> ++005: inserted row: uuid=<3> ++005: updated columns: s ++006: change conditions ++007: deleted row: uuid=<3> ++007: i=0 r=0 b=false s=row0_s u=<1> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<2> ++007: i=0 r=0 b=false s=row1_s u=<1> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<3> ++007: inserted row: uuid=<2> ++007: name=first_row weak_ref=[<2>] uuid=<0> ++007: updated columns: s ++008: {"error":null,"result":[{"count":1}]} ++009: i=0 r=0 b=false s=row0_s u=<1> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<2> ++010: done ++]]) ++ + OVSDB_CHECK_IDL_TRACK([track, simple idl, initially empty, various ops], + [], + [['["idltest", +@@ -1212,6 +1331,7 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially empty, various ops], [[000: empty 001: {"error":null,"result":[{"uuid":["uuid","<0>"]},{"uuid":["uuid","<1>"]}]} 002: i=1 r=2 b=true s=mystring u=<2> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<3> <4>] uuid=<0> @@ -42761,7 +44375,7 @@ index cc38d69c10..c12896c587 100644 002: updated columns: b ba i ia r ra s sa u ua 003: {"error":null,"result":[{"count":2}]} 004: i=0 r=0 b=true s= u=<5> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> -@@ -1223,6 +1236,7 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially empty, various ops], +@@ -1223,6 +1343,7 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially empty, various ops], 006: updated columns: r 007: {"error":null,"result":[{"uuid":["uuid","<6>"]}]} 008: i=-1 r=125 b=false s= u=<5> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6> @@ -42769,16 +44383,36 @@ index cc38d69c10..c12896c587 100644 008: updated columns: ba i ia r ra 009: {"error":null,"result":[{"count":2}]} 010: i=-1 r=125 b=false s=newstring u=<5> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6> -@@ -1230,7 +1244,7 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially empty, various ops], +@@ -1230,7 +1351,8 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially empty, various ops], 010: updated columns: s 010: updated columns: s 011: {"error":null,"result":[{"count":1}]} -012: ##deleted## uuid=<1> +012: deleted row: uuid=<1> ++012: i=0 r=123.5 b=true s=newstring u=<5> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> 013: reconnect 014: i=-1 r=125 b=false s=newstring u=<5> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6> 014: i=1 r=123.5 b=true s=mystring u=<2> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<3> <4>] uuid=<0> -@@ -1764,33 +1778,25 @@ OVSDB_CHECK_IDL_COMPOUND_INDEX_WITH_REF([set, simple3 
idl-compound-index-with-re +@@ -1288,6 +1410,18 @@ OVSDB_CHECK_IDL_PY([partial-map idl], + 009: done + ]]) + ++OVSDB_CHECK_IDL_PY([partial-map update set refmap idl], ++[['["idltest", {"op":"insert", "table":"simple3", "row":{"name":"myString1"}}, ++ {"op":"insert", "table":"simple5", "row":{"name":"myString2"}}]']], ++['partialmapmutateirefmap'], ++[[000: name=myString1 uset=[] ++000: name=myString2 irefmap=[] ++001: commit, status=success ++002: name=myString1 uset=[] ++002: name=myString2 irefmap=[(1 <0>)] ++003: done ++]]) ++ + m4_define([OVSDB_CHECK_IDL_PARTIAL_UPDATE_SET_COLUMN], + [AT_SETUP([$1 - C]) + AT_KEYWORDS([ovsdb server idl partial update set column positive $5]) +@@ -1764,33 +1898,25 @@ OVSDB_CHECK_IDL_COMPOUND_INDEX_WITH_REF([set, simple3 idl-compound-index-with-re ]]) m4_define([CHECK_STREAM_OPEN_BLOCK], @@ -42826,7 +44460,7 @@ index cc38d69c10..c12896c587 100644 # same as OVSDB_CHECK_IDL but uses Python IDL implementation with tcp # with multiple remotes to assert the idl connects to the leader of the Raft cluster -@@ -1798,7 +1804,7 @@ m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], +@@ -1798,7 +1924,7 @@ m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], [AT_SETUP([$1 - Python3 (leader only)]) AT_KEYWORDS([ovsdb server idl Python leader_only with tcp socket]) m4_define([LPBK],[127.0.0.1]) @@ -42835,7 +44469,7 @@ index cc38d69c10..c12896c587 100644 PARSE_LISTENING_PORT([s2.log], [TCP_PORT_1]) PARSE_LISTENING_PORT([s3.log], [TCP_PORT_2]) PARSE_LISTENING_PORT([s1.log], [TCP_PORT_3]) -@@ -1814,3 +1820,59 @@ m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], +@@ -1814,3 +1940,59 @@ m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], OVSDB_CHECK_IDL_LEADER_ONLY_PY([Check Python IDL connects to leader], 3, ['remote']) OVSDB_CHECK_IDL_LEADER_ONLY_PY([Check Python IDL reconnects to leader], 3, ['remote' '+remotestop' 'remote']) @@ -43074,33 +44708,84 @@ index d7854a1df3..32a77392c6 100755 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/test-ovsdb.c b/tests/test-ovsdb.c -index b1a4be36bb..aade40f3fb 100644 +index b1a4be36bb..31513c537f 100644 --- a/tests/test-ovsdb.c +++ b/tests/test-ovsdb.c -@@ -2030,7 +2030,7 @@ print_idl(struct ovsdb_idl *idl, int step) +@@ -1904,6 +1904,26 @@ print_idl_row_updated_link2(const struct idltest_link2 *l2, int step) + } + } + ++static void ++print_idl_row_updated_simple6(const struct idltest_simple6 *s6, int step) ++{ ++ size_t i; ++ bool updated = false; ++ ++ for (i = 0; i < IDLTEST_SIMPLE6_N_COLUMNS; i++) { ++ if (idltest_simple6_is_updated(s6, i)) { ++ if (!updated) { ++ printf("%03d: updated columns:", step); ++ updated = true; ++ } ++ printf(" %s", idltest_simple6_columns[i].name); ++ } ++ } ++ if (updated) { ++ printf("\n"); ++ } ++} ++ + static void + print_idl_row_updated_singleton(const struct idltest_singleton *sng, int step) + { +@@ -1991,6 +2011,22 @@ print_idl_row_link2(const struct idltest_link2 *l2, int step) + print_idl_row_updated_link2(l2, step); + } + ++static void ++print_idl_row_simple6(const struct idltest_simple6 *s6, int step) ++{ ++ int i; ++ ++ printf("%03d: name=%s ", step, s6->name); ++ printf("weak_ref=["); ++ for (i = 0; i < s6->n_weak_ref; i++) { ++ printf("%s"UUID_FMT, i ? 
" " : "", ++ UUID_ARGS(&s6->weak_ref[i]->header_.uuid)); ++ } ++ ++ printf("] uuid="UUID_FMT"\n", UUID_ARGS(&s6->header_.uuid)); ++ print_idl_row_updated_simple6(s6, step); ++} ++ + static void + print_idl_row_singleton(const struct idltest_singleton *sng, int step) + { +@@ -2030,37 +2066,64 @@ print_idl(struct ovsdb_idl *idl, int step) } static void -print_idl_track(struct ovsdb_idl *idl, int step, unsigned int seqno) +print_idl_track(struct ovsdb_idl *idl, int step) { ++ const struct idltest_simple6 *s6; const struct idltest_simple *s; const struct idltest_link1 *l1; -@@ -2038,26 +2038,42 @@ print_idl_track(struct ovsdb_idl *idl, int step, unsigned int seqno) + const struct idltest_link2 *l2; int n = 0; IDLTEST_SIMPLE_FOR_EACH_TRACKED (s, idl) { - if (idltest_simple_row_get_seqno(s, OVSDB_IDL_CHANGE_DELETE) >= seqno) { - printf("%03d: ##deleted## uuid="UUID_FMT"\n", step, UUID_ARGS(&s->header_.uuid)); +- } else { +- print_idl_row_simple(s, step); ++ print_idl_row_simple(s, step); + if (idltest_simple_is_deleted(s)) { + printf("%03d: deleted row: uuid="UUID_FMT"\n", step, + UUID_ARGS(&s->header_.uuid)); - } else { - print_idl_row_simple(s, step); -+ if (idltest_simple_is_new(s)) { -+ printf("%03d: inserted row: uuid="UUID_FMT"\n", step, -+ UUID_ARGS(&s->header_.uuid)); -+ } ++ } else if (idltest_simple_is_new(s)) { ++ printf("%03d: inserted row: uuid="UUID_FMT"\n", step, ++ UUID_ARGS(&s->header_.uuid)); } n++; } @@ -43132,10 +44817,34 @@ index b1a4be36bb..aade40f3fb 100644 + UUID_ARGS(&l2->header_.uuid)); + } + ++ } ++ n++; ++ } ++ IDLTEST_SIMPLE6_FOR_EACH_TRACKED (s6, idl) { ++ print_idl_row_simple6(s6, step); ++ if (idltest_simple6_is_deleted(s6)) { ++ printf("%03d: deleted row: uuid="UUID_FMT"\n", step, ++ UUID_ARGS(&s6->header_.uuid)); ++ } else if (idltest_simple6_is_new(s6)) { ++ printf("%03d: inserted row: uuid="UUID_FMT"\n", step, ++ UUID_ARGS(&s6->header_.uuid)); } n++; } -@@ -2391,6 +2407,10 @@ update_conditions(struct ovsdb_idl *idl, char *commands) ++ + if (!n) { + printf("%03d: empty\n", step); + } +@@ -2282,6 +2345,8 @@ find_table_class(const char *name) + return &idltest_table_link1; + } else if (!strcmp(name, "link2")) { + return &idltest_table_link2; ++ } else if (!strcmp(name, "simple6")) { ++ return &idltest_table_simple6; + } + return NULL; + } +@@ -2391,6 +2456,10 @@ update_conditions(struct ovsdb_idl *idl, char *commands) if (seqno == next_seqno ) { ovs_fatal(0, "condition unchanged"); } @@ -43146,7 +44855,7 @@ index b1a4be36bb..aade40f3fb 100644 ovsdb_idl_condition_destroy(&cond); json_destroy(json); } -@@ -2465,7 +2485,7 @@ do_idl(struct ovs_cmdl_context *ctx) +@@ -2465,7 +2534,7 @@ do_idl(struct ovs_cmdl_context *ctx) /* Print update. 
*/ if (track) { @@ -43155,6 +44864,135 @@ index b1a4be36bb..aade40f3fb 100644 ovsdb_idl_track_clear(idl); } else { print_idl(idl, step++); +diff --git a/tests/test-ovsdb.py b/tests/test-ovsdb.py +index 1b94b79a07..a196802743 100644 +--- a/tests/test-ovsdb.py ++++ b/tests/test-ovsdb.py +@@ -28,6 +28,7 @@ import ovs.util + import ovs.vlog + from ovs.db import data + from ovs.db import error ++from ovs.db.idl import _row_to_uuid as row_to_uuid + from ovs.fatal_signal import signal_alarm + + vlog = ovs.vlog.Vlog("test-ovsdb") +@@ -159,7 +160,8 @@ def get_simple_printable_row_string(row, columns): + is ovs.db.data.Atom): + value = getattr(row, column) + if isinstance(value, dict): +- value = sorted(value.items()) ++ value = sorted((row_to_uuid(k), row_to_uuid(v)) ++ for k, v in value.items()) + s += "%s=%s " % (column, value) + s = s.strip() + s = re.sub('""|,|u?\'', "", s) +@@ -212,6 +214,14 @@ def print_idl(idl, step): + print(s) + n += 1 + ++ if "simple5" in idl.tables: ++ simple5 = idl.tables["simple5"].rows ++ for row in simple5.values(): ++ s = "%03d: " % step ++ s += get_simple_printable_row_string(row, ["name", "irefmap"]) ++ print(s) ++ n += 1 ++ + if "link1" in idl.tables: + l1 = idl.tables["link1"].rows + for row in l1.values(): +@@ -303,6 +313,11 @@ def idltest_find_simple3(idl, i): + return next(idl.index_equal("simple3", "simple3_by_name", i), None) + + ++def idltest_find(idl, table, col, match): ++ return next((r for r in idl.tables[table].rows.values() if ++ getattr(r, col) == match), None) ++ ++ + def idl_set(idl, commands, step): + txn = ovs.db.idl.Transaction(idl) + increment = False +@@ -524,6 +539,12 @@ def idl_set(idl, commands, step): + setattr(new_row3, 'name', 'String3') + new_row3.addvalue('uset', new_row41.uuid) + assert len(getattr(new_row3, 'uset', [])) == 1 ++ elif name == 'partialmapmutateirefmap': ++ row3 = idltest_find_simple3(idl, "myString1") ++ row5 = idltest_find(idl, "simple5", "name", "myString2") ++ row5.setkey('irefmap', 1, row3.uuid) ++ maplen = len(row5.irefmap) ++ assert maplen == 1, "expected 1, got %d" % maplen + else: + sys.stderr.write("unknown command %s\n" % name) + sys.exit(1) +diff --git a/tests/test-reconnect.c b/tests/test-reconnect.c +index 5a14e7fe58..bf0463e25c 100644 +--- a/tests/test-reconnect.c ++++ b/tests/test-reconnect.c +@@ -48,6 +48,7 @@ test_reconnect_main(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) + + now = 1000; + reconnect = reconnect_create(now); ++ reconnect_receive_attempted(reconnect, LLONG_MAX); + reconnect_set_name(reconnect, "remote"); + reconnect_get_stats(reconnect, now, &prev); + printf("### t=%d ###\n", now); +diff --git a/tests/test-sha1.c b/tests/test-sha1.c +index b7279db6aa..cc80888a7d 100644 +--- a/tests/test-sha1.c ++++ b/tests/test-sha1.c +@@ -137,6 +137,42 @@ test_big_vector(void) + free(vec.data); + } + ++static void ++test_huge_vector(void) ++{ ++ enum { SIZE = 1000000000 }; ++ struct test_vector vec = { ++ NULL, SIZE, ++ /* Computed by the sha1sum utility for a file with 10^9 symbols 'a'. */ ++ { 0xD0, 0xF3, 0xE4, 0xF2, 0xF3, 0x1C, 0x66, 0x5A, 0xBB, 0xD8, ++ 0xF5, 0x18, 0xE8, 0x48, 0xD5, 0xCB, 0x80, 0xCA, 0x78, 0xF7 } ++ }; ++ int chunk = random_range(SIZE / 10000); ++ uint8_t md[SHA1_DIGEST_SIZE]; ++ struct sha1_ctx sha1; ++ size_t i, sz; ++ ++ /* It's not user-friendly to allocate 1GB of memory for a unit test, ++ * so we're allocating only a small chunk and re-using it. 
*/ ++ vec.data = xmalloc(chunk); ++ for (i = 0; i < chunk; i++) { ++ vec.data[i] = 'a'; ++ } ++ ++ sha1_init(&sha1); ++ for (sz = 0; sz < SIZE; sz += chunk) { ++ int n = sz + chunk < SIZE ? chunk : SIZE - sz; ++ ++ sha1_update(&sha1, vec.data, n); ++ } ++ sha1_final(&sha1, md); ++ ovs_assert(!memcmp(md, vec.output, SHA1_DIGEST_SIZE)); ++ ++ free(vec.data); ++ putchar('.'); ++ fflush(stdout); ++} ++ + static void + test_shar1_main(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) + { +@@ -147,6 +183,7 @@ test_shar1_main(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) + } + + test_big_vector(); ++ test_huge_vector(); + + putchar('\n'); + } diff --git a/tests/tunnel.at b/tests/tunnel.at index ce000a25e6..7a6b7f42c6 100644 --- a/tests/tunnel.at @@ -43408,6 +45246,62 @@ index e55bfc2ed5..987d211069 100755 self.mtime = time.time() +diff --git a/utilities/ovs-ctl.in b/utilities/ovs-ctl.in +index 8c5cd70327..d71c34e691 100644 +--- a/utilities/ovs-ctl.in ++++ b/utilities/ovs-ctl.in +@@ -43,7 +43,8 @@ set_hostname () { + else + hn="$(uname -n)" + fi +- ovs_vsctl set Open_vSwitch . external-ids:hostname="$hn" ++ # Set the hostname if it wasn't set before ++ ovs_vsctl add Open_vSwitch . external-ids hostname="$hn" + } + + set_system_ids () { +@@ -230,9 +231,14 @@ start_forwarding () { + } + + start_ovs_ipsec () { ++ if test X$RESTART_IKE_DAEMON = Xno; then ++ no_restart="--no-restart-ike-daemon" ++ fi ++ + ${datadir}/scripts/ovs-monitor-ipsec \ + --pidfile=${rundir}/ovs-monitor-ipsec.pid \ + --ike-daemon=$IKE_DAEMON \ ++ $no_restart \ + --log-file --detach --monitor unix:${rundir}/db.sock || return 1 + return 0 + } +@@ -254,8 +260,7 @@ stop_forwarding () { + } + + stop_ovs_ipsec () { +- ${bindir}/ovs-appctl -t ovs-monitor-ipsec exit || return 1 +- return 0 ++ stop_daemon ovs-monitor-ipsec + } + + ## --------------- ## +@@ -341,6 +346,7 @@ set_defaults () { + SPORT= + + IKE_DAEMON= ++ RESTART_IKE_DAEMON=yes + + type_file=$etcdir/system-type.conf + version_file=$etcdir/system-version.conf +@@ -424,6 +430,8 @@ Options for "enable-protocol": + Option for "start-ovs-ipsec": + --ike-daemon=IKE_DAEMON + the IKE daemon for ipsec tunnels (either libreswan or strongswan) ++ --no-restart-ike-daemon ++ do not restart the IKE daemon on startup + + Other options: + -h, --help display this help message diff --git a/utilities/ovs-dev.py b/utilities/ovs-dev.py index 248d22ab9a..c45788acd5 100755 --- a/utilities/ovs-dev.py diff --git a/SPECS/openvswitch2.13.spec b/SPECS/openvswitch2.13.spec index 22556ed..889d6a2 100644 --- a/SPECS/openvswitch2.13.spec +++ b/SPECS/openvswitch2.13.spec @@ -41,7 +41,7 @@ # option to build with libcap-ng, needed for running OVS as regular user %bcond_without libcapng # option to build with ipsec support -%bcond_with ipsec +%bcond_without ipsec # Build python2 (that provides python) and python3 subpackages on Fedora # Build only python3 (that provides python) subpackage on RHEL8 @@ -59,7 +59,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 2.13.0 -Release: 71%{?commit0:.%{date}git%{shortcommit0}}%{?commit1:dpdk%{shortcommit1}}%{?dist} +Release: 79%{?commit0:.%{date}git%{shortcommit0}}%{?commit1:dpdk%{shortcommit1}}%{?dist} # Nearly all of openvswitch is ASL 2.0. 
The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -453,7 +453,7 @@ export CPPFLAGS="-I ../include -I ../build-shared/include" export LDFLAGS="%{__global_ldflags} -L $RPM_BUILD_ROOT%{_libdir}" %py3_build %py3_install -[ -f "$RPM_BUILD_ROOT/%{python3_sitearch}/ovs/_json.cpython-%{python3_version_nodots}$(python3-config --abiflags)-%{_arch}-%{_target_os}%{?_gnu}.so" ] +[ -f "$RPM_BUILD_ROOT/%{python3_sitearch}/ovs/_json$(python3-config --extension-suffix)" ] ) popd @@ -703,6 +703,38 @@ exit 0 %endif %changelog +* Wed Jan 06 2021 Open vSwitch CI - 2.13.0-79 +- Merging upstream branch-2.13 + [86c9f9af773d8564a7d26e4e80fba389617fab17] + +* Wed Dec 23 2020 Open vSwitch CI - 2.13.0-78 +- Merging upstream branch-2.13 + [2cfb47639f7b1e375f9802ab0c8a03265eeb6b24] + +* Fri Dec 04 2020 Open vSwitch CI - 2.13.0-77 +- Merging upstream branch-2.13 + [350e017b667ea581716c2c2bc6157beeb1e7fdca] + +* Fri Dec 04 2020 Open vSwitch CI - 2.13.0-76 +- Merging upstream branch-2.13 + [ece49fb665132c11fe815ae13e2bf7ecd36d9c9d] + +* Thu Dec 03 2020 Open vSwitch CI - 2.13.0-75 +- Merging upstream branch-2.13 + [3e10785905d14d986f5721ede1c2235d1912ffba] + +* Wed Dec 02 2020 Open vSwitch CI - 2.13.0-74 +- Merging upstream branch-2.13 + [773fc1e75ba2b56c434caa43f9f78fda02d82f23] + +* Thu Nov 19 2020 Timothy Redaelli - 2.13.0-73 +- Fix building OVS on ppc64le and armv7hl with Python 3.9 + [a6928f88b26288fe7f8716f1e326b09d2fe4423b] + +* Tue Nov 17 2020 Timothy Redaelli - 2.13.0-72 +- redhat: Enable ipsec support (#1782141) + [2c7306b51f559b492c2aed5baba63c41d06cf296] + * Mon Nov 16 2020 Open vSwitch CI - 2.13.0-71 - Merging upstream branch-2.13 [2aae1815250d0236b6ee7f074f864bf4d2af5537]
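Side note on the %py3_install sanity check above: python3-config --extension-suffix expands to the interpreter- and architecture-specific suffix of C extension modules, so the check no longer hard-codes the cpython tag and ABI flags. The same value can be read from Python roughly as follows; the printed suffix varies by platform, for example ".cpython-36m-x86_64-linux-gnu.so" on RHEL 8.

    import sysconfig

    # EXT_SUFFIX is the value that `python3-config --extension-suffix` prints.
    suffix = sysconfig.get_config_var("EXT_SUFFIX")
    print("ovs/_json" + suffix)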