From cb18693131b9f871751399978a0683635d5420a8 Mon Sep 17 00:00:00 2001 From: Anita Zhang Date: Feb 07 2023 23:21:47 +0000 Subject: patch elake ea014c0..3ff031d for 71.1 hs+fb release - Patch 71.1 with diff between ea014c0..3ff031d from elake/ndctl fork - Sync changes from rhel/ndctl.spec (autogenerated by the GitHub Makefile) The elake patch file was created with `git diff` rather than `git format-patch` because format-patch doesn't handle the merges from the repo well. --- diff --git a/SOURCES/elake-diff-ea014c0-3ff031d.patch b/SOURCES/elake-diff-ea014c0-3ff031d.patch new file mode 100644 index 0000000..31563f0 --- /dev/null +++ b/SOURCES/elake-diff-ea014c0-3ff031d.patch @@ -0,0 +1,29337 @@ +diff --git a/.clang-format b/.clang-format +new file mode 100644 +index 0000000..d2e77d0 +--- /dev/null ++++ b/.clang-format +@@ -0,0 +1,162 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++# clang-format configuration file. Intended for clang-format >= 4. ++# Copied from Linux's .clang-format ++# ++# For more information, see: ++# ++# https://clang.llvm.org/docs/ClangFormat.html ++# https://clang.llvm.org/docs/ClangFormatStyleOptions.html ++# ++--- ++AccessModifierOffset: -4 ++AlignAfterOpenBracket: Align ++AlignConsecutiveAssignments: false ++AlignConsecutiveDeclarations: false ++#AlignEscapedNewlines: Left # Unknown to clang-format-4.0 ++AlignOperands: true ++AlignTrailingComments: false ++AllowAllParametersOfDeclarationOnNextLine: false ++AllowShortBlocksOnASingleLine: false ++AllowShortCaseLabelsOnASingleLine: false ++AllowShortFunctionsOnASingleLine: None ++AllowShortIfStatementsOnASingleLine: false ++AllowShortLoopsOnASingleLine: false ++AlwaysBreakAfterDefinitionReturnType: None ++AlwaysBreakAfterReturnType: None ++AlwaysBreakBeforeMultilineStrings: false ++AlwaysBreakTemplateDeclarations: false ++BinPackArguments: true ++BinPackParameters: true ++BraceWrapping: ++ AfterClass: false ++ AfterControlStatement: false ++ AfterEnum: false ++ AfterFunction: true ++ AfterNamespace: true ++ AfterObjCDeclaration: false ++ AfterStruct: false ++ AfterUnion: false ++ #AfterExternBlock: false # Unknown to clang-format-5.0 ++ BeforeCatch: false ++ BeforeElse: false ++ IndentBraces: false ++ #SplitEmptyFunction: true # Unknown to clang-format-4.0 ++ #SplitEmptyRecord: true # Unknown to clang-format-4.0 ++ #SplitEmptyNamespace: true # Unknown to clang-format-4.0 ++BreakBeforeBinaryOperators: None ++BreakBeforeBraces: Custom ++#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0 ++BreakBeforeTernaryOperators: false ++BreakConstructorInitializersBeforeComma: false ++#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0 ++BreakAfterJavaFieldAnnotations: false ++BreakStringLiterals: false ++ColumnLimit: 80 ++CommentPragmas: '^ IWYU pragma:' ++#CompactNamespaces: false # Unknown to clang-format-4.0 ++ConstructorInitializerAllOnOneLineOrOnePerLine: false ++ConstructorInitializerIndentWidth: 8 ++ContinuationIndentWidth: 8 ++Cpp11BracedListStyle: false ++DerivePointerAlignment: false ++DisableFormat: false ++ExperimentalAutoDetectBinPacking: false ++#FixNamespaceComments: false # Unknown to clang-format-4.0 ++ ++# Taken from: ++# while read -r sym; do ++# printf " - '%s'\n" "$sym"; ++# done < \ ++# <(cscope -dL6 "foreach|for_each" \ ++# | awk '{ print $4 $5 }' | grep -E 'foreach|for_each' \ ++# | sed -e 's/#define//' \ ++# -e 's/*//' \ ++# -e 's/://' \ ++# -e 's/\(.*for_each.*\)(.*/\1/' \ ++# -e 's/\(.*foreach.*\)(.*/\1/' \ ++# | sort -u) ++ForEachMacros: ++ - 'cxl_memdev_foreach' ++ - 
'daxctl_dev_foreach' ++ - 'daxctl_mapping_foreach' ++ - 'daxctl_region_foreach' ++ - 'kmod_list_foreach' ++ - 'kmod_list_foreach_reverse' ++ - 'list_for_each' ++ - 'list_for_each_off' ++ - 'list_for_each_rev' ++ - 'list_for_each_safe' ++ - 'list_for_each_safe_off' ++ - 'ndctl_btt_foreach' ++ - 'ndctl_btt_foreach_safe' ++ - 'ndctl_bus_foreach' ++ - 'ndctl_dax_foreach' ++ - 'ndctl_dax_foreach_safe' ++ - 'ndctl_dimm_foreach' ++ - 'ndctl_dimm_foreach_in_interleave_set' ++ - 'ndctl_dimm_foreach_in_region' ++ - 'ndctl_interleave_set_foreach' ++ - 'ndctl_mapping_foreach' ++ - 'ndctl_namespace_badblock_foreach' ++ - 'ndctl_namespace_bb_foreach' ++ - 'ndctl_namespace_foreach' ++ - 'ndctl_namespace_foreach_safe' ++ - 'ndctl_pfn_foreach' ++ - 'ndctl_pfn_foreach_safe' ++ - 'ndctl_region_badblock_foreach' ++ - 'ndctl_region_foreach' ++ - 'udev_list_entry_foreach' ++ ++#IncludeBlocks: Preserve # Unknown to clang-format-5.0 ++IncludeCategories: ++ - Regex: '.*' ++ Priority: 1 ++IncludeIsMainRegex: '(Test)?$' ++IndentCaseLabels: false ++#IndentPPDirectives: None # Unknown to clang-format-5.0 ++IndentWidth: 8 ++IndentWrappedFunctionNames: false ++JavaScriptQuotes: Leave ++JavaScriptWrapImports: true ++KeepEmptyLinesAtTheStartOfBlocks: false ++MacroBlockBegin: '' ++MacroBlockEnd: '' ++MaxEmptyLinesToKeep: 1 ++NamespaceIndentation: None ++#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0 ++ObjCBlockIndentWidth: 8 ++ObjCSpaceAfterProperty: true ++ObjCSpaceBeforeProtocolList: true ++ ++# Taken from git's rules ++#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0 ++PenaltyBreakBeforeFirstCallParameter: 30 ++PenaltyBreakComment: 10 ++PenaltyBreakFirstLessLess: 0 ++PenaltyBreakString: 10 ++PenaltyExcessCharacter: 100 ++PenaltyReturnTypeOnItsOwnLine: 60 ++ ++PointerAlignment: Right ++ReflowComments: false ++SortIncludes: false ++#SortUsingDeclarations: false # Unknown to clang-format-4.0 ++SpaceAfterCStyleCast: false ++SpaceAfterTemplateKeyword: true ++SpaceBeforeAssignmentOperators: true ++#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0 ++#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0 ++SpaceBeforeParens: ControlStatements ++#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0 ++SpaceInEmptyParentheses: false ++SpacesBeforeTrailingComments: 1 ++SpacesInAngles: false ++SpacesInContainerLiterals: false ++SpacesInCStyleCastParentheses: false ++SpacesInParentheses: false ++SpacesInSquareBrackets: false ++Standard: Cpp03 ++TabWidth: 8 ++UseTab: Always ++... 
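
The settings above track the kernel's .clang-format: tab indentation with 8-column tabs, an 80-column limit, a brace on its own line only after function definitions, and the ForEachMacros entries laid out as if they were native for loops. As a rough illustration only (not part of the patch; the helper name is hypothetical), code run through this configuration comes out like:

```c
/*
 * Illustrative only: a hypothetical helper formatted per the
 * .clang-format above -- tabs (TabWidth 8), 80 columns, the function
 * brace on its own line, and cxl_memdev_foreach (listed in
 * ForEachMacros) treated like a for loop.
 */
#include <cxl/libcxl.h>

static int count_memdevs(struct cxl_ctx *ctx)
{
	struct cxl_memdev *memdev;
	int count = 0;

	cxl_memdev_foreach(ctx, memdev)
		count++;

	return count;
}
```
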
+diff --git a/.gitignore b/.gitignore +index 3ef9ff7..1e2d7a5 100644 +--- a/.gitignore ++++ b/.gitignore +@@ -1,4 +1,5 @@ + *.o ++*.lo + *.xml + .deps/ + .libs/ +@@ -13,15 +14,20 @@ Makefile.in + /libtool + /stamp-h1 + *.1 ++*.3 + Documentation/daxctl/asciidoc.conf + Documentation/ndctl/asciidoc.conf ++Documentation/cxl/asciidoc.conf ++Documentation/cxl/lib/asciidoc.conf + Documentation/daxctl/asciidoctor-extensions.rb + Documentation/ndctl/asciidoctor-extensions.rb ++Documentation/cxl/asciidoctor-extensions.rb ++Documentation/cxl/lib/asciidoctor-extensions.rb ++Documentation/ndctl/attrs.adoc + .dirstamp + daxctl/config.h + daxctl/daxctl + daxctl/lib/libdaxctl.la +-daxctl/lib/libdaxctl.lo + daxctl/lib/libdaxctl.pc + *.a + ndctl/config.h +@@ -29,8 +35,6 @@ ndctl/lib/libndctl.pc + ndctl/ndctl + rhel/ + sles/ndctl.spec +-util/log.lo +-util/sysfs.lo + version.m4 + *.swp + cscope.files +@@ -59,3 +63,9 @@ test/fio.job + test/local-write-0-verify.state + test/ack-shutdown-count-set + test/list-smart-dimm ++cligen/gen/builtin.h ++cligen/gen/cxl.c ++cligen/gen/libcxl.c ++cligen/gen/libcxl.h ++cligen/gen/libcxl.sym ++cligen/gen/memdev.c +diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md +index 4c29d31..4f4865d 100644 +--- a/CONTRIBUTING.md ++++ b/CONTRIBUTING.md +@@ -6,13 +6,14 @@ The following is a set of guidelines that we adhere to, and request that + contributors follow. + + 1. The libnvdimm (kernel subsystem) and ndctl developers primarily use +- the [linux-nvdimm](https://lists.01.org/postorius/lists/linux-nvdimm.lists.01.org/) ++ the [nvdimm](https://subspace.kernel.org/lists.linux.dev.html) + mailing list for everything. It is recommended to send patches to +- **```linux-nvdimm@lists.01.org```** ++ **```nvdimm@lists.linux.dev```** ++ An archive is available on [lore](https://lore.kernel.org/nvdimm/) + + 1. Github [issues](https://github.com/pmem/ndctl/issues) are an acceptable + way to report a problem, but if you just have a question, +- [email](mailto:linux-nvdimm@lists.01.org) the above list. ++ [email](mailto:nvdimm@lists.linux.dev) the above list. + + 1. We follow the Linux Kernel [Coding Style Guide][cs] as applicable. + +diff --git a/COPYING b/COPYING +index 0ec3b27..712b3dd 100644 +--- a/COPYING ++++ b/COPYING +@@ -23,3 +23,6 @@ CC0-1.0 and MIT licenses according with: + LICENSES/other/MIT + + All contributions to the ndctl project are subject to this COPYING file. ++ ++SPD Decoding part included in libcxl.c in NDCTL project is copied from hardinfo. ++Source: https://github.com/lpereira/hardinfo +diff --git a/Documentation/cxl/Makefile.am b/Documentation/cxl/Makefile.am +new file mode 100644 +index 0000000..efabaa3 +--- /dev/null ++++ b/Documentation/cxl/Makefile.am +@@ -0,0 +1,61 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# Copyright (C) 2020-2021 Intel Corporation. All rights reserved. 
++ ++if USE_ASCIIDOCTOR ++ ++do_subst = sed -e 's,@Utility@,Cxl,g' -e's,@utility@,cxl,g' ++CONFFILE = asciidoctor-extensions.rb ++asciidoctor-extensions.rb: ../asciidoctor-extensions.rb.in ++ $(AM_V_GEN) $(do_subst) < $< > $@ ++ ++else ++ ++do_subst = sed -e 's,UTILITY,cxl,g' ++CONFFILE = asciidoc.conf ++asciidoc.conf: ../asciidoc.conf.in ++ $(AM_V_GEN) $(do_subst) < $< > $@ ++ ++endif ++ ++man1_MANS = \ ++ cxl.1 \ ++ cxl-list.1 \ ++ cxl-read-labels.1 \ ++ cxl-write-labels.1 \ ++ cxl-zero-labels.1 ++ ++EXTRA_DIST = $(man1_MANS) ++ ++CLEANFILES = $(man1_MANS) ++ ++XML_DEPS = \ ++ ../../version.m4 \ ++ ../copyright.txt \ ++ Makefile \ ++ $(CONFFILE) ++ ++RM ?= rm -f ++ ++if USE_ASCIIDOCTOR ++ ++%.1: %.txt $(XML_DEPS) ++ $(AM_V_GEN)$(RM) $@+ $@ && \ ++ $(ASCIIDOC) -b manpage -d manpage -acompat-mode \ ++ -I. -rasciidoctor-extensions \ ++ -amansource=cxl -amanmanual="cxl Manual" \ ++ -andctl_version=$(VERSION) -o $@+ $< && \ ++ mv $@+ $@ ++ ++else ++ ++%.xml: %.txt $(XML_DEPS) ++ $(AM_V_GEN)$(RM) $@+ $@ && \ ++ $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \ ++ --unsafe -acxl_version=$(VERSION) -o $@+ $< && \ ++ mv $@+ $@ ++ ++%.1: %.xml $(XML_DEPS) ++ $(AM_V_GEN)$(RM) $@ && \ ++ $(XMLTO) -o . -m ../manpage-normal.xsl man $< ++ ++endif +diff --git a/Documentation/cxl/cxl-list.txt b/Documentation/cxl/cxl-list.txt +new file mode 100644 +index 0000000..4e2be87 +--- /dev/null ++++ b/Documentation/cxl/cxl-list.txt +@@ -0,0 +1,64 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++cxl-list(1) ++=========== ++ ++NAME ++---- ++cxl-list - List CXL capable memory devices, and their attributes in json. ++ ++SYNOPSIS ++-------- ++[verse] ++'cxl list' [] ++ ++Walk the CXL capable device hierarchy in the system and list all device ++instances along with some of their major attributes. ++ ++Options can be specified to limit the output to specific devices. ++By default, 'cxl list' with no options is equivalent to: ++[verse] ++cxl list --devices ++ ++EXAMPLE ++------- ++---- ++# cxl list --devices ++{ ++ "memdev":"mem0", ++ "pmem_size":268435456, ++ "ram_size":0, ++} ++---- ++ ++OPTIONS ++------- ++-d:: ++--memdev=:: ++ Specify a cxl memory device name to filter the listing. For example: ++---- ++# cxl list --memdev=mem0 ++{ ++ "memdev":"mem0", ++ "pmem_size":268435456, ++ "ram_size":0, ++} ++---- ++ ++-D:: ++--memdevs:: ++ Include all CXL memory devices in the listing ++ ++-i:: ++--idle:: ++ Include idle (not enabled / zero-sized) devices in the listing ++ ++include::human-option.txt[] ++ ++include::verbose-option.txt[] ++ ++include::../copyright.txt[] ++ ++SEE ALSO ++-------- ++linkcxl:ndctl-list[1] +diff --git a/Documentation/cxl/cxl-read-labels.txt b/Documentation/cxl/cxl-read-labels.txt +new file mode 100644 +index 0000000..143f296 +--- /dev/null ++++ b/Documentation/cxl/cxl-read-labels.txt +@@ -0,0 +1,33 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++cxl-read-labels(1) ++================== ++ ++NAME ++---- ++cxl-read-labels - read out the label area on a CXL memdev ++ ++SYNOPSIS ++-------- ++[verse] ++'cxl read-labels' [..] [] ++ ++include::labels-description.txt[] ++This command dumps the raw binary data in a memdev's label area to stdout or a ++file. In the multi-memdev case the data is concatenated. 
++ ++OPTIONS ++------- ++include::labels-options.txt[] ++ ++-o:: ++--output:: ++ output file ++ ++include::../copyright.txt[] ++ ++SEE ALSO ++-------- ++linkcxl:cxl-write-labels[1], ++linkcxl:cxl-zero-labels[1], ++CXL-2.0 9.13.2 +diff --git a/Documentation/cxl/cxl-write-labels.txt b/Documentation/cxl/cxl-write-labels.txt +new file mode 100644 +index 0000000..c4592b3 +--- /dev/null ++++ b/Documentation/cxl/cxl-write-labels.txt +@@ -0,0 +1,32 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++cxl-write-labels(1) ++=================== ++ ++NAME ++---- ++cxl-write-labels - write data to the label area on a memdev ++ ++SYNOPSIS ++-------- ++[verse] ++'cxl write-labels [-i ]' ++ ++include::labels-description.txt[] ++Read data from the input filename, or stdin, and write it to the given ++ device. Note that the device must not be active in any region, ++otherwise the kernel will not allow write access to the device's label ++data area. ++ ++OPTIONS ++------- ++include::labels-options.txt[] ++-i:: ++--input:: ++ input file ++ ++SEE ALSO ++-------- ++linkcxl:cxl-read-labels[1], ++linkcxl:cxl-zero-labels[1], ++CXL-2.0 9.13.2 +diff --git a/Documentation/cxl/cxl-zero-labels.txt b/Documentation/cxl/cxl-zero-labels.txt +new file mode 100644 +index 0000000..bf95b24 +--- /dev/null ++++ b/Documentation/cxl/cxl-zero-labels.txt +@@ -0,0 +1,29 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++cxl-zero-labels(1) ++================== ++ ++NAME ++---- ++cxl-zero-labels - zero out the label area on a set of memdevs ++ ++SYNOPSIS ++-------- ++[verse] ++'cxl zero-labels' [..] [] ++ ++include::labels-description.txt[] ++This command resets the device to its default state by ++deleting all labels. ++ ++OPTIONS ++------- ++include::labels-options.txt[] ++ ++include::../copyright.txt[] ++ ++SEE ALSO ++-------- ++linkcxl:cxl-read-labels[1], ++linkcxl:cxl-write-labels[1], ++CXL-2.0 9.13.2 +diff --git a/Documentation/cxl/cxl.txt b/Documentation/cxl/cxl.txt +new file mode 100644 +index 0000000..e99e61b +--- /dev/null ++++ b/Documentation/cxl/cxl.txt +@@ -0,0 +1,34 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++cxl(1) ++====== ++ ++NAME ++---- ++cxl - Provides enumeration and provisioning commands for CXL devices ++ ++SYNOPSIS ++-------- ++[verse] ++'cxl' [--version] [--help] COMMAND [ARGS] ++ ++OPTIONS ++------- ++-v:: ++--version:: ++ Display the version of the 'cxl' utility. ++ ++-h:: ++--help:: ++ Run the 'cxl help' command. ++ ++DESCRIPTION ++----------- ++The cxl utility provides enumeration and provisioning commands for ++the CXL devices managed by the Linux kernel. ++ ++include::../copyright.txt[] ++ ++SEE ALSO ++-------- ++linkcxl:ndctl[1] +diff --git a/Documentation/cxl/human-option.txt b/Documentation/cxl/human-option.txt +new file mode 100644 +index 0000000..2f4de7a +--- /dev/null ++++ b/Documentation/cxl/human-option.txt +@@ -0,0 +1,8 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++-u:: ++--human:: ++ By default the command will output machine-friendly raw-integer ++ data. Instead, with this flag, numbers representing storage size ++ will be formatted as human readable strings with units, other ++ fields are converted to hexadecimal strings. 
+diff --git a/Documentation/cxl/labels-description.txt b/Documentation/cxl/labels-description.txt +new file mode 100644 +index 0000000..f60bd5d +--- /dev/null ++++ b/Documentation/cxl/labels-description.txt +@@ -0,0 +1,8 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++DESCRIPTION ++----------- ++The region label area is a small persistent partition of capacity ++available on some CXL memory devices. The label area is used to ++and configure or determine the set of memory devices participating ++in different interleave sets. +diff --git a/Documentation/cxl/labels-options.txt b/Documentation/cxl/labels-options.txt +new file mode 100644 +index 0000000..06fbac3 +--- /dev/null ++++ b/Documentation/cxl/labels-options.txt +@@ -0,0 +1,17 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++:: ++include::memdev-option.txt[] ++ ++-s:: ++--size=:: ++ Limit the operation to the given number of bytes. A size of 0 ++ indicates to operate over the entire label capacity. ++ ++-O:: ++--offset=:: ++ Begin the operation at the given offset into the label area. ++ ++-v:: ++ Turn on verbose debug messages in the library (if libcxl was built with ++ logging and debug enabled). +diff --git a/Documentation/cxl/lib/Makefile.am b/Documentation/cxl/lib/Makefile.am +new file mode 100644 +index 0000000..41e3a5f +--- /dev/null ++++ b/Documentation/cxl/lib/Makefile.am +@@ -0,0 +1,58 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# Copyright (C) 2020-2021 Intel Corporation. All rights reserved. ++ ++if USE_ASCIIDOCTOR ++ ++do_subst = sed -e 's,@Utility@,Libcxl,g' -e's,@utility@,libcxl,g' ++CONFFILE = asciidoctor-extensions.rb ++asciidoctor-extensions.rb: ../../asciidoctor-extensions.rb.in ++ $(AM_V_GEN) $(do_subst) < $< > $@ ++ ++else ++ ++do_subst = sed -e 's,UTILITY,libcxl,g' ++CONFFILE = asciidoc.conf ++asciidoc.conf: ../../asciidoc.conf.in ++ $(AM_V_GEN) $(do_subst) < $< > $@ ++ ++endif ++ ++man3_MANS = \ ++ libcxl.3 \ ++ cxl_new.3 ++ ++EXTRA_DIST = $(man3_MANS) ++ ++CLEANFILES = $(man3_MANS) ++ ++XML_DEPS = \ ++ ../../../version.m4 \ ++ ../../copyright.txt \ ++ Makefile \ ++ $(CONFFILE) ++ ++RM ?= rm -f ++ ++if USE_ASCIIDOCTOR ++ ++%.3: %.txt $(XML_DEPS) ++ $(AM_V_GEN)$(RM) $@+ $@ && \ ++ $(ASCIIDOC) -b manpage -d manpage -acompat-mode \ ++ -I. -rasciidoctor-extensions \ ++ -amansource=libcxl -amanmanual="libcxl Manual" \ ++ -andctl_version=$(VERSION) -o $@+ $< && \ ++ mv $@+ $@ ++ ++else ++ ++%.xml: %.txt $(XML_DEPS) ++ $(AM_V_GEN)$(RM) $@+ $@ && \ ++ $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \ ++ --unsafe -alibcxl_version=$(VERSION) -o $@+ $< && \ ++ mv $@+ $@ ++ ++%.3: %.xml $(XML_DEPS) ++ $(AM_V_GEN)$(RM) $@ && \ ++ $(XMLTO) -o . -m ../../manpage-normal.xsl man $< ++ ++endif +diff --git a/Documentation/cxl/lib/cxl_new.txt b/Documentation/cxl/lib/cxl_new.txt +new file mode 100644 +index 0000000..d4d5bcb +--- /dev/null ++++ b/Documentation/cxl/lib/cxl_new.txt +@@ -0,0 +1,43 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++cxl_new(3) ++========== ++ ++NAME ++---- ++cxl_new - Create a new library context object that acts as a handle for all ++library operations ++ ++SYNOPSIS ++-------- ++[verse] ++---- ++#include ++ ++int cxl_new(struct cxl_ctx **ctx); ++---- ++ ++DESCRIPTION ++----------- ++Instantiates a new library context, and stores an opaque pointer in ctx. The ++context is freed by linklibcxl:cxl_unref[3], i.e. cxl_new(3) implies an ++internal linklibcxl:cxl_ref[3]. ++ ++ ++RETURN VALUE ++------------ ++Returns 0 on success, and a negative errno on failure. 
++Possible error codes are: ++ ++ * -ENOMEM ++ * -ENXIO ++ ++EXAMPLE ++------- ++See example usage in test/libcxl.c ++ ++include::../../copyright.txt[] ++ ++SEE ALSO ++-------- ++linklibcxl:cxl_ref[3], linklibcxl:cxl_unref[3] +diff --git a/Documentation/cxl/lib/libcxl.txt b/Documentation/cxl/lib/libcxl.txt +new file mode 100644 +index 0000000..47f4cc3 +--- /dev/null ++++ b/Documentation/cxl/lib/libcxl.txt +@@ -0,0 +1,56 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++libcxl(3) ++========= ++ ++NAME ++---- ++libcxl - A library to interact with CXL devices through sysfs(5) ++and ioctl(2) interfaces ++ ++SYNOPSIS ++-------- ++[verse] ++#include ++cc ... -lcxl ++ ++DESCRIPTION ++----------- ++libcxl provides interfaces to interact with CXL devices in Linux, using sysfs ++interfaces for most kernel interactions, and the ioctl() interface for command ++submission. ++ ++The starting point for all library interfaces is a 'cxl_ctx' object, returned ++by linklibcxl:cxl_new[3]. CXL 'Type 3' memory devices are children of the ++cxl_ctx object, and can be iterated through using an iterator API. ++ ++Library level interfaces that are agnostic to any device, or a specific ++subclass of operations have the prefix 'cxl_' ++ ++The object representing a CXL Type 3 device is 'cxl_memdev'. Library interfaces ++related to these devices have the prefix 'cxl_memdev_'. These interfaces are ++mostly associated with sysfs interactions (unless otherwise noted in their ++respective documentation pages). They are typically used to retrieve data ++published by the kernel, or to send data or trigger kernel operations for a ++given device. ++ ++A 'cxl_cmd' is a reference counted object which is used to perform 'Mailbox' ++commands as described in the CXL Specification. A 'cxl_cmd' object is tied to a ++'cxl_memdev'. Associated library interfaces have the prefix 'cxl_cmd_'. Within ++this sub-class of interfaces, there are: ++ ++ * 'cxl_cmd_new_*' interfaces that allocate a new cxl_cmd object for a given ++ command type. ++ ++ * 'cxl_cmd_submit' which submits the command via ioctl() ++ ++ * 'cxl_cmd__get_' interfaces that get specific fields out of the ++ command response ++ ++ * 'cxl_cmd_get_*' interfaces to get general command related information. ++ ++include::../../copyright.txt[] ++ ++SEE ALSO ++-------- ++linklibcxl:cxl[1] +diff --git a/Documentation/cxl/memdev-option.txt b/Documentation/cxl/memdev-option.txt +new file mode 100644 +index 0000000..e778582 +--- /dev/null ++++ b/Documentation/cxl/memdev-option.txt +@@ -0,0 +1,4 @@ ++// SPDX-License-Identifier: GPL-2.0 ++A 'memX' device name, or a memdev id number. Restrict the operation to ++the specified memdev(s). The keyword 'all' can be specified to indicate ++the lack of any restriction. +diff --git a/Documentation/cxl/verbose-option.txt b/Documentation/cxl/verbose-option.txt +new file mode 100644 +index 0000000..cb62c8e +--- /dev/null ++++ b/Documentation/cxl/verbose-option.txt +@@ -0,0 +1,5 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++-v:: ++--verbose:: ++ Emit more debug messages +diff --git a/Documentation/daxctl/daxctl-reconfigure-device.txt b/Documentation/daxctl/daxctl-reconfigure-device.txt +index ad33eda..f112b3c 100644 +--- a/Documentation/daxctl/daxctl-reconfigure-device.txt ++++ b/Documentation/daxctl/daxctl-reconfigure-device.txt +@@ -119,6 +119,10 @@ recommended to use the --no-online option described below. This will abridge + the device reconfiguration operation to just hotplugging the memory, and + refrain from then onlining it. 
+ ++In case daxctl detects that there is a kernel policy to auto-online blocks ++(via /sys/devices/system/memory/auto_online_blocks), then reconfiguring to ++system-ram will result in a failure. This can be overridden with '--force'. ++ + OPTIONS + ------- + include::region-option.txt[] +@@ -162,12 +166,18 @@ include::movable-options.txt[] + + -f:: + --force:: +- When converting from "system-ram" mode to "devdax", it is expected ++ - When converting from "system-ram" mode to "devdax", it is expected + that all the memory sections are first made offline. By default, + daxctl won't touch online memory. However with this option, attempt + to offline the memory on the NUMA node associated with the dax device + before converting it back to "devdax" mode. + ++ - Additionally, if a kernel policy to auto-online blocks is detected, ++ reconfiguration to system-ram fails. With this option, the failure can ++ be overridden to allow reconfiguration regardless of kernel policy. ++ Doing this may result in a successful reconfiguration, but it may ++ not be possible to subsequently offline the memory without a reboot. ++ + + include::human-option.txt[] + +diff --git a/Makefile.am b/Makefile.am +index 60a1998..fa2010a 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -1,9 +1,10 @@ + include Makefile.am.in + + ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS} +-SUBDIRS = . daxctl/lib ndctl/lib ndctl daxctl ++SUBDIRS = . cxl/lib daxctl/lib ndctl/lib cxl ndctl daxctl + if ENABLE_DOCS +-SUBDIRS += Documentation/ndctl Documentation/daxctl ++SUBDIRS += Documentation/ndctl Documentation/daxctl Documentation/cxl ++SUBDIRS += Documentation/cxl/lib + endif + SUBDIRS += test + +@@ -22,17 +23,21 @@ CLEANFILES += $(noinst_SCRIPTS) + + do_rhel_subst = sed -e 's,VERSION,$(VERSION),g' \ + -e 's,DAX_DNAME,daxctl-devel,g' \ ++ -e 's,CXL_DNAME,cxl-devel,g' \ + -e 's,DNAME,ndctl-devel,g' \ + -e '/^%defattr.*/d' \ + -e 's,DAX_LNAME,daxctl-libs,g' \ ++ -e 's,CXL_LNAME,cxl-libs,g' \ + -e 's,LNAME,ndctl-libs,g' + + do_sles_subst = sed -e 's,VERSION,$(VERSION),g' \ + -e 's,DAX_DNAME,libdaxctl-devel,g' \ ++ -e 's,CXL_DNAME,libcxl-devel,g' \ + -e 's,DNAME,libndctl-devel,g' \ + -e 's,%license,%doc,g' \ + -e 's,\(^License:.*GPL\)v2,\1-2.0,g' \ + -e "s,DAX_LNAME,libdaxctl$$(($(LIBDAXCTL_CURRENT) - $(LIBDAXCTL_AGE))),g" \ ++ -e "s,CXL_LNAME,libcxl$$(($(LIBCXL_CURRENT) - $(LIBCXL_AGE))),g" \ + -e "s,LNAME,libndctl$$(($(LIBNDCTL_CURRENT) - $(LIBNDCTL_AGE))),g" + + rhel/ndctl.spec: ndctl.spec.in Makefile.am version.m4 +@@ -87,4 +92,7 @@ libutil_a_SOURCES = \ + util/filter.h \ + util/bitmap.h + +-nobase_include_HEADERS = daxctl/libdaxctl.h ++nobase_include_HEADERS = \ ++ daxctl/libdaxctl.h \ ++ cxl/libcxl.h \ ++ cxl/cxl_mem.h +diff --git a/Makefile.am.in b/Makefile.am.in +index bdceda9..a748128 100644 +--- a/Makefile.am.in ++++ b/Makefile.am.in +@@ -11,6 +11,7 @@ AM_CPPFLAGS = \ + -DNDCTL_MAN_PATH=\""$(mandir)"\" \ + -I${top_srcdir}/ndctl/lib \ + -I${top_srcdir}/ndctl \ ++ -I${top_srcdir}/cxl \ + -I${top_srcdir}/ \ + $(KMOD_CFLAGS) \ + $(UDEV_CFLAGS) \ +@@ -42,3 +43,7 @@ LIBNDCTL_AGE=19 + LIBDAXCTL_CURRENT=6 + LIBDAXCTL_REVISION=0 + LIBDAXCTL_AGE=5 ++ ++LIBCXL_CURRENT=1 ++LIBCXL_REVISION=0 ++LIBCXL_AGE=0 +diff --git a/README.md b/README.md +index 89dfc87..7a687ac 100644 +--- a/README.md ++++ b/README.md +@@ -95,7 +95,7 @@ test/test-suite.log: + SKIP: libndctl + ============== + test/init: nfit_test_init: nfit.ko: appears to be production version: /lib/modules/4.8.8-200.fc24.x86_64/kernel/drivers/acpi/nfit/nfit.ko.xz +-__ndctl_test_skip: explicit 
skip test_libndctl:2684 ++__test_skip: explicit skip test_libndctl:2684 + nfit_test unavailable skipping tests + ``` + +diff --git a/autogen.sh b/autogen.sh +index 2a52688..eb30402 100755 +--- a/autogen.sh ++++ b/autogen.sh +@@ -24,5 +24,5 @@ echo "----------------------------------------------------------------" + echo "Initialized build system. For a common configuration please run:" + echo "----------------------------------------------------------------" + echo +-echo "./configure CFLAGS='-g -O2' $args" ++echo "./configure --enable-debug CFLAGS='-g -O2' $args --disable-docs" + echo +diff --git a/cligen/README.md b/cligen/README.md +new file mode 100644 +index 0000000..574dd34 +--- /dev/null ++++ b/cligen/README.md +@@ -0,0 +1,31 @@ ++Parses a vendor_cmds YAML and produces c code for each operation. ++ ++Based on cci_vendor_cmds_May31.yaml, this parses the YAML and constructs a ++large portion of the necessary c code for implementation. Updated versions of ++the affected source files are created and placed into cxlcli-test/cxl/gen ++including libcxl.c, libcxl.h, libcxl.sym, builtin.h, cxl.c and memdev.c. ++ ++It requires some marked up base versions of these files to read in as ++templates, which are all included in the tar. ++ ++This is currently a first draft, so it has some limitations: ++ - Variable-length input / output payloads are not yet implemented. ++ - Names for variables & flags use mnemonics verbatim and are not truncated. ++ - Code is inserted directly into the relevant files instead of creating ++ vendor specific source files to import. These files are duplicated, not ++ overwritten, so it's fine for now but not ideal. ++ - The traversal for the pyyml output is a bit hacky, it'll need to be made ++ more robust in order to be extended to YAMLs from different vendors. ++ - Input parameters greater than 8 bytes of length need to be implemented ++ manually. ++ ++Instructions for use: ++ 1. $ tar git clone git@github.com:elake/ndctl.git ++ 2. $ cd ndctl/cligen ++ 3. $ python3 cligen.py ++ 4. Output files will be in ndctl/cligen/gen ++ ++ Note that we have not yet had the chance to test any of the generated code on ++ a pioneer card yet, so it has not been validated and may require some manual ++ modification after being generated. Expect this to be fixed once we have ++ access to a device for testing. +diff --git a/cligen/cligen.py b/cligen/cligen.py +new file mode 100644 +index 0000000..53ca3e7 +--- /dev/null ++++ b/cligen/cligen.py +@@ -0,0 +1,660 @@ ++""" ++Parses a vendor_cmds YAML and produces c code for each operation. ++ ++Based on cci_vendor_cmds_May31.yaml, this parses the YAML and constructs a ++large portion of the necessary c code for implementation. Updated versions of ++the affected source files are created and placed into cxlcli-test/cxl/gen ++including libcxl.c, libcxl.h, libcxl.sym, builtin.h, cxl.c and memdev.c. ++ ++It requires some marked up base versions of these files to read in as ++templates, which are all included in the tar. ++ ++This is currently a first draft, so it has some limitations: ++ - Variable-length input / output payloads are not yet implemented. ++ - Names for variables & flags use mnemonics verbatim and are not truncated. ++ - Code is inserted directly into the relevant files instead of creating ++ vendor specific source files to import. These files are duplicated, not ++ overwritten, so it's fine for now but not ideal. 
++ - The traversal for the pyyml output is a bit hacky, it'll need to be made ++ more robust in order to be extended to YAMLs from different vendors. ++ - Input parameters greater than 8 bytes of length need to be implemented ++ manually. ++ ++Instructions for use: ++ 1. $ tar git clone git@github.com:elake/ndctl.git ++ 2. $ cd ndctl/cligen ++ 3. $ python3 cligen.py ++ 4. Output files will be in ndctl/cligen/gen ++""" ++ ++import re ++import sys ++import os ++ ++# Only required to run without buck in fb environment: ++USER = os.environ.get('USER') ++try: ++ sys.path.append(f"/data/users/{USER}/fbsource/third-party/pypi/pyyaml/5.4.1/lib3/") ++except: ++ pass ++ ++import yaml ++ ++YAMFILE = f"gen/cci_vendor_cmds.yaml" ++OUTDIR = f"gen/" ++BASE = "base" ++BUILTINH = "builtin.h" ++CXLC = "cxl.c" ++LIBCXLC = "libcxl.c" ++LIBCXLH = "libcxl.h" ++LIBCXLSYM = "libcxl.sym" ++MEMDEVC = "memdev.c" ++SIMPLE = False # Only process commands with simple payloads (1, 2, 4, 8 byte param types) ++BLANK = False ++ ++class Payload(): ++ """ ++ Payload class ++ """ ++ def __init__(self, payload, input=True): ++ if input: ++ x = "i" ++ else: ++ x = "o" ++ self.fixed_size = True ++ self.simple = True ++ self.var_opl = False ++ self.payload = payload ++ self.name = payload.get(f'{x}pl_name', "") ++ self.mn = payload.get(f'{x}pl_mnemonic', "").lower() ++ self.size = payload.get(f'{x}pl_size_bytes') ++ if isinstance(self.size, str): ++ self.fixed_size = False ++ return ++ self.params = [] ++ self.build_params(payload, x) ++ self.is_simple() ++ ++ def build_params(self, payload, x): ++ used_mn = set() ++ self.params_used = False ++ for par in payload.get('parameters', []): ++ mn = par.get(f"{x}pl_par_mnemonic", "").lower() ++ if mn != 'rsvd': ++ self.params_used = True ++ if mn in used_mn: ++ offset = par.get(f"{x}pl_offset") ++ mn = f"{mn}{offset}" ++ used_mn.add(mn) ++ param = { ++ "name": par.get(f"{x}pl_par_name"), ++ "mn": mn, ++ "size": par.get(f"{x}pl_length"), ++ "offset": par.get(f"{x}pl_offset"), ++ "description": par.get(f"{x}pl_description"), ++ "enums": [], ++ "format_specifier": "", ++ "unit_size": par.get(f"{x}pl_unit_size"), ++ "contiguous": par.get(f"{x}pl_contiguous", False), ++ } ++ param.update({ ++ "type": self.types(param), ++ }) ++ enums = par.get("enumerators", []) ++ for en in enums: ++ param["enums"].append( { ++ "value": en.get(f"{x}pl_value"), ++ "name": en.get(f"{x}pl_en_name"), ++ "mn": en.get(f"{x}pl_en_mnemnonic") ++ }) ++ param["enums"].sort(key=lambda x:x.get("value")) ++ if enums: ++ param["format_specifier"] = r"%s" ++ else: ++ t = param.get("type") ++ if not isinstance(t, str): ++ t = t[0] ++ param["format_specifier"] = self.get_format_specifier(t) ++ t = param.get("type") ++ if not isinstance(t, str): ++ t = t[0] ++ param["utype"] = re.sub("__le", "u", t) ++ self.params.append(param) ++ ++ def get_variable_length(self, mn): ++ keywords = ["num", "inst"] ++ ++ def is_simple(self): ++ for param in self.params: ++ if param.get("size") not in {1, 2, 4, 8}: ++ self.simple = False ++ return False ++ self.simple = True ++ return True ++ ++ def get_format_specifier(self, t: str): ++ """ ++ Given a type, return a c format specifier. Currently trivial. ++ """ ++ return { ++ "u8" : r"%x", ++ "__le16" : r"%x", ++ "__le32" : r"%x", ++ "__le64" : r"%lx", ++ }.get(t, r"%x") ++ ++ def types(self, param): ++ """ ++ Given a parameter dict, return an appropriate c type. For large sizes ++ or sizes not root 2, return a size and an array length in a tuple. 
++ """ ++ i = param.get(f"size") ++ unit = param.get("unit_size") ++ t = { ++ 1: 'u8', ++ 2: '__le16', ++ 4: '__le32', ++ 8: '__le64', ++ } ++ if unit: ++ return (t.get(unit), i // unit) ++ if t.get(i): return t.get(i) ++ if i % 8 == 0: ++ return ('__le64', i // 8) ++ elif i % 4 == 0: ++ return ('__le32', i // 4) ++ elif i % 2 == 0: ++ return ('__le16', i // 2) ++ else: ++ return ('u8', i) ++ ++ def declaration(self, param, u=False): ++ """ ++ Return a declaration string for a parameter. ++ If param['type'] is string: "u8 some_name;" ++ If param['type'] is tuple: "u8 some_name[3];" ++ """ ++ if u: ++ t = param.get("utype") ++ else: ++ t = param.get("type") ++ mn = param.get("mn") ++ if isinstance(t, str): ++ return f"{t} {mn};\n" ++ else: ++ return f"{t[0]} {mn}[{t[1]}];\n" ++ ++def base(s): ++ return os.path.join(OUTDIR, f"{BASE}.{s}") ++ ++def to_cpu(t, mn): ++ t = re.sub("_", "", t) ++ if t == 'u8': ++ return mn ++ else: ++ return f"{t}_to_cpu({mn})" ++ ++def cpu_to(t, mn): ++ t = re.sub("_", "", t) ++ if t == 'u8': ++ return mn ++ else: ++ return f"cpu_to_{t}({mn})" ++ ++def generate_ipl_struct(name, ipl): ++ # struct memdev.c line 138-143 ++ pname = f"{name}_params" ++ out = f"static struct _{pname} {{\n" ++ for param in ipl.params: ++ mn = param.get('mn') ++ l = param.get('size') ++ if re.match(r"^rsvd\d*$", mn): ++ continue ++ if l < 5: ++ out += f"\tu32 {mn};\n" ++ else: ++ out += f"\tu64 {mn};\n" ++ out += f"\tbool verbose;\n" ++ out += f"}} {pname};\n\n" ++ return out ++ ++ ++def generate_option_struct(name, ipl): ++ # struct memdev.c line 153-157 ++ out = f"static const struct option cmd_{name}_options[] = {{\n" ++ out += f"\t{name.upper()}_BASE_OPTIONS(),\n" ++ if ipl.params_used: ++ out += f"\t{name.upper()}_OPTIONS(),\n" ++ out += f"\tOPT_END(),\n}};\n\n" ++ return out ++ ++def generate_def_base_options(name): ++ # Line 145 memdev.c ++ return f'#define {name.upper()}_BASE_OPTIONS() \\\nOPT_BOOLEAN(\'v\',"verbose", &{name}_params.verbose, "turn on debug")\n\n' ++ ++def generate_def_options(name, ipl): ++ if not ipl.params_used: ++ return "" ++ # options memdev.c line 148-151 ++ flags = set(['h', 'v']) ++ t = { ++ 'u8': 'OPT_UINTEGER', ++ '__le16': 'OPT_UINTEGER', ++ '__le32': 'OPT_UINTEGER', ++ '__le64': 'OPT_U64', ++ } ++ out = f"#define {name.upper()}_OPTIONS()" ++ for param in ipl.params: ++ mn = param.get('mn') ++ if re.match(r"^rsvd\d*$", mn): ++ continue ++ pname = param.get('name') ++ pt = param.get('type') ++ if not isinstance(pt, str): ++ pt = pt[0] ++ # ctype = t.get(pt) ++ if int(param.get('size')) < 5: ++ ctype = 'OPT_UINTEGER' ++ else: ++ ctype = 'OPT_U64' ++ flag = mn[0] ++ while flag in flags: ++ i = ord(flag) + 1 ++ if i > 122: ++ i = 65 ++ flag = chr(i) ++ flags.add(flag) ++ out += f" \\\n{ctype}(\'{flag}\', \"{mn}\", &{name}_params.{mn}, \"{pname}\")," ++ out = f"{out.rstrip(',')}\n\n" ++ return out ++ ++def generate_action_cmd(name, ipl): ++ # action_cmd memdev.c line 315-325 ++ out = f"static int action_cmd_{name}(struct cxl_memdev *memdev, struct action_context *actx)\n{{\n" ++ out += f"\tif (cxl_memdev_is_active(memdev)) {{\n" ++ out += f"\t\tfprintf(stderr, \"%s: memdev active, abort {name}\\n\",\n" ++ out += f"\t\t\tcxl_memdev_get_devname(memdev));\n" ++ out += f"\t\treturn -EBUSY;\n\t}}\n\n" ++ rout = f"\treturn cxl_memdev_{name}(memdev" ++ for param in ipl.params: ++ mn = param.get('mn') ++ if re.match(r"^rsvd\d*$", mn): ++ continue ++ t = param.get('type') ++ amp = "(void *) " ++ if isinstance(t, str): ++ # Prepend with & only if parameter is an 
array ++ amp = "" ++ if len(rout) > 60: ++ out += f"{rout},\n\t\t" ++ rout = f"{amp}{name}_params.{mn}" ++ continue ++ rout += f", {amp}{name}_params.{mn}" ++ out += f"{rout});\n}}\n" ++ return out ++ ++ ++def generate_cmd_def(name): ++ # memdev.c line 715-721 ++ return ( ++ f"int cmd_{name}(int argc, const char **argv, struct cxl_ctx *ctx)\n{{\n" ++ + f"\tint rc = memdev_action(argc, argv, ctx, action_cmd_{name}, cmd_{name}_options,\n" ++ + f'\t\t\t"cxl {name} [..] []");\n\n' ++ + f"\treturn rc >= 0 ? 0 : EXIT_FAILURE;\n}}\n" ++ ) ++ ++ ++def generate_mem_cmd_info(name, opcode, ipl, opl): ++ # libcxl.c line 1927-1930 ++ name = name.upper() ++ out = f"#define CXL_MEM_COMMAND_ID_{name} CXL_MEM_COMMAND_ID_RAW\n" ++ out += f"#define CXL_MEM_COMMAND_ID_{name}_OPCODE {opcode}\n" ++ if ipl.size: ++ out += f"#define CXL_MEM_COMMAND_ID_{name}_PAYLOAD_IN_SIZE {ipl.size}\n" ++ if opl.size: ++ out += f"#define CXL_MEM_COMMAND_ID_{name}_PAYLOAD_OUT_SIZE {opl.size}\n" ++ return out ++ ++ ++def generate_mbox(name, payload, end="in"): ++ # struct cxl_mbox libcxl.c line 1537-1546 & 1460-1471 ++ if not payload.params: ++ return "" ++ out = f"struct cxl_mbox_{name}_{end} {{\n" ++ for param in payload.params: ++ out += f"\t{payload.declaration(param)}" ++ out += f"}} __attribute__((packed));\n" ++ return out ++ ++ ++def generate_cxl_export(name, ipl, opl, fullname): ++ # CXL_EXPORT libcxl.c line 1549-1619 ++ out = "" ++ nout = f"CXL_EXPORT int cxl_memdev_{name}(struct cxl_memdev *memdev" ++ for param in ipl.params: ++ mn = param.get('mn') ++ if re.match(r"^rsvd\d*$", mn): ++ continue ++ t = param.get('type') ++ if len(nout) > 60: ++ out += f"{nout},\n\t" ++ nout = "" ++ if isinstance(t, str): ++ nout += f"{re.sub('__le', 'u', t)} {mn}" ++ else: ++ nout += f"{re.sub('__le', 'u', t[0])} *{mn}" ++ continue ++ if isinstance(t, str): ++ nout += f", {re.sub('__le', 'u', t)} {mn}" ++ else: ++ nout += f", {re.sub('__le', 'u', t[0])} *{mn}" ++ out += f"{nout})\n{{\n" ++ for param in opl.params: ++ enums = param.get("enums") ++ if enums: ++ mn = param.get("mn") ++ out += f"\tconst char *{mn}_descriptions[] = {{" ++ for en in enums: ++ out += f"\n\t\t\"{en.get('name')}\"," ++ out = f"{out.rstrip(',')}\n\t}};\n\n" ++ out += f"\tstruct cxl_cmd *cmd;\n" ++ if ipl.params_used: ++ out += f"\tstruct cxl_mem_query_commands *query;\n" ++ out += f"\tstruct cxl_command_info *cinfo;\n" ++ out += f"\tstruct cxl_mbox_{name}_in *{name}_in;\n" ++ if opl.params: ++ out += f"\tstruct cxl_mbox_{name}_out *{name}_out;\n" ++ out += f"\tint rc = 0;\n\n" ++ out += f"\tcmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_{name.upper()}_OPCODE);\n" ++ out += f"\tif (!cmd) {{\n" ++ out += f"\t\tfprintf(stderr, \"%s: cxl_cmd_new_raw returned Null output\\n\",\n" ++ out += f"\t\t\t\tcxl_memdev_get_devname(memdev));\n" ++ out += f"\t\treturn -ENOMEM;\n\t}}\n\n" ++ if ipl.params_used: ++ out += f"\tquery = cmd->query_cmd;\n" ++ out += f"\tcinfo = &query->commands[cmd->query_idx];\n\n" ++ out += f"\t/* update payload size */\n" ++ out += f"\tcinfo->size_in = CXL_MEM_COMMAND_ID_{name.upper()}_PAYLOAD_IN_SIZE;\n" ++ out += f"\tif (cinfo->size_in > 0) {{\n" ++ out += f"\t\t cmd->input_payload = calloc(1, cinfo->size_in);\n" ++ out += f"\t\tif (!cmd->input_payload)\n" ++ out += f"\t\t\treturn -ENOMEM;\n" ++ out += f"\t\tcmd->send_cmd->in.payload = (u64)cmd->input_payload;\n" ++ out += f"\t\tcmd->send_cmd->in.size = cinfo->size_in;\n\t}}\n\n" ++ out += f"\t{name}_in = (void *) cmd->send_cmd->in.payload;\n\n" ++ for param in ipl.params: ++ mn = 
param.get('mn') ++ if re.match(r"^rsvd\d*$", mn): ++ continue ++ t = param.get('type') ++ if isinstance(t, str): ++ t = re.sub("_", "", t) ++ if t == 'u8': ++ out += f"\t{name}_in->{mn} = {mn};\n" ++ else: ++ out += f"\t{name}_in->{mn} = cpu_to_{t}({mn});\n" ++ else: ++ tz = re.sub("_", "", t[0]) ++ out += f"\tfor(int i = 0; i < {t[1]}; i++) {{\n" ++ if tz == 'u8': ++ out += f"\t\t{name}_in->{mn}[i] = {mn}[i];\n\t}}\n\n" ++ else: ++ out += f"\t\t{name}_in->{mn}[i] = cpu_to_{tz}({mn}[i]);\n\t}}\n\n" ++ out += f"\trc = cxl_cmd_submit(cmd);\n" ++ out += f"\tif (rc < 0) {{\n" ++ out += f"\t\tfprintf(stderr, \"%s: cmd submission failed: %d (%s)\\n\",\n" ++ out += f"\t\t\t\tcxl_memdev_get_devname(memdev), rc, strerror(-rc));\n" ++ out += f"\t\t goto out;\n\t}}\n\n" ++ out += f"\trc = cxl_cmd_get_mbox_status(cmd);\n" ++ out += f"\tif (rc != 0) {{\n" ++ out += f"\t\tfprintf(stderr, \"%s: firmware status: %d\\n\",\n" ++ out += f"\t\t\t\tcxl_memdev_get_devname(memdev), rc);\n" ++ out += f"\t\trc = -ENXIO;\n" ++ out += f"\t\tgoto out;\n\t}}\n\n" ++ out += f"\tif (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_{name.upper()}) {{\n" ++ out += f"\t\t fprintf(stderr, \"%s: invalid command id 0x%x (expecting 0x%x)\\n\",\n" ++ out += f"\t\t\t\tcxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_{name.upper()});\n" ++ out += f"\t\treturn -EINVAL;\n\t}}\n\n" ++ if opl.params: ++ out += f"\t{name}_out = (void *)cmd->send_cmd->out.payload;\n" ++ heading = ( (78 - len(fullname)) // 2 ) * '=' ++ heading += f" {fullname} " ++ heading += ( 80 - len(heading) ) * '=' ++ out += f"\tfprintf(stdout, \"{heading}\\n\");\n" ++ for param in opl.params: ++ fs = param.get('format_specifier') ++ mn = param.get('mn') ++ if re.match(r"^rsvd\d*$", mn): ++ continue ++ t = param.get('type') ++ if isinstance(t, str): ++ print_value = to_cpu(t, f'{name}_out->{mn}') ++ if not param.get("enums"): ++ out += f"\tfprintf(stdout, \"{param.get('name')}: {fs}\\n\", {print_value});\n" ++ else: ++ out += f"\tfprintf(stdout, \"{param.get('name')}: {fs}\\n\", {mn}_descriptions[{name}_out->{mn}]);\n" ++ else: ++ out += f"\tfprintf(stdout, \"{param.get('name')}: \");\n" ++ out += f"\t/* Procedurally generated print statement. 
To print this array contiguously,\n\t add \"contiguous: True\" to the YAML param and rerun cligen.py */\n" ++ out += f"\tfor (int i = 0; i < {t[1]}; i++) {{\n" ++ if param.get("contiguous"): ++ out += f"\t\tfprintf(stdout, \"{fs}\", {to_cpu(t[0], f'{name}_out->{mn}[i]')});\n\t}}\n" ++ else: ++ out += f"\t\tfprintf(stdout, \"{mn}[%d]: {fs}\\n\", i, {to_cpu(t[0], f'{name}_out->{mn}[i]')});\n\t}}\n" ++ out += f"\tfprintf(stdout, \"\\n\");\n" ++ out += f"\nout:\n" ++ out += f"\tcxl_cmd_unref(cmd);\n" ++ out += f"\treturn rc;\n" ++ out += f"\treturn 0;\n}}\n\n" ++ return out ++ ++ ++def generate_libcxl_h(name, ipl): ++ # cxl_memdev libcxl.h line 62-63 ++ out = "" ++ nout = f"int cxl_memdev_{name}(struct cxl_memdev *memdev" ++ for param in ipl.params: ++ mn = param.get('mn') ++ if re.match(r"^rsvd\d*$", mn): ++ continue ++ t = param.get('type') ++ if len(nout) > 60: ++ out += f"{nout},\n\t" ++ nout = "" ++ if isinstance(t, str): ++ nout += f"{re.sub('__le', 'u', t)} {mn}" ++ else: ++ nout += f"{re.sub('__le', 'u', t[0])} *{mn}" ++ continue ++ if isinstance(t, str): ++ nout += f", {re.sub('__le', 'u', t)} {mn}" ++ else: ++ nout += f", {re.sub('__le', 'u', t[0])} *{mn}" ++ out += f"{nout});\n" ++ return out ++ ++def generate_libcxl_sym(name): ++ # libcxl.sym line 75 ++ return f"\tcxl_memdev_{name};\n" ++ ++def build_results(results): ++ bb = open(base(BUILTINH), 'r') ++ bc = open(base(CXLC), 'r') ++ blc = open(base(LIBCXLC), 'r') ++ blh = open(base(LIBCXLH), 'r') ++ bls = open(base(LIBCXLSYM), 'r') ++ bm = open(base(MEMDEVC), 'r') ++ b = open(os.path.join(OUTDIR, BUILTINH), 'w') ++ c = open(os.path.join(OUTDIR, CXLC), 'w') ++ lc = open(os.path.join(OUTDIR, LIBCXLC), 'w') ++ lh = open(os.path.join(OUTDIR, LIBCXLH), 'w') ++ ls = open(os.path.join(OUTDIR, LIBCXLSYM), 'w') ++ m = open(os.path.join(OUTDIR, MEMDEVC), 'w') ++ ++ rei = r".* insert here .*/" ++ rep = r".* insert here params options .*/" ++ rea = r".* insert here action .*/" ++ rec = r".* insert here cmd .*/" ++ res = r"} LIBCXL_3;" ++ ++ for line in bb.readlines(): ++ if re.search(rei, line): ++ for v in results.get("builtin_h").values(): ++ b.write(v) ++ else: ++ if not BLANK: b.write(line) ++ bb.close() ++ b.close() ++ ++ for line in bc.readlines(): ++ if re.search(rei, line): ++ for v in results.get("cxl_c").values(): ++ c.write(v) ++ else: ++ if not BLANK: c.write(line) ++ bc.close() ++ c.close() ++ ++ for line in blc.readlines(): ++ if re.search(rei, line): ++ for v in results.get("mem_cmd_info").keys(): ++ lc.write(results.get("mem_cmd_info").get(v)) ++ lc.write(f"\n") ++ lc.write(results.get("mbox_in").get(v)) ++ lc.write(f"\n") ++ lc.write(results.get("mbox_out").get(v)) ++ lc.write(f"\n") ++ lc.write(results.get("cxl_export").get(v)) ++ lc.write(f"\n") ++ else: ++ if not BLANK: lc.write(line) ++ blc.close() ++ lc.close() ++ ++ for line in bm.readlines(): ++ if re.search(rep, line): ++ for v in results.get("param_structs_memdev_c").keys(): ++ m.write(results.get("param_structs_memdev_c").get(v)) ++ m.write(results.get("base_options_memdev_c").get(v)) ++ m.write(results.get("options_memdev_c").get(v)) ++ m.write(results.get("option_structs_memdev_c").get(v)) ++ elif re.search(rea, line): ++ for v in results.get("action_cmd_memdev_c").values(): ++ m.write(v) ++ m.write(f"\n") ++ elif re.search(rec, line): ++ for v in results.get("cmd_memdev_c").values(): ++ m.write(v) ++ m.write(f"\n") ++ else: ++ if not BLANK: m.write(line) ++ bm.close() ++ m.close() ++ ++ for line in blh.readlines(): ++ if re.search(rei, line): ++ for v in 
results.get("libcxl_h").values(): ++ lh.write(v) ++ else: ++ if not BLANK: lh.write(line) ++ blh.close() ++ lh.close() ++ ++ for line in bls.readlines(): ++ if re.search(res, line): ++ for v in results.get("libcxl_sym").values(): ++ ls.write(v) ++ if not BLANK: ls.write(line) ++ bls.close() ++ ls.close() ++ ++def run(opcodes): ++ ++ cxl_c = {} ++ builtin_h = {} ++ param_structs_memdev_c = {} ++ option_structs_memdev_c = {} ++ base_options_memdev_c = {} ++ options_memdev_c = {} ++ action_cmd_memdev_c = {} ++ cmd_memdev_c = {} ++ mem_cmd_info = {} ++ mbox_in = {} ++ mbox_out = {} ++ cxl_export = {} ++ libcxl_h = {} ++ libcxl_sym = {} ++ skipped = {} ++ ++ for command in opcodes: ++ name = command.get("opcode_name", "").lower() ++ opcode = command.get("opcode") ++ mnemonic = command.get("mnemonic", "").lower() ++ description = command.get("opcode_description") ++ ipl = Payload(command.get("input_payload", [{}])[0], input=True) ++ if SIMPLE and not ipl.simple: ++ continue ++ opl = Payload(command.get("output_payload", [{}])[0], input=False) ++ if not (ipl.fixed_size and opl.fixed_size): ++ skipped.update({name: { "ipl.size": ipl.size, "opl.size": opl.size}}) ++ continue ++ cxl_c_cmd_struct = f'\t{{ "{re.sub("_", "-", mnemonic)}", .c_fn = cmd_{mnemonic} }},\n' ++ cxl_c[name] = cxl_c_cmd_struct ++ builtin_h_cmd = ( ++ f"int cmd_{mnemonic}(int argc, const char **argv, struct cxl_ctx *ctx);\n" ++ ) ++ builtin_h[name] = builtin_h_cmd ++ param_struct = generate_ipl_struct(mnemonic, ipl) ++ param_structs_memdev_c[name] = param_struct ++ base_options_def = generate_def_base_options(mnemonic) ++ base_options_memdev_c[name] = base_options_def ++ options_def = generate_def_options(mnemonic, ipl) ++ options_memdev_c[name] = options_def ++ option_struct = generate_option_struct(mnemonic, ipl) ++ option_structs_memdev_c[name] = option_struct ++ action_cmd = generate_action_cmd(mnemonic, ipl) ++ action_cmd_memdev_c[name] = action_cmd ++ cmd_def = generate_cmd_def(mnemonic) ++ cmd_memdev_c[name] = cmd_def ++ mem_command_info = generate_mem_cmd_info( ++ mnemonic, opcode, ipl, opl ++ ) ++ mem_cmd_info[name] = mem_command_info ++ cxl_mbox_in = generate_mbox(mnemonic, ipl, end='in') ++ mbox_in[name] = cxl_mbox_in ++ cxl_mbox_out = generate_mbox(mnemonic, opl, end='out') ++ mbox_out[name] = cxl_mbox_out ++ libcxl_export = generate_cxl_export(mnemonic, ipl, opl, name) ++ cxl_export[name] = libcxl_export ++ libh = generate_libcxl_h(mnemonic, ipl) ++ libcxl_h[name] = libh ++ libsym = generate_libcxl_sym(mnemonic) ++ libcxl_sym[name] = libsym ++ results = { ++ "cxl_c" : cxl_c, ++ "builtin_h" : builtin_h, ++ "param_structs_memdev_c" : param_structs_memdev_c, ++ "option_structs_memdev_c" : option_structs_memdev_c, ++ "base_options_memdev_c" : base_options_memdev_c, ++ "options_memdev_c" : options_memdev_c, ++ "action_cmd_memdev_c" : action_cmd_memdev_c, ++ "cmd_memdev_c" : cmd_memdev_c, ++ "mem_cmd_info" : mem_cmd_info, ++ "mbox_in" : mbox_in, ++ "mbox_out" : mbox_out, ++ "cxl_export" : cxl_export, ++ "libcxl_h" : libcxl_h, ++ "libcxl_sym" : libcxl_sym, ++ "skipped" : skipped, ++ } ++ build_results(results) ++ print("done.") ++ ++with open(YAMFILE, "r") as f: ++ yml = yaml.load(f) ++ yml_no_decode = yaml.load(f, Loader=yaml.BaseLoader) ++ command_sets = yml.get('command_sets') ++ opcodes = [] ++ for cs in command_sets: ++ opcodes += cs.get('command_set_opcodes') ++ run(opcodes) +diff --git a/cligen/gen/base.builtin.h b/cligen/gen/base.builtin.h +new file mode 100644 +index 0000000..0dc249f +--- /dev/null ++++ 
b/cligen/gen/base.builtin.h +@@ -0,0 +1,28 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2020-2021 Intel Corporation. All rights reserved. */ ++#ifndef _CXL_BUILTIN_H_ ++#define _CXL_BUILTIN_H_ ++ ++struct cxl_ctx; ++int cmd_list(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_write_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_read_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_zero_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_init_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_check_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_identify(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_supported_logs(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_cel_log(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_event_interrupt_policy(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_set_event_interrupt_policy(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_timestamp(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_set_timestamp(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_alert_config(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_set_alert_config(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_health_info(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_event_records(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_ld_info(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_clear_event_records(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ddr_info(int argc, const char **argv, struct cxl_ctx *ctx); ++/* insert here */ ++#endif /* _CXL_BUILTIN_H_ */ +diff --git a/cligen/gen/base.cxl.c b/cligen/gen/base.cxl.c +new file mode 100644 +index 0000000..ee2d6b0 +--- /dev/null ++++ b/cligen/gen/base.cxl.c +@@ -0,0 +1,117 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (C) 2020-2021 Intel Corporation. All rights reserved. */ ++/* Copyright (C) 2005 Andreas Ericsson. All rights reserved. 
*/ ++ ++/* originally copied from perf and git */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++const char cxl_usage_string[] = "cxl [--version] [--help] COMMAND [ARGS]"; ++const char cxl_more_info_string[] = ++ "See 'cxl help COMMAND' for more information on a specific command.\n" ++ " cxl --list-cmds to see all available commands"; ++ ++static int cmd_version(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ printf("%s\n", VERSION); ++ return 0; ++} ++ ++static int cmd_help(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ const char * const builtin_help_subcommands[] = { ++ "list", ++ NULL, ++ }; ++ struct option builtin_help_options[] = { ++ OPT_END(), ++ }; ++ const char *builtin_help_usage[] = { ++ "cxl help [command]", ++ NULL ++ }; ++ ++ argc = parse_options_subcommand(argc, argv, builtin_help_options, ++ builtin_help_subcommands, builtin_help_usage, 0); ++ ++ if (!argv[0]) { ++ printf("\n usage: %s\n\n", cxl_usage_string); ++ printf("\n %s\n\n", cxl_more_info_string); ++ return 0; ++ } ++ ++ return help_show_man_page(argv[0], "cxl", "CXL_MAN_VIEWER"); ++} ++ ++static struct cmd_struct commands[] = { ++ { "version", .c_fn = cmd_version }, ++ { "list", .c_fn = cmd_list }, ++ { "help", .c_fn = cmd_help }, ++ { "zero-labels", .c_fn = cmd_zero_labels }, ++ { "read-labels", .c_fn = cmd_read_labels }, ++ { "write-labels", .c_fn = cmd_write_labels }, ++ { "id-cmd", .c_fn = cmd_identify }, ++ { "get-supported-logs", .c_fn = cmd_get_supported_logs }, ++ { "get-cel-log", .c_fn = cmd_get_cel_log }, ++ { "get-event-interrupt-policy", .c_fn = cmd_get_event_interrupt_policy }, ++ { "set-event-interrupt-policy", .c_fn = cmd_set_event_interrupt_policy }, ++ { "get-timestamp", .c_fn = cmd_get_timestamp }, ++ { "set-timestamp", .c_fn = cmd_set_timestamp }, ++ { "get-alert-config", .c_fn = cmd_get_alert_config }, ++ { "set-alert-config", .c_fn = cmd_set_alert_config }, ++ { "get-health-info", .c_fn = cmd_get_health_info }, ++ { "get-event-records", .c_fn = cmd_get_event_records }, ++ { "get-ld-info", .c_fn = cmd_get_ld_info }, ++ { "clear-event-records", .c_fn = cmd_clear_event_records }, ++ { "ddr-info", .c_fn = cmd_ddr_info }, ++ /* insert here */ ++}; ++ ++int main(int argc, const char **argv) ++{ ++ struct cxl_ctx *ctx; ++ int rc; ++ ++ /* Look for flags.. */ ++ argv++; ++ argc--; ++ main_handle_options(&argv, &argc, cxl_usage_string, commands, ++ ARRAY_SIZE(commands)); ++ ++ if (argc > 0) { ++ if (!prefixcmp(argv[0], "--")) ++ argv[0] += 2; ++ } else { ++ /* The user didn't specify a command; give them help */ ++ printf("\n usage: %s\n\n", cxl_usage_string); ++ printf("\n %s\n\n", cxl_more_info_string); ++ goto out; ++ } ++ ++ rc = cxl_new(&ctx); ++ if (rc) ++ goto out; ++ main_handle_internal_command(argc, argv, ctx, commands, ++ ARRAY_SIZE(commands), PROG_CXL); ++ cxl_unref(ctx); ++ fprintf(stderr, "Unknown command: '%s'\n", argv[0]); ++out: ++ return 1; ++} +diff --git a/cligen/gen/base.libcxl.c b/cligen/gen/base.libcxl.c +new file mode 100644 +index 0000000..af8a736 +--- /dev/null ++++ b/cligen/gen/base.libcxl.c +@@ -0,0 +1,2089 @@ ++// SPDX-License-Identifier: LGPL-2.1 ++// Copyright (C) 2020-2021, Intel Corporation. All rights reserved. 
++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include "private.h" ++ ++/** ++ * struct cxl_ctx - library user context to find "nd" instances ++ * ++ * Instantiate with cxl_new(), which takes an initial reference. Free ++ * the context by dropping the reference count to zero with ++ * cxl_unref(), or take additional references with cxl_ref() ++ * @timeout: default library timeout in milliseconds ++ */ ++struct cxl_ctx { ++ /* log_ctx must be first member for cxl_set_log_fn compat */ ++ struct log_ctx ctx; ++ int refcount; ++ void *userdata; ++ int memdevs_init; ++ struct list_head memdevs; ++ struct kmod_ctx *kmod_ctx; ++ void *private_data; ++}; ++ ++static void free_memdev(struct cxl_memdev *memdev, struct list_head *head) ++{ ++ if (head) ++ list_del_from(head, &memdev->list); ++ kmod_module_unref(memdev->module); ++ free(memdev->firmware_version); ++ free(memdev->dev_buf); ++ free(memdev->dev_path); ++ free(memdev); ++} ++ ++/** ++ * cxl_get_userdata - retrieve stored data pointer from library context ++ * @ctx: cxl library context ++ * ++ * This might be useful to access from callbacks like a custom logging ++ * function. ++ */ ++CXL_EXPORT void *cxl_get_userdata(struct cxl_ctx *ctx) ++{ ++ if (ctx == NULL) ++ return NULL; ++ return ctx->userdata; ++} ++ ++/** ++ * cxl_set_userdata - store custom @userdata in the library context ++ * @ctx: cxl library context ++ * @userdata: data pointer ++ */ ++CXL_EXPORT void cxl_set_userdata(struct cxl_ctx *ctx, void *userdata) ++{ ++ if (ctx == NULL) ++ return; ++ ctx->userdata = userdata; ++} ++ ++CXL_EXPORT void cxl_set_private_data(struct cxl_ctx *ctx, void *data) ++{ ++ ctx->private_data = data; ++} ++ ++CXL_EXPORT void *cxl_get_private_data(struct cxl_ctx *ctx) ++{ ++ return ctx->private_data; ++} ++ ++/** ++ * cxl_new - instantiate a new library context ++ * @ctx: context to establish ++ * ++ * Returns zero on success and stores an opaque pointer in ctx. The ++ * context is freed by cxl_unref(), i.e. cxl_new() implies an ++ * internal cxl_ref(). ++ */ ++CXL_EXPORT int cxl_new(struct cxl_ctx **ctx) ++{ ++ struct kmod_ctx *kmod_ctx; ++ struct cxl_ctx *c; ++ int rc = 0; ++ ++ c = calloc(1, sizeof(struct cxl_ctx)); ++ if (!c) ++ return -ENOMEM; ++ ++ kmod_ctx = kmod_new(NULL, NULL); ++ if (check_kmod(kmod_ctx) != 0) { ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ c->refcount = 1; ++ log_init(&c->ctx, "libcxl", "CXL_LOG"); ++ info(c, "ctx %p created\n", c); ++ dbg(c, "log_priority=%d\n", c->ctx.log_priority); ++ *ctx = c; ++ list_head_init(&c->memdevs); ++ c->kmod_ctx = kmod_ctx; ++ ++ return 0; ++out: ++ free(c); ++ return rc; ++} ++ ++/** ++ * cxl_ref - take an additional reference on the context ++ * @ctx: context established by cxl_new() ++ */ ++CXL_EXPORT struct cxl_ctx *cxl_ref(struct cxl_ctx *ctx) ++{ ++ if (ctx == NULL) ++ return NULL; ++ ctx->refcount++; ++ return ctx; ++} ++ ++/** ++ * cxl_unref - drop a context reference count ++ * @ctx: context established by cxl_new() ++ * ++ * Drop a reference and if the resulting reference count is 0 destroy ++ * the context. 
++ */ ++CXL_EXPORT void cxl_unref(struct cxl_ctx *ctx) ++{ ++ struct cxl_memdev *memdev, *_d; ++ ++ if (ctx == NULL) ++ return; ++ ctx->refcount--; ++ if (ctx->refcount > 0) ++ return; ++ ++ list_for_each_safe(&ctx->memdevs, memdev, _d, list) ++ free_memdev(memdev, &ctx->memdevs); ++ ++ kmod_unref(ctx->kmod_ctx); ++ info(ctx, "context %p released\n", ctx); ++ free(ctx); ++} ++ ++/** ++ * cxl_set_log_fn - override default log routine ++ * @ctx: cxl library context ++ * @log_fn: function to be called for logging messages ++ * ++ * The built-in logging writes to stderr. It can be overridden by a ++ * custom function, to plug log messages into the user's logging ++ * functionality. ++ */ ++CXL_EXPORT void cxl_set_log_fn(struct cxl_ctx *ctx, ++ void (*cxl_log_fn)(struct cxl_ctx *ctx, int priority, ++ const char *file, int line, const char *fn, ++ const char *format, va_list args)) ++{ ++ ctx->ctx.log_fn = (log_fn) cxl_log_fn; ++ info(ctx, "custom logging function %p registered\n", cxl_log_fn); ++} ++ ++/** ++ * cxl_get_log_priority - retrieve current library loglevel (syslog) ++ * @ctx: cxl library context ++ */ ++CXL_EXPORT int cxl_get_log_priority(struct cxl_ctx *ctx) ++{ ++ return ctx->ctx.log_priority; ++} ++ ++/** ++ * cxl_set_log_priority - set log verbosity ++ * @priority: from syslog.h, LOG_ERR, LOG_INFO, LOG_DEBUG ++ * ++ * Note: LOG_DEBUG requires library be built with "configure --enable-debug" ++ */ ++CXL_EXPORT void cxl_set_log_priority(struct cxl_ctx *ctx, int priority) ++{ ++ ctx->ctx.log_priority = priority; ++} ++ ++static void *add_cxl_memdev(void *parent, int id, const char *cxlmem_base) ++{ ++ const char *devname = devpath_to_devname(cxlmem_base); ++ char *path = calloc(1, strlen(cxlmem_base) + 100); ++ struct cxl_ctx *ctx = parent; ++ struct cxl_memdev *memdev, *memdev_dup; ++ char buf[SYSFS_ATTR_SIZE]; ++ struct stat st; ++ ++ if (!path) ++ return NULL; ++ dbg(ctx, "%s: base: \'%s\'\n", __func__, cxlmem_base); ++ ++ memdev = calloc(1, sizeof(*memdev)); ++ if (!memdev) ++ goto err_dev; ++ memdev->id = id; ++ memdev->ctx = ctx; ++ ++ sprintf(path, "/dev/cxl/%s", devname); ++ if (stat(path, &st) < 0) ++ goto err_read; ++ memdev->major = major(st.st_rdev); ++ memdev->minor = minor(st.st_rdev); ++ ++ sprintf(path, "%s/pmem/size", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; ++ memdev->pmem_size = strtoull(buf, NULL, 0); ++ ++ sprintf(path, "%s/ram/size", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; ++ memdev->ram_size = strtoull(buf, NULL, 0); ++ ++ sprintf(path, "%s/payload_max", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; ++ memdev->payload_max = strtoull(buf, NULL, 0); ++ if (memdev->payload_max < 0) ++ goto err_read; ++ ++ sprintf(path, "%s/label_storage_size", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; ++ memdev->lsa_size = strtoull(buf, NULL, 0); ++ if (memdev->lsa_size == ULLONG_MAX) ++ goto err_read; ++ ++ memdev->dev_path = strdup(cxlmem_base); ++ if (!memdev->dev_path) ++ goto err_read; ++ ++ sprintf(path, "%s/firmware_version", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; ++ ++ memdev->firmware_version = strdup(buf); ++ if (!memdev->firmware_version) ++ goto err_read; ++ ++ memdev->dev_buf = calloc(1, strlen(cxlmem_base) + 50); ++ if (!memdev->dev_buf) ++ goto err_read; ++ memdev->buf_len = strlen(cxlmem_base) + 50; ++ ++ cxl_memdev_foreach(ctx, memdev_dup) ++ if (memdev_dup->id == memdev->id) { ++ 
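++			/*
++			 * A memdev with this id is already in the list;
++			 * drop the instance built above and hand back the
++			 * existing one.
++			 */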
free_memdev(memdev, NULL); ++ free(path); ++ return memdev_dup; ++ } ++ ++ list_add(&ctx->memdevs, &memdev->list); ++ free(path); ++ return memdev; ++ ++ err_read: ++ free(memdev->firmware_version); ++ free(memdev->dev_buf); ++ free(memdev->dev_path); ++ free(memdev); ++ err_dev: ++ free(path); ++ return NULL; ++} ++ ++static void cxl_memdevs_init(struct cxl_ctx *ctx) ++{ ++ if (ctx->memdevs_init) ++ return; ++ ++ ctx->memdevs_init = 1; ++ ++ sysfs_device_parse(ctx, "/sys/bus/cxl/devices", "mem", ctx, ++ add_cxl_memdev); ++} ++ ++CXL_EXPORT struct cxl_ctx *cxl_memdev_get_ctx(struct cxl_memdev *memdev) ++{ ++ return memdev->ctx; ++} ++ ++CXL_EXPORT struct cxl_memdev *cxl_memdev_get_first(struct cxl_ctx *ctx) ++{ ++ cxl_memdevs_init(ctx); ++ ++ return list_top(&ctx->memdevs, struct cxl_memdev, list); ++} ++ ++CXL_EXPORT struct cxl_memdev *cxl_memdev_get_next(struct cxl_memdev *memdev) ++{ ++ struct cxl_ctx *ctx = memdev->ctx; ++ ++ return list_next(&ctx->memdevs, memdev, list); ++} ++ ++CXL_EXPORT int cxl_memdev_get_id(struct cxl_memdev *memdev) ++{ ++ return memdev->id; ++} ++ ++CXL_EXPORT const char *cxl_memdev_get_devname(struct cxl_memdev *memdev) ++{ ++ return devpath_to_devname(memdev->dev_path); ++} ++ ++CXL_EXPORT int cxl_memdev_get_major(struct cxl_memdev *memdev) ++{ ++ return memdev->major; ++} ++ ++CXL_EXPORT int cxl_memdev_get_minor(struct cxl_memdev *memdev) ++{ ++ return memdev->minor; ++} ++ ++CXL_EXPORT unsigned long long cxl_memdev_get_pmem_size(struct cxl_memdev *memdev) ++{ ++ return memdev->pmem_size; ++} ++ ++CXL_EXPORT unsigned long long cxl_memdev_get_ram_size(struct cxl_memdev *memdev) ++{ ++ return memdev->ram_size; ++} ++ ++CXL_EXPORT const char *cxl_memdev_get_firmware_verison(struct cxl_memdev *memdev) ++{ ++ return memdev->firmware_version; ++} ++ ++CXL_EXPORT size_t cxl_memdev_get_lsa_size(struct cxl_memdev *memdev) ++{ ++ return memdev->lsa_size; ++} ++ ++CXL_EXPORT int cxl_memdev_is_active(struct cxl_memdev *memdev) ++{ ++ /* ++ * TODO: Currently memdevs are always considered inactive. Once we have ++ * cxl_bus drivers that are bound/unbound to memdevs, we'd use that to ++ * determine the active/inactive state. 
++ */ ++ return 0; ++} ++ ++CXL_EXPORT void cxl_cmd_unref(struct cxl_cmd *cmd) ++{ ++ if (!cmd) ++ return; ++ if (--cmd->refcount == 0) { ++ free(cmd->query_cmd); ++ free(cmd->send_cmd); ++ free(cmd->input_payload); ++ free(cmd->output_payload); ++ free(cmd); ++ } ++} ++ ++CXL_EXPORT void cxl_cmd_ref(struct cxl_cmd *cmd) ++{ ++ cmd->refcount++; ++} ++ ++static int cxl_cmd_alloc_query(struct cxl_cmd *cmd, int num_cmds) ++{ ++ size_t size; ++ ++ if (!cmd) ++ return -EINVAL; ++ ++ if (cmd->query_cmd != NULL) ++ free(cmd->query_cmd); ++ ++ size = sizeof(struct cxl_mem_query_commands) + ++ (num_cmds * sizeof(struct cxl_command_info)); ++ cmd->query_cmd = calloc(1, size); ++ if (!cmd->query_cmd) ++ return -ENOMEM; ++ ++ cmd->query_cmd->n_commands = num_cmds; ++ ++ return 0; ++} ++ ++static struct cxl_cmd *cxl_cmd_new(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ size_t size; ++ ++ size = sizeof(*cmd); ++ cmd = calloc(1, size); ++ if (!cmd) ++ return NULL; ++ ++ cxl_cmd_ref(cmd); ++ cmd->memdev = memdev; ++ ++ return cmd; ++} ++ ++static int __do_cmd(struct cxl_cmd *cmd, int ioctl_cmd, int fd) ++{ ++ void *cmd_buf; ++ int rc; ++ ++ switch (ioctl_cmd) { ++ case CXL_MEM_QUERY_COMMANDS: ++ cmd_buf = cmd->query_cmd; ++ break; ++ case CXL_MEM_SEND_COMMAND: ++ cmd_buf = cmd->send_cmd; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ rc = ioctl(fd, ioctl_cmd, cmd_buf); ++ if (rc < 0) ++ rc = -errno; ++ ++ return rc; ++} ++ ++static int do_cmd(struct cxl_cmd *cmd, int ioctl_cmd) ++{ ++ char *path; ++ struct stat st; ++ unsigned int major, minor; ++ int rc = 0, fd; ++ struct cxl_memdev *memdev = cmd->memdev; ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ const char *devname = cxl_memdev_get_devname(memdev); ++ ++ major = cxl_memdev_get_major(memdev); ++ minor = cxl_memdev_get_minor(memdev); ++ ++ if (asprintf(&path, "/dev/cxl/%s", devname) < 0) ++ return -ENOMEM; ++ ++ fd = open(path, O_RDWR); ++ if (fd < 0) { ++ err(ctx, "failed to open %s: %s\n", path, strerror(errno)); ++ rc = -errno; ++ goto out; ++ } ++ ++ if (fstat(fd, &st) >= 0 && S_ISCHR(st.st_mode) ++ && major(st.st_rdev) == major ++ && minor(st.st_rdev) == minor) { ++ rc = __do_cmd(cmd, ioctl_cmd, fd); ++ } else { ++ err(ctx, "failed to validate %s as a CXL memdev node\n", path); ++ rc = -ENXIO; ++ } ++ close(fd); ++out: ++ free(path); ++ return rc; ++} ++ ++static int alloc_do_query(struct cxl_cmd *cmd, int num_cmds) ++{ ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(cmd->memdev); ++ int rc; ++ ++ rc = cxl_cmd_alloc_query(cmd, num_cmds); ++ if (rc) ++ return rc; ++ ++ rc = do_cmd(cmd, CXL_MEM_QUERY_COMMANDS); ++ if (rc < 0) ++ err(ctx, "%s: query commands failed: %s\n", ++ cxl_memdev_get_devname(cmd->memdev), ++ strerror(-rc)); ++ return rc; ++} ++ ++static int cxl_cmd_do_query(struct cxl_cmd *cmd) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ const char *devname = cxl_memdev_get_devname(memdev); ++ int rc, n_commands; ++ ++ switch (cmd->query_status) { ++ case CXL_CMD_QUERY_OK: ++ return 0; ++ case CXL_CMD_QUERY_UNSUPPORTED: ++ return -EOPNOTSUPP; ++ case CXL_CMD_QUERY_NOT_RUN: ++ break; ++ default: ++ err(ctx, "%s: Unknown query_status %d\n", ++ devname, cmd->query_status); ++ return -EINVAL; ++ } ++ ++ rc = alloc_do_query(cmd, 0); ++ if (rc) ++ return rc; ++ ++ n_commands = cmd->query_cmd->n_commands; ++ dbg(ctx, "%s: supports %d commands\n", devname, n_commands); ++ ++ return alloc_do_query(cmd, n_commands); ++} ++ ++static int cxl_cmd_validate(struct cxl_cmd 
*cmd, u32 cmd_id) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ struct cxl_mem_query_commands *query = cmd->query_cmd; ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ u32 i; ++ ++ for (i = 0; i < query->n_commands; i++) { ++ struct cxl_command_info *cinfo = &query->commands[i]; ++ const char *cmd_name = cxl_command_names[cinfo->id].name; ++ ++ if (cinfo->id != cmd_id) ++ continue; ++ ++ dbg(ctx, "%s: %s: in: %d, out %d, flags: %#08x\n", ++ devname, cmd_name, cinfo->size_in, ++ cinfo->size_out, cinfo->flags); ++ ++ cmd->query_idx = i; ++ cmd->query_status = CXL_CMD_QUERY_OK; ++ return 0; ++ } ++ cmd->query_status = CXL_CMD_QUERY_UNSUPPORTED; ++ return -EOPNOTSUPP; ++} ++ ++CXL_EXPORT int cxl_cmd_set_input_payload(struct cxl_cmd *cmd, void *buf, ++ int size) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ ++ if (size > memdev->payload_max || size < 0) ++ return -EINVAL; ++ ++ if (!buf) { ++ ++ /* If the user didn't supply a buffer, allocate it */ ++ cmd->input_payload = calloc(1, size); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ } else { ++ /* ++ * Use user-buffer as is. If an automatic allocation was ++ * previously made (based on a fixed size from query), ++ * it will get freed during unref. ++ */ ++ cmd->send_cmd->in.payload = (u64)buf; ++ } ++ cmd->send_cmd->in.size = size; ++ ++ return 0; ++} ++ ++CXL_EXPORT int cxl_cmd_set_output_payload(struct cxl_cmd *cmd, void *buf, ++ int size) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ ++ if (size > memdev->payload_max || size < 0) ++ return -EINVAL; ++ ++ if (!buf) { ++ ++ /* If the user didn't supply a buffer, allocate it */ ++ cmd->output_payload = calloc(1, size); ++ if (!cmd->output_payload) ++ return -ENOMEM; ++ cmd->send_cmd->out.payload = (u64)cmd->output_payload; ++ } else { ++ /* ++ * Use user-buffer as is. If an automatic allocation was ++ * previously made (based on a fixed size from query), ++ * it will get freed during unref. 
++ */ ++ cmd->send_cmd->out.payload = (u64)buf; ++ } ++ cmd->send_cmd->out.size = size; ++ ++ return 0; ++} ++ ++static int cxl_cmd_alloc_send(struct cxl_cmd *cmd, u32 cmd_id) ++{ ++ struct cxl_mem_query_commands *query = cmd->query_cmd; ++ struct cxl_command_info *cinfo = &query->commands[cmd->query_idx]; ++ size_t size; ++ ++ if (!query) ++ return -EINVAL; ++ ++ size = sizeof(struct cxl_send_command); ++ cmd->send_cmd = calloc(1, size); ++ if (!cmd->send_cmd) ++ return -ENOMEM; ++ ++ if (cinfo->id != cmd_id) ++ return -EINVAL; ++ ++ cmd->send_cmd->id = cmd_id; ++ ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ if (cinfo->size_out < 0) ++ cinfo->size_out = cmd->memdev->payload_max; // -1 will require update ++ ++ if (cinfo->size_out > 0) { ++ cmd->output_payload = calloc(1, cinfo->size_out); ++ if (!cmd->output_payload) ++ return -ENOMEM; ++ cmd->send_cmd->out.payload = (u64)cmd->output_payload; ++ cmd->send_cmd->out.size = cinfo->size_out; ++ } ++ ++ return 0; ++} ++ ++static struct cxl_cmd *cxl_cmd_new_generic(struct cxl_memdev *memdev, ++ u32 cmd_id) ++{ ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ struct cxl_cmd *cmd; ++ int rc; ++ ++ cmd = cxl_cmd_new(memdev); ++ if (!cmd) ++ return NULL; ++ ++ rc = cxl_cmd_do_query(cmd); ++ if (rc) { ++ err(ctx, "%s: query returned: %s\n", devname, strerror(-rc)); ++ goto fail; ++ } ++ ++ rc = cxl_cmd_validate(cmd, cmd_id); ++ if (rc) { ++ errno = -rc; ++ goto fail; ++ } ++ ++ rc = cxl_cmd_alloc_send(cmd, cmd_id); ++ if (rc) { ++ errno = -rc; ++ goto fail; ++ } ++ ++ return cmd; ++ ++fail: ++ cxl_cmd_unref(cmd); ++ return NULL; ++} ++ ++CXL_EXPORT const char *cxl_cmd_get_devname(struct cxl_cmd *cmd) ++{ ++ return cxl_memdev_get_devname(cmd->memdev); ++} ++ ++#define cmd_get_int(cmd, n, N, field) \ ++do { \ ++ struct cxl_cmd_##n *c = (void *)cmd->send_cmd->out.payload; \ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_##N) \ ++ return EINVAL; \ ++ if (cmd->status < 0) \ ++ return cmd->status; \ ++ return le32_to_cpu(c->field); \ ++} while(0); ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_get_health_info( ++ struct cxl_memdev *memdev) ++{ ++ return cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_HEALTH_INFO); ++} ++ ++#define cmd_health_get_int(c, f) \ ++do { \ ++ cmd_get_int(c, get_health_info, GET_HEALTH_INFO, f); \ ++} while (0); ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_health_status(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, health_status); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_media_status(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, media_status); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_ext_status(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, ext_status); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_life_used(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, life_used); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_temperature(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, temperature); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_dirty_shutdowns(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, dirty_shutdowns); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_volatile_errors(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, volatile_errors); ++} ++ ++CXL_EXPORT int 
cxl_cmd_get_health_info_get_pmem_errors(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, pmem_errors); ++} ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_identify(struct cxl_memdev *memdev) ++{ ++ return cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_IDENTIFY); ++} ++ ++CXL_EXPORT int cxl_cmd_identify_get_fw_rev(struct cxl_cmd *cmd, char *fw_rev, ++ int fw_len) ++{ ++ struct cxl_cmd_identify *id = (void *)cmd->send_cmd->out.payload; ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_IDENTIFY) ++ return -EINVAL; ++ if (cmd->status < 0) ++ return cmd->status; ++ ++ if (fw_len > 0) ++ memcpy(fw_rev, id->fw_revision, ++ min(fw_len, CXL_CMD_IDENTIFY_FW_REV_LENGTH)); ++ return 0; ++} ++ ++CXL_EXPORT unsigned long long cxl_cmd_identify_get_partition_align( ++ struct cxl_cmd *cmd) ++{ ++ struct cxl_cmd_identify *id = (void *)cmd->send_cmd->out.payload; ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_IDENTIFY) ++ return -EINVAL; ++ if (cmd->status < 0) ++ return cmd->status; ++ ++ return le64_to_cpu(id->partition_align); ++} ++ ++CXL_EXPORT unsigned int cxl_cmd_identify_get_lsa_size(struct cxl_cmd *cmd) ++{ ++ struct cxl_cmd_identify *id = (void *)cmd->send_cmd->out.payload; ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_IDENTIFY) ++ return -EINVAL; ++ if (cmd->status < 0) ++ return cmd->status; ++ ++ return le32_to_cpu(id->lsa_size); ++} ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_raw(struct cxl_memdev *memdev, ++ int opcode) ++{ ++ struct cxl_cmd *cmd; ++ ++ /* opcode '0' is reserved */ ++ if (opcode <= 0) { ++ errno = EINVAL; ++ return NULL; ++ } ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_RAW); ++ if (!cmd) ++ return NULL; ++ ++ cmd->send_cmd->raw.opcode = opcode; ++ return cmd; ++} ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_get_lsa(struct cxl_memdev *memdev, ++ unsigned int offset, unsigned int length) ++{ ++ struct cxl_cmd_get_lsa_in *get_lsa; ++ struct cxl_cmd *cmd; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_LSA); ++ if (!cmd) ++ return NULL; ++ ++ get_lsa = (void *)cmd->send_cmd->in.payload; ++ get_lsa->offset = cpu_to_le32(offset); ++ get_lsa->length = cpu_to_le32(length); ++ return cmd; ++} ++ ++#define cmd_get_void(cmd, N) \ ++do { \ ++ void *p = (void *)cmd->send_cmd->out.payload; \ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_##N) \ ++ return NULL; \ ++ if (cmd->status < 0) \ ++ return NULL; \ ++ return p; \ ++} while(0); ++ ++CXL_EXPORT void *cxl_cmd_get_lsa_get_payload(struct cxl_cmd *cmd) ++{ ++ cmd_get_void(cmd, GET_LSA); ++} ++ ++CXL_EXPORT int cxl_cmd_submit(struct cxl_cmd *cmd) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ int rc; ++ ++ switch (cmd->query_status) { ++ case CXL_CMD_QUERY_OK: ++ break; ++ case CXL_CMD_QUERY_UNSUPPORTED: ++ return -EOPNOTSUPP; ++ case CXL_CMD_QUERY_NOT_RUN: ++ return -EINVAL; ++ default: ++ err(ctx, "%s: Unknown query_status %d\n", ++ devname, cmd->query_status); ++ return -EINVAL; ++ } ++ ++ dbg(ctx, "%s: submitting SEND cmd: in: %d, out: %d\n", devname, ++ cmd->send_cmd->in.size, cmd->send_cmd->out.size); ++ rc = do_cmd(cmd, CXL_MEM_SEND_COMMAND); ++ if (rc < 0) ++ err(ctx, "%s: send command failed: %s\n", ++ devname, strerror(-rc)); ++ cmd->status = cmd->send_cmd->retval; ++ dbg(ctx, "%s: got SEND cmd: in: %d, out: %d, retval: %d\n", devname, ++ cmd->send_cmd->in.size, cmd->send_cmd->out.size, cmd->status); ++ ++ return rc; ++} ++ ++CXL_EXPORT int cxl_cmd_get_mbox_status(struct cxl_cmd *cmd) 
++{ ++ return cmd->status; ++} ++ ++CXL_EXPORT int cxl_cmd_get_out_size(struct cxl_cmd *cmd) ++{ ++ return cmd->send_cmd->out.size; ++} ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_set_lsa(struct cxl_memdev *memdev, ++ void *lsa_buf, unsigned int offset, unsigned int length) ++{ ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ struct cxl_cmd_set_lsa *set_lsa; ++ struct cxl_cmd *cmd; ++ int rc; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_SET_LSA); ++ if (!cmd) ++ return NULL; ++ ++ /* this will allocate 'in.payload' */ ++ rc = cxl_cmd_set_input_payload(cmd, NULL, sizeof(*set_lsa) + length); ++ if (rc) { ++ err(ctx, "%s: cmd setup failed: %s\n", ++ cxl_memdev_get_devname(memdev), strerror(-rc)); ++ goto out_fail; ++ } ++ set_lsa = (void *)cmd->send_cmd->in.payload; ++ set_lsa->offset = cpu_to_le32(offset); ++ memcpy(set_lsa->lsa_data, lsa_buf, length); ++ ++ return cmd; ++ ++out_fail: ++ cxl_cmd_unref(cmd); ++ return NULL; ++} ++ ++enum lsa_op { ++ LSA_OP_GET, ++ LSA_OP_SET, ++ LSA_OP_ZERO, ++}; ++ ++static int lsa_op(struct cxl_memdev *memdev, int op, void **buf, ++ size_t length, size_t offset) ++{ ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ struct cxl_cmd *cmd; ++ void *zero_buf = NULL; ++ int rc = 0; ++ ++ if (op != LSA_OP_ZERO && (buf == NULL || *buf == NULL)) { ++ err(ctx, "%s: LSA buffer cannot be NULL\n", devname); ++ return -EINVAL; ++ } ++ ++ /* TODO: handle the case for offset + len > mailbox payload size */ ++ switch (op) { ++ case LSA_OP_GET: ++ if (length == 0) ++ length = memdev->lsa_size; ++ cmd = cxl_cmd_new_get_lsa(memdev, offset, length); ++ if (!cmd) ++ return -ENOMEM; ++ rc = cxl_cmd_set_output_payload(cmd, *buf, length); ++ if (rc) { ++ err(ctx, "%s: cmd setup failed: %s\n", ++ cxl_memdev_get_devname(memdev), strerror(-rc)); ++ goto out; ++ } ++ break; ++ case LSA_OP_ZERO: ++ if (length == 0) ++ length = memdev->lsa_size; ++ zero_buf = calloc(1, length); ++ if (!zero_buf) ++ return -ENOMEM; ++ buf = &zero_buf; ++ /* fall through */ ++ case LSA_OP_SET: ++ cmd = cxl_cmd_new_set_lsa(memdev, *buf, offset, length); ++ if (!cmd) { ++ rc = -ENOMEM; ++ goto out_free; ++ } ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ err(ctx, "%s: cmd submission failed: %s\n", ++ devname, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ err(ctx, "%s: firmware status: %d\n", ++ devname, rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (op == LSA_OP_GET) ++ memcpy(*buf, cxl_cmd_get_lsa_get_payload(cmd), length); ++ /* ++ * TODO: If writing, the memdev may need to be disabled/re-enabled to ++ * refresh any cached LSA data in the kernel. 
++ */ ++ ++out: ++ cxl_cmd_unref(cmd); ++out_free: ++ free(zero_buf); ++ return rc; ++} ++ ++CXL_EXPORT int cxl_memdev_zero_lsa(struct cxl_memdev *memdev) ++{ ++ return lsa_op(memdev, LSA_OP_ZERO, NULL, 0, 0); ++} ++ ++CXL_EXPORT int cxl_memdev_set_lsa(struct cxl_memdev *memdev, void *buf, ++ size_t length, size_t offset) ++{ ++ return lsa_op(memdev, LSA_OP_SET, &buf, length, offset); ++} ++ ++CXL_EXPORT int cxl_memdev_get_lsa(struct cxl_memdev *memdev, void *buf, ++ size_t length, size_t offset) ++{ ++ return lsa_op(memdev, LSA_OP_GET, &buf, length, offset); ++} ++ ++CXL_EXPORT int cxl_memdev_cmd_identify(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_cmd_identify *id; ++ int rc = 0; ++ ++ printf("id: 0x%x\n", CXL_MEM_COMMAND_ID_IDENTIFY); ++ cmd = cxl_cmd_new_identify(memdev); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_identify returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ id = (void *)cmd->send_cmd->out.payload; ++ fprintf(stderr, "size of payload: %ld\n", sizeof(*id)); ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_IDENTIFY) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_IDENTIFY); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "%s info\n", cxl_memdev_get_devname(memdev)); ++ fprintf(stdout, " fw revision: "); ++ for (int i=0; i < CXL_CMD_IDENTIFY_FW_REV_LENGTH; ++i) ++ fprintf(stdout, "%02x ", id->fw_revision[i]); ++ fprintf(stdout, "\n"); ++ fprintf(stdout, " total_capacity: %lu MB (%lu GB)\n", ++ le64_to_cpu(id->total_capacity), (le64_to_cpu(id->total_capacity))/4); ++ fprintf(stdout, " volatile_capacity: %lu MB (%lu GB)\n", ++ le64_to_cpu(id->volatile_capacity), (le64_to_cpu(id->volatile_capacity))/4); ++ fprintf(stdout, " persistent_capacity: %lu MB (%lu GB)\n", ++ le64_to_cpu(id->persistent_capacity), (le64_to_cpu(id->persistent_capacity))/4); ++ fprintf(stdout, " partition_align: %lu MB (%lu GB)\n", ++ le64_to_cpu(id->partition_align), (le64_to_cpu(id->partition_align))/4); ++ fprintf(stdout, " info_event_log_size: %d\n", le16_to_cpu(id->info_event_log_size)); ++ fprintf(stdout, " warning_event_log_size: %d\n", le16_to_cpu(id->warning_event_log_size)); ++ fprintf(stdout, " failure_event_log_size: %d\n", le16_to_cpu(id->failure_event_log_size)); ++ fprintf(stdout, " fatal_event_log_size: %d\n", le16_to_cpu(id->fatal_event_log_size)); ++ fprintf(stdout, " lsa_size: %d\n", le32_to_cpu(id->lsa_size)); ++ for (int i=0; i < 3; ++i) ++ fprintf(stdout, " poison_list_max_mer[%d]: %d\n", i, id->poison_list_max_mer[i]); ++ fprintf(stdout, " inject_poison_limit: %d\n", le16_to_cpu(id->inject_poison_limit)); ++ fprintf(stdout, " poison_caps: %d\n", id->poison_caps); ++ fprintf(stdout, " qos_telemetry_caps: %d\n", id->qos_telemetry_caps); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++struct cxl_mbox_get_supported_logs { ++ __le16 entries; ++ u8 rsvd[6]; ++ struct gsl_entry { ++ uuid_t uuid; ++ __le32 size; ++ } __attribute__((packed)) entry[]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_supported_logs(struct cxl_memdev *memdev) ++{ ++ 
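++	/*
++	 * Build a Get Supported Logs command via the generic query/send
++	 * path, submit it, and dump each returned log UUID and size to
++	 * stdout.
++	 */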
struct cxl_cmd *cmd; ++ struct cxl_mbox_get_supported_logs *gsl; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_SUPPORTED_LOGS); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_identify returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_SUPPORTED_LOGS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), ++ cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_GET_SUPPORTED_LOGS); ++ return -EINVAL; ++ } ++ ++ gsl = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "payload info\n"); ++ fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ fprintf(stdout, " entries: %d\n", gsl->entries); ++ for (int e=0; e < gsl->entries; ++e) { ++ char uuid[40]; ++ uuid_unparse(gsl->entry[e].uuid, uuid); ++ fprintf(stdout, " entries[%d] uuid: %s, size: %d\n", e, uuid, gsl->entry[e].size); ++ } ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CEL_UUID "0da9c0b5-bf41-4b78-8f79-96b1623b3f17" ++ ++struct cxl_mbox_get_log { ++ uuid_t uuid; ++ __le32 offset; ++ __le32 length; ++} __attribute__((packed)); ++ ++struct cel_entry { ++ __le16 opcode; ++ __le16 effect; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_cel_log(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_get_log *get_log_input; ++ struct cel_entry *cel_entries; ++ int no_cel_entries; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_LOG); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_memdev_get_cel_log returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ fprintf(stderr, "in size: 0x%x\n", cmd->send_cmd->in.size); ++ get_log_input = (void *) cmd->send_cmd->in.payload; ++ uuid_parse(CEL_UUID, get_log_input->uuid); ++ get_log_input->offset = 0; ++ get_log_input->length = cmd->memdev->payload_max; ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_LOG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_LOG); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "payload info\n"); ++ fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ cel_entries = (void *)cmd->send_cmd->out.payload; ++ no_cel_entries = (cmd->send_cmd->out.size)/sizeof(struct cel_entry); ++ fprintf(stdout, " no_cel_entries size: %d\n", no_cel_entries); ++ for (int e = 0; e < no_cel_entries; ++e) { ++ fprintf(stdout, " cel_entry[%d] opcode: 0x%x, effect: 0x%x\n", e, ++ le16_to_cpu(cel_entries[e].opcode), ++ le16_to_cpu(cel_entries[e].effect)); ++ } ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY CXL_MEM_COMMAND_ID_RAW ++#define 
CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY_OPCODE 0x102 ++ ++struct cxl_mbox_get_event_interrupt_policy { ++ u8 info_event_log_int_settings; ++ u8 warning_event_log_int_settings; ++ u8 failure_event_log_int_settings; ++ u8 fatal_event_log_int_settings; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_event_interrupt_policy(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_get_event_interrupt_policy *event_interrupt_policy_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "payload info\n"); ++ fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ event_interrupt_policy_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, " info_event_log_int_settings: 0x%x\n", event_interrupt_policy_out->info_event_log_int_settings); ++ fprintf(stdout, " warning_event_log_int_settings: 0x%x\n", event_interrupt_policy_out->warning_event_log_int_settings); ++ fprintf(stdout, " failure_event_log_int_settings: 0x%x\n", event_interrupt_policy_out->failure_event_log_int_settings); ++ fprintf(stdout, " fatal_event_log_int_settings: 0x%x\n", event_interrupt_policy_out->fatal_event_log_int_settings); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY_OPCODE 0x103 ++#define CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY_PAYLOAD_IN_SIZE 0x4 ++ ++CXL_EXPORT int cxl_memdev_set_event_interrupt_policy(struct cxl_memdev *memdev, u32 int_policy) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_get_event_interrupt_policy *interrupt_policy_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fprintf(stdout, "in size: 0x%x\n", cmd->send_cmd->in.size); ++ fprintf(stdout, " int_policy: 0x%x\n", int_policy); ++ interrupt_policy_in = (void *) cmd->send_cmd->in.payload; ++ ++ /* below is meant for readability, you don't really need this */ ++ int_policy = cpu_to_be32(int_policy); ++ 
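++	/*
++	 * After the byte swap above, each byte of int_policy carries the
++	 * interrupt settings for one event log; unpack them into the
++	 * 4-byte input payload in info, warning, failure, fatal order.
++	 */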
interrupt_policy_in->info_event_log_int_settings = (int_policy & 0xff); ++ interrupt_policy_in->warning_event_log_int_settings = ((int_policy >> 8) & 0xff); ++ interrupt_policy_in->failure_event_log_int_settings = ((int_policy >> 16) & 0xff); ++ interrupt_policy_in->fatal_event_log_int_settings = ((int_policy >> 24) & 0xff); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "command completed successfully\n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_GET_TIMESTAMP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_GET_TIMESTAMP_OPCODE 0x0300 ++ ++CXL_EXPORT int cxl_memdev_get_timestamp(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ __le64 *timestamp_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_GET_TIMESTAMP_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_TIMESTAMP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_TIMESTAMP); ++ return -EINVAL; ++ } ++ ++ timestamp_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "timestamp: 0x%lx\n", le64_to_cpu(*timestamp_out)); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CXL_MEM_COMMAND_ID_SET_TIMESTAMP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_SET_TIMESTAMP_OPCODE 0x0301 ++#define CXL_MEM_COMMAND_ID_SET_TIMESTAMP_PAYLOAD_IN_SIZE 8 ++ ++CXL_EXPORT int cxl_memdev_set_timestamp(struct cxl_memdev *memdev, u64 timestamp) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ __le64 *timestamp_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_SET_TIMESTAMP_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_SET_TIMESTAMP_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ timestamp_in = (void *) cmd->send_cmd->in.payload; ++ *timestamp_in = cpu_to_le64(timestamp); ++ fprintf(stdout, "setting 
timestamp to: 0x%lx\n", le64_to_cpu(*timestamp_in)); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_SET_TIMESTAMP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_SET_TIMESTAMP); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "command completed successfully\n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++struct cxl_mbox_get_alert_config_out { ++ u8 valid_alerts; ++ u8 programmable_alerts; ++ u8 life_used_critical_alert_threshold; ++ u8 life_used_prog_warn_threshold; ++ __le16 dev_over_temp_crit_alert_threshold; ++ __le16 dev_under_temp_crit_alert_threshold; ++ __le16 dev_over_temp_prog_warn_threshold; ++ __le16 dev_under_temp_prog_warn_threshold; ++ __le16 corr_vol_mem_err_prog_warn_thresold; ++ __le16 corr_pers_mem_err_prog_warn_threshold; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_alert_config(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_get_alert_config_out *alert_config_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_ALERT_CONFIG); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_ALERT_CONFIG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_ALERT_CONFIG); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "alert_config summary\n"); ++ //fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ ++ alert_config_out = (void *)cmd->send_cmd->out.payload; ++ ++ fprintf(stdout, " valid_alerts: 0x%x\n", alert_config_out->valid_alerts); ++ fprintf(stdout, " programmable_alerts: 0x%x\n", alert_config_out->programmable_alerts); ++ fprintf(stdout, " life_used_critical_alert_threshold: 0x%x\n", ++ alert_config_out->life_used_critical_alert_threshold); ++ fprintf(stdout, " life_used_prog_warn_threshold: 0x%x\n", ++ alert_config_out->life_used_prog_warn_threshold); ++ ++ fprintf(stdout, " dev_over_temp_crit_alert_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->dev_over_temp_crit_alert_threshold)); ++ fprintf(stdout, " dev_under_temp_crit_alert_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->dev_under_temp_crit_alert_threshold)); ++ fprintf(stdout, " dev_over_temp_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->dev_over_temp_prog_warn_threshold)); ++ fprintf(stdout, " dev_under_temp_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->dev_under_temp_prog_warn_threshold)); ++ fprintf(stdout, " corr_vol_mem_err_prog_warn_thresold: 0x%x\n", ++ le16_to_cpu(alert_config_out->corr_vol_mem_err_prog_warn_thresold)); ++ 
fprintf(stdout, " corr_pers_mem_err_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->corr_pers_mem_err_prog_warn_threshold)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++struct cxl_mbox_set_alert_config_in { ++ u8 valid_alert_actions; ++ u8 enable_alert_actions; ++ u8 life_used_prog_warn_threshold; ++ u8 reserved; ++ __le16 dev_over_temp_prog_warn_threshold; ++ __le16 dev_under_temp_prog_warn_threshold; ++ __le16 corr_vol_mem_err_prog_warn_thresold; ++ __le16 corr_pers_mem_err_prog_warn_threshold; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_set_alert_config(struct cxl_memdev *memdev, u32 alert_prog_threshold, ++ u32 device_temp_threshold, u32 mem_error_threshold) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_set_alert_config_in *alert_config_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_SET_ALERT_CONFIG); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ alert_config_in = (void *) cmd->send_cmd->in.payload; ++ ++ alert_prog_threshold = cpu_to_be32(alert_prog_threshold); ++ device_temp_threshold = cpu_to_be32(device_temp_threshold); ++ mem_error_threshold = cpu_to_be32(mem_error_threshold); ++ ++ alert_config_in->valid_alert_actions = ((alert_prog_threshold >> 8) & 0xff); ++ alert_config_in->enable_alert_actions = ((alert_prog_threshold >> 16) & 0xff); ++ alert_config_in->life_used_prog_warn_threshold = ((alert_prog_threshold >> 24) & 0xff); ++ alert_config_in->reserved = 0; ++ ++ alert_config_in->dev_over_temp_prog_warn_threshold = cpu_to_le16(be16_to_cpu(((device_temp_threshold) & 0xffff))); ++ alert_config_in->dev_under_temp_prog_warn_threshold = cpu_to_le16(be16_to_cpu((((device_temp_threshold) >> 16) & 0xffff))); ++ ++ alert_config_in->corr_vol_mem_err_prog_warn_thresold = cpu_to_le16(be16_to_cpu((mem_error_threshold & 0xffff))); ++ alert_config_in->corr_pers_mem_err_prog_warn_threshold = cpu_to_le16(be16_to_cpu(((mem_error_threshold >> 16) & 0xffff))); ++ ++ fprintf(stdout, "alert_config settings\n"); ++ fprintf(stdout, " valid_alert_actions: 0x%x\n", alert_config_in->valid_alert_actions); ++ fprintf(stdout, " enable_alert_actions: 0x%x\n", alert_config_in->enable_alert_actions); ++ fprintf(stdout, " life_used_prog_warn_threshold: 0x%x\n", alert_config_in->life_used_prog_warn_threshold); ++ fprintf(stdout, " dev_over_temp_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_in->dev_over_temp_prog_warn_threshold)); ++ fprintf(stdout, " dev_under_temp_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_in->dev_under_temp_prog_warn_threshold)); ++ fprintf(stdout, " corr_vol_mem_err_prog_warn_thresold: 0x%x\n", ++ le16_to_cpu(alert_config_in->corr_vol_mem_err_prog_warn_thresold)); ++ fprintf(stdout, " corr_pers_mem_err_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_in->corr_pers_mem_err_prog_warn_threshold)); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_SET_ALERT_CONFIG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_SET_ALERT_CONFIG); ++ 
return -EINVAL; ++ } ++ ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++struct cxl_health_info { ++ u8 health_state; ++ u8 media_status; ++ u8 additional_status; ++ u8 life_used; ++ __le16 device_temp; ++ __le32 dirty_shutdown_count; ++ __le32 corr_vol_mem_err_count; ++ __le32 corr_pers_mem_err_count; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_health_info(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_health_info *health_info; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_HEALTH_INFO); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_memdev_get_health_info returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_HEALTH_INFO) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_HEALTH_INFO); ++ return -EINVAL; ++ } ++ ++ if (cmd->send_cmd->out.size != sizeof(*health_info)) { ++ fprintf(stderr, "%s: invalid payload output size (got: %d, required: %ld)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->out.size, sizeof(*health_info)); ++ return -EINVAL; ++ } ++ ++ health_info = (void *)cmd->send_cmd->out.payload; ++ ++ fprintf(stdout, "Device Health Info\n"); ++ fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ fprintf(stdout, " health_state: 0x%x\n", health_info->health_state); ++ fprintf(stdout, " media_status: 0x%x\n", health_info->media_status); ++ fprintf(stdout, " additional_status: 0x%x\n", health_info->additional_status); ++ fprintf(stdout, " life_used: 0x%x\n", health_info->life_used); ++ fprintf(stdout, " device_temp: 0x%x\n", le16_to_cpu(health_info->device_temp)); ++ fprintf(stdout, " dirty_shutdown_count: 0x%x\n", le32_to_cpu(health_info->dirty_shutdown_count)); ++ fprintf(stdout, " corr_vol_mem_err_count: 0x%x\n", le32_to_cpu(health_info->corr_vol_mem_err_count)); ++ fprintf(stdout, " corr_pers_mem_err_count: 0x%x\n", le32_to_cpu(health_info->corr_pers_mem_err_count)); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS_OPCODE 0x100 ++#define CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS_PAYLOAD_IN_SIZE 0x1 ++#define CXL_MAX_RECORDS_TO_DUMP 20 ++ ++#define CXL_DRAM_EVENT_GUID "601dcbb3-9c06-4eab-b8af-4e9bfb5c9624" ++#define CXL_MEM_MODULE_EVENT_GUID "fe927475-dd59-4339-a586-79bab113b774" ++ ++struct cxl_dram_event_record { ++ __le64 physical_addr; ++ u8 memory_event_descriptor; ++ u8 memory_event_type; ++ u8 transaction_type; ++ __le16 validity_flags; ++ u8 channel; ++ u8 rank; ++ u8 nibble_mask[3]; ++ u8 bank_group; ++ u8 bank; ++ u8 row[3]; ++ __le16 column; ++ u8 correction_mask[0x20]; ++ u8 reserved[0x17]; ++} __attribute__((packed)); ++ ++struct cxl_memory_module_record { ++ u8 dev_event_type; ++ u8 dev_health_info[0x12]; ++ u8 reserved[0x3d]; ++}__attribute__((packed)); ++ ++struct cxl_event_record { ++ uuid_t uuid; ++ u8 event_record_length; ++ u8 event_record_flags[3]; ++ __le16 
event_record_handle; ++ __le16 related_event_record_handle; ++ __le64 event_record_ts; ++ u8 reserved[0x10]; ++ union { ++ struct cxl_dram_event_record dram_event_record; ++ struct cxl_memory_module_record memory_module_record; ++ } event_record; ++} __attribute__((packed)); ++ ++struct cxl_get_event_record_info { ++ u8 flags; ++ u8 reserved1; ++ __le16 overflow_err_cnt; ++ __le64 first_overflow_evt_ts; ++ __le64 last_overflow_evt_ts; ++ __le16 event_record_count; ++ u8 reserved2[0xa]; ++ struct cxl_event_record event_records[]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_event_records(struct cxl_memdev *memdev, u8 event_log_type) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_get_event_record_info *event_info; ++ int rc = 0; ++ int rec; ++ int indent = 2; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fprintf(stdout, "in size: 0x%x\n", cmd->send_cmd->in.size); ++ fprintf(stdout, "Getting Event Records for %d type\n", event_log_type); ++ * ((u8 *) cmd->send_cmd->in.payload) = event_log_type; ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS); ++ return -EINVAL; ++ } ++ ++ event_info = (void *)cmd->send_cmd->out.payload; ++ ++ fprintf(stdout, "cxl_dram_event_record size: 0x%lx\n", sizeof(struct cxl_dram_event_record)); ++ fprintf(stdout, "cxl_memory_module_record size: 0x%lx\n", sizeof(struct cxl_memory_module_record)); ++ fprintf(stdout, "cxl_event_record size: 0x%lx\n", sizeof(struct cxl_event_record)); ++ fprintf(stdout, "cxl_get_event_record_info size: 0x%lx\n", sizeof(struct cxl_get_event_record_info)); ++ fprintf(stdout, "========= Get Event Records Info =========\n"); ++ fprintf(stdout, "%*sout size: 0x%x\n", indent, "", cmd->send_cmd->out.size); ++ fprintf(stdout, "%*sflags: 0x%x\n", indent, "", event_info->flags); ++ fprintf(stdout, "%*soverflow_err_cnt: 0x%x\n", indent, "", le16_to_cpu(event_info->overflow_err_cnt)); ++ fprintf(stdout, "%*sfirst_overflow_evt_ts: 0x%lx\n", indent, "", le64_to_cpu(event_info->first_overflow_evt_ts)); ++ fprintf(stdout, "%*slast_overflow_evt_ts: 0x%lx\n", indent, "", le64_to_cpu(event_info->last_overflow_evt_ts)); ++ fprintf(stdout, "%*sevent_record_count: 0x%x\n", indent, "", le16_to_cpu(event_info->event_record_count)); ++ ++ for (rec = 0; rec < min(CXL_MAX_RECORDS_TO_DUMP, le16_to_cpu(event_info->event_record_count)); 
++rec) { ++ char uuid[40]; ++ struct cxl_event_record *event_record = &event_info->event_records[rec]; ++ ++ uuid_unparse(event_info->event_records[rec].uuid, uuid); ++ ++ if (strcmp(uuid, CXL_DRAM_EVENT_GUID) == 0) ++ fprintf(stdout, "%*sEvent Record: %d (DRAM guid: %s)\n", indent, "", rec, uuid); ++ else if (strcmp(uuid, CXL_MEM_MODULE_EVENT_GUID) == 0) ++ fprintf(stdout, "%*sEvent Record: %d (Memory Module Event guid: %s)\n", indent, "", rec, uuid); ++ else ++ fprintf(stdout, "%*sEvent Record: %d (uuid: %s)\n", indent, "", rec, uuid); ++ ++ fprintf(stdout, "%*sevent_record_length: 0x%x\n", indent+2, "", event_record->event_record_length); ++ fprintf(stdout, "%*sevent_record_flags: 0x%02x%02x%02x\n", indent+2, "", event_record->event_record_flags[0], ++ event_record->event_record_flags[1], event_record->event_record_flags[2]); ++ fprintf(stdout, "%*sevent_record_handle: 0x%x\n", indent+2, "", le16_to_cpu(event_record->event_record_handle)); ++ fprintf(stdout, "%*srelated_event_record_handle: 0x%x\n", indent+2, "", ++ le16_to_cpu(event_record->related_event_record_handle)); ++ fprintf(stdout, "%*sevent_record_ts: 0x%lx\n", indent+2, "", le64_to_cpu(event_record->event_record_ts)); ++ ++ if (strcmp(uuid, CXL_DRAM_EVENT_GUID) == 0){ ++ struct cxl_dram_event_record *dram_event = &event_record->event_record.dram_event_record; ++ fprintf(stdout, "%*sphysical_addr: 0x%lx\n", indent+2, "", le64_to_cpu(dram_event->physical_addr)); ++ fprintf(stdout, "%*smemory_event_descriptor: 0x%x\n", indent+2, "", dram_event->memory_event_descriptor); ++ fprintf(stdout, "%*smemory_event_type: 0x%x\n", indent+2, "", dram_event->memory_event_type); ++ fprintf(stdout, "%*stransaction_type: 0x%x\n", indent+2, "", dram_event->transaction_type); ++ fprintf(stdout, "%*svalidity_flags: 0x%x\n", indent+2, "", le16_to_cpu(dram_event->validity_flags)); ++ fprintf(stdout, "%*schannel: 0x%x\n", indent+2, "", dram_event->channel); ++ fprintf(stdout, "%*srank: 0x%x\n", indent+2, "", dram_event->rank); ++ fprintf(stdout, "%*snibble_mask: 0x%02x%02x%02x\n", indent+2, "", ++ dram_event->nibble_mask[0], dram_event->nibble_mask[1], ++ dram_event->nibble_mask[2]); ++ fprintf(stdout, "%*sbank_group: 0x%x\n", indent+2, "", dram_event->bank_group); ++ fprintf(stdout, "%*sbank: 0x%x\n", indent+2, "", dram_event->bank); ++ fprintf(stdout, "%*srow: 0x%02x%02x%02x\n", indent+2, "", dram_event->row[0], ++ dram_event->row[1], dram_event->row[2]); ++ fprintf(stdout, "%*scolumn: 0x%x\n", indent+2, "", le16_to_cpu(dram_event->column)); ++ } ++ } ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++// GET_LD_INFO START ++#define CXL_MEM_COMMAND_ID_GET_LD_INFO CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_GET_LD_INFO_OPCODE 0x5400 ++#define CXL_MEM_COMMAND_ID_GET_LD_INFO_PAYLOAD_OUT_SIZE 0xb ++ ++struct cxl_get_ld_info { ++ __le64 mem_size; ++ __le16 ld_cnt; ++ u8 qos_telemetry_capa; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_ld_info(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_get_ld_info *ld_info; ++ int rc = 0; ++ int rec; ++ int indent = 2; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_GET_LD_INFO_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ cmd->send_cmd->in.size = 0; ++ ++ fprintf(stdout, "Getting LD info for memdev %d\n", cxl_memdev_get_devname(memdev)); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d 
(%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_LD_INFO) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_LD_INFO); ++ return -EINVAL; ++ } ++ ++ ld_info = (void *)cmd->send_cmd->out.payload; ++ ++ fprintf(stdout, "========= Get LD Info =========\n"); ++ fprintf(stdout, "%*sout size: 0x%x\n", indent, "", cmd->send_cmd->out.size); ++ fprintf(stdout, "%*smemory size: 0x%lu\n", indent, "", ld_info->mem_size); ++ fprintf(stdout, "%*sld count: 0x%x\n", indent, "", le16_to_cpu(ld_info->ld_cnt)); ++ fprintf(stdout, "%*sqos telemetry capability: 0x%x\n", indent, "", ld_info->qos_telemetry_capa); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++// GET_LD_INFO END ++ ++#define CXL_MEM_COMMAND_ID_DDR_INFO CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_DDR_INFO_OPCODE 0xC500 ++#define CXL_MEM_COMMAND_ID_DDR_INFO_PAYLOAD_IN_SIZE 0x1 ++#define CXL_MEM_COMMAND_ID_DDR_INFO_PAYLOAD_OUT_SIZE 0x8 ++ ++struct cxl_ddr_info { ++ __le32 mstr_reg; ++ __le32 dram_width; ++} __attribute__((packed)); ++ ++const char* ddr_width[] = {"X4_DEVICE", "X8_DEVICE", "X16_DEVICE", "X32_DEVICE"}; ++ ++CXL_EXPORT int cxl_memdev_ddr_info(struct cxl_memdev *memdev, u8 ddr_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_ddr_info *ddr_info; ++ int rc = 0; ++ int rec; ++ int indent = 2; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_DDR_INFO_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_DDR_INFO_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ * ((u8 *) cmd->send_cmd->in.payload) = ddr_id; ++ ++ fprintf(stdout, "Getting ddr info for memdev %d\n", cxl_memdev_get_devname(memdev)); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_DDR_INFO) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_DDR_INFO); ++ return -EINVAL; ++ } ++ ++ ddr_info = (void *)cmd->send_cmd->out.payload; ++ ++ fprintf(stdout, "========= DDR Info =========\n"); ++ fprintf(stdout, "%*sddr controller mstr register: 0x%d\n", indent, "", ddr_info->mstr_reg); ++ fprintf(stdout, "%*sdram width derived from device config: 0x%s\n", indent, "", ddr_width[le32_to_cpu(ddr_info->dram_width)]); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define 
CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS_OPCODE 0x101 ++ ++struct cxl_clear_event_record_info { ++ u8 event_log_type; ++ u8 clear_event_flags; ++ u8 no_event_record_handles; ++ u8 reserved[3]; ++ __le16 event_record_handles[]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_clear_event_records(struct cxl_memdev *memdev, u8 event_log_type, ++ u8 clear_event_flags, u8 no_event_record_handles, u16 *event_record_handles) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_clear_event_record_info *event_info; ++ int rc = 0; ++ int rec; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = sizeof(*event_info) + (no_event_record_handles * sizeof(__le16)); ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fprintf(stdout, "in size: 0x%x\n", cmd->send_cmd->in.size); ++ if (clear_event_flags) ++ fprintf(stdout, "Clearing 'All Event' Records for type %d\n", event_log_type); ++ ++ event_info = (struct cxl_clear_event_record_info *) cmd->send_cmd->in.payload; ++ event_info->event_log_type = event_log_type; ++ event_info->clear_event_flags = clear_event_flags; ++ event_info->no_event_record_handles = no_event_record_handles; ++ for (rec = 0; rec < event_info->no_event_record_handles; ++rec) { ++ fprintf(stdout, "Clearing Event Record 0x%x for %d type\n", event_record_handles[rec], event_log_type); ++ event_info->event_record_handles[rec] = cpu_to_le16(event_record_handles[rec]); ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "Clear Event Records command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++/* insert here */ +diff --git a/cligen/gen/base.libcxl.h b/cligen/gen/base.libcxl.h +new file mode 100644 +index 0000000..abca59e +--- /dev/null ++++ b/cligen/gen/base.libcxl.h +@@ -0,0 +1,110 @@ ++/* SPDX-License-Identifier: LGPL-2.1 */ ++/* Copyright (C) 2020-2021, Intel Corporation. All rights reserved. 
*/ ++#ifndef _LIBCXL_H_ ++#define _LIBCXL_H_ ++ ++#include ++#include ++#include ++#include ++ ++#ifdef HAVE_UUID ++#include ++#else ++typedef unsigned char uuid_t[16]; ++#endif ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++struct cxl_ctx; ++struct cxl_ctx *cxl_ref(struct cxl_ctx *ctx); ++void cxl_unref(struct cxl_ctx *ctx); ++int cxl_new(struct cxl_ctx **ctx); ++void cxl_set_log_fn(struct cxl_ctx *ctx, ++ void (*log_fn)(struct cxl_ctx *ctx, int priority, ++ const char *file, int line, const char *fn, ++ const char *format, va_list args)); ++int cxl_get_log_priority(struct cxl_ctx *ctx); ++void cxl_set_log_priority(struct cxl_ctx *ctx, int priority); ++void cxl_set_userdata(struct cxl_ctx *ctx, void *userdata); ++void *cxl_get_userdata(struct cxl_ctx *ctx); ++void cxl_set_private_data(struct cxl_ctx *ctx, void *data); ++void *cxl_get_private_data(struct cxl_ctx *ctx); ++ ++struct cxl_memdev; ++struct cxl_memdev *cxl_memdev_get_first(struct cxl_ctx *ctx); ++struct cxl_memdev *cxl_memdev_get_next(struct cxl_memdev *memdev); ++int cxl_memdev_get_id(struct cxl_memdev *memdev); ++const char *cxl_memdev_get_devname(struct cxl_memdev *memdev); ++int cxl_memdev_get_major(struct cxl_memdev *memdev); ++int cxl_memdev_get_minor(struct cxl_memdev *memdev); ++struct cxl_ctx *cxl_memdev_get_ctx(struct cxl_memdev *memdev); ++unsigned long long cxl_memdev_get_pmem_size(struct cxl_memdev *memdev); ++unsigned long long cxl_memdev_get_ram_size(struct cxl_memdev *memdev); ++const char *cxl_memdev_get_firmware_verison(struct cxl_memdev *memdev); ++size_t cxl_memdev_get_lsa_size(struct cxl_memdev *memdev); ++int cxl_memdev_is_active(struct cxl_memdev *memdev); ++int cxl_memdev_zero_lsa(struct cxl_memdev *memdev); ++int cxl_memdev_get_lsa(struct cxl_memdev *memdev, void *buf, size_t length, ++ size_t offset); ++int cxl_memdev_set_lsa(struct cxl_memdev *memdev, void *buf, size_t length, ++ size_t offset); ++int cxl_memdev_cmd_identify(struct cxl_memdev *memdev); ++int cxl_memdev_get_supported_logs(struct cxl_memdev *memdev); ++int cxl_memdev_get_cel_log(struct cxl_memdev *memdev); ++int cxl_memdev_get_event_interrupt_policy(struct cxl_memdev *memdev); ++int cxl_memdev_set_event_interrupt_policy(struct cxl_memdev *memdev, u32 int_policy); ++int cxl_memdev_get_timestamp(struct cxl_memdev *memdev); ++int cxl_memdev_set_timestamp(struct cxl_memdev *memdev, u64 timestamp); ++int cxl_memdev_get_alert_config(struct cxl_memdev *memdev); ++int cxl_memdev_set_alert_config(struct cxl_memdev *memdev, u32 alert_prog_threshold, ++ u32 device_temp_threshold, u32 mem_error_threshold); ++int cxl_memdev_get_health_info(struct cxl_memdev *memdev); ++int cxl_memdev_get_event_records(struct cxl_memdev *memdev, u8 event_log_type); ++int cxl_memdev_get_ld_info(struct cxl_memdev *memdev); ++int cxl_memdev_ddr_info(struct cxl_memdev *memdev, u8 ddr_id); ++int cxl_memdev_clear_event_records(struct cxl_memdev *memdev, u8 event_log_type, ++ u8 clear_event_flags, u8 no_event_record_handles, u16 *event_record_handles); ++/* insert here */ ++ ++#define cxl_memdev_foreach(ctx, memdev) \ ++ for (memdev = cxl_memdev_get_first(ctx); \ ++ memdev != NULL; \ ++ memdev = cxl_memdev_get_next(memdev)) ++ ++struct cxl_cmd; ++const char *cxl_cmd_get_devname(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_raw(struct cxl_memdev *memdev, int opcode); ++int cxl_cmd_set_input_payload(struct cxl_cmd *cmd, void *in, int size); ++int cxl_cmd_set_output_payload(struct cxl_cmd *cmd, void *out, int size); ++void cxl_cmd_ref(struct cxl_cmd *cmd); ++void 
cxl_cmd_unref(struct cxl_cmd *cmd); ++int cxl_cmd_submit(struct cxl_cmd *cmd); ++int cxl_cmd_get_mbox_status(struct cxl_cmd *cmd); ++int cxl_cmd_get_out_size(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_identify(struct cxl_memdev *memdev); ++int cxl_cmd_identify_get_fw_rev(struct cxl_cmd *cmd, char *fw_rev, int fw_len); ++unsigned long long cxl_cmd_identify_get_partition_align(struct cxl_cmd *cmd); ++unsigned int cxl_cmd_identify_get_lsa_size(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_get_health_info(struct cxl_memdev *memdev); ++int cxl_cmd_get_health_info_get_health_status(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_media_status(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_ext_status(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_life_used(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_temperature(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_dirty_shutdowns(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_volatile_errors(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_pmem_errors(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_get_lsa(struct cxl_memdev *memdev, ++ unsigned int offset, unsigned int length); ++void *cxl_cmd_get_lsa_get_payload(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_set_lsa(struct cxl_memdev *memdev, ++ void *buf, unsigned int offset, unsigned int length); ++ ++#ifdef __cplusplus ++} /* extern "C" */ ++#endif ++ ++#endif +diff --git a/cligen/gen/base.libcxl.sym b/cligen/gen/base.libcxl.sym +new file mode 100644 +index 0000000..4bd9497 +--- /dev/null ++++ b/cligen/gen/base.libcxl.sym +@@ -0,0 +1,81 @@ ++LIBCXL_1 { ++global: ++ cxl_get_userdata; ++ cxl_set_userdata; ++ cxl_get_private_data; ++ cxl_set_private_data; ++ cxl_ref; ++ cxl_get_log_priority; ++ cxl_set_log_fn; ++ cxl_unref; ++ cxl_set_log_priority; ++ cxl_new; ++local: ++ *; ++}; ++ ++LIBCXL_2 { ++global: ++ cxl_memdev_get_first; ++ cxl_memdev_get_next; ++ cxl_memdev_get_id; ++ cxl_memdev_get_devname; ++ cxl_memdev_get_major; ++ cxl_memdev_get_minor; ++ cxl_memdev_get_ctx; ++ cxl_memdev_get_pmem_size; ++ cxl_memdev_get_ram_size; ++ cxl_memdev_get_firmware_verison; ++} LIBCXL_1; ++ ++LIBCXL_3 { ++global: ++ cxl_cmd_get_devname; ++ cxl_cmd_new_raw; ++ cxl_cmd_set_input_payload; ++ cxl_cmd_set_output_payload; ++ cxl_cmd_ref; ++ cxl_cmd_unref; ++ cxl_cmd_submit; ++ cxl_cmd_get_mbox_status; ++ cxl_cmd_get_out_size; ++ cxl_cmd_new_identify; ++ cxl_cmd_identify_get_fw_rev; ++ cxl_cmd_identify_get_partition_align; ++ cxl_cmd_identify_get_lsa_size; ++ cxl_cmd_new_get_health_info; ++ cxl_cmd_get_health_info_get_health_status; ++ cxl_cmd_get_health_info_get_media_status; ++ cxl_cmd_get_health_info_get_ext_status; ++ cxl_cmd_get_health_info_get_life_used; ++ cxl_cmd_get_health_info_get_temperature; ++ cxl_cmd_get_health_info_get_dirty_shutdowns; ++ cxl_cmd_get_health_info_get_volatile_errors; ++ cxl_cmd_get_health_info_get_pmem_errors; ++ cxl_cmd_new_get_lsa; ++ cxl_cmd_get_lsa_get_payload; ++} LIBCXL_2; ++ ++LIBCXL_4 { ++global: ++ cxl_memdev_get_lsa_size; ++ cxl_memdev_is_active; ++ cxl_cmd_new_set_lsa; ++ cxl_memdev_zero_lsa; ++ cxl_memdev_set_lsa; ++ cxl_memdev_get_lsa; ++ cxl_memdev_cmd_identify; ++ cxl_memdev_get_supported_logs; ++ cxl_memdev_get_cel_log; ++ cxl_memdev_get_event_interrupt_policy; ++ cxl_memdev_set_event_interrupt_policy; ++ cxl_memdev_get_timestamp; ++ cxl_memdev_set_timestamp; ++ cxl_memdev_get_alert_config; ++ cxl_memdev_set_alert_config; ++ cxl_memdev_get_health_info; ++ cxl_memdev_get_event_records; ++ 
cxl_memdev_get_ld_info; ++ cxl_memdev_ddr_info; ++ cxl_memdev_clear_event_records; ++} LIBCXL_3; +diff --git a/cligen/gen/base.memdev.c b/cligen/gen/base.memdev.c +new file mode 100644 +index 0000000..56c3ebd +--- /dev/null ++++ b/cligen/gen/base.memdev.c +@@ -0,0 +1,767 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (C) 2020-2021 Intel Corporation. All rights reserved. */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct action_context { ++ FILE *f_out; ++ FILE *f_in; ++}; ++ ++static struct parameters { ++ const char *outfile; ++ const char *infile; ++ unsigned len; ++ unsigned offset; ++ bool verbose; ++} param; ++ ++#define fail(fmt, ...) \ ++do { \ ++ fprintf(stderr, "cxl-%s:%s:%d: " fmt, \ ++ VERSION, __func__, __LINE__, ##__VA_ARGS__); \ ++} while (0) ++ ++#define BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", ¶m.verbose, "turn on debug") ++ ++#define READ_OPTIONS() \ ++OPT_STRING('o', "output", ¶m.outfile, "output-file", \ ++ "filename to write label area contents") ++ ++#define WRITE_OPTIONS() \ ++OPT_STRING('i', "input", ¶m.infile, "input-file", \ ++ "filename to read label area data") ++ ++#define LABEL_OPTIONS() \ ++OPT_UINTEGER('s', "size", ¶m.len, "number of label bytes to operate"), \ ++OPT_UINTEGER('O', "offset", ¶m.offset, \ ++ "offset into the label area to start operation") ++ ++static const struct option read_options[] = { ++ BASE_OPTIONS(), ++ LABEL_OPTIONS(), ++ READ_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option write_options[] = { ++ BASE_OPTIONS(), ++ LABEL_OPTIONS(), ++ WRITE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option zero_options[] = { ++ BASE_OPTIONS(), ++ LABEL_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_identify_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_supported_logs_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_cel_log_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_event_interrupt_policy_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _interrupt_policy_params { ++ u32 policy; ++ bool verbose; ++} interrupt_policy_params; ++ ++#define INT_POLICY_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &interrupt_policy_params.verbose, "turn on debug") ++ ++#define SET_INTERRUPT_POLICY_OPTIONS() \ ++OPT_UINTEGER('i', "int_policy", &interrupt_policy_params.policy, "Set event interrupt policy. 
Fields: Informational Event Log Interrupt Settings (1B), Warning Event Log Interrupt Settings (1B), Failure Event Log Interrupt Settings (1B), Fatal Event Log Interrupt Settings (1B)") ++ ++static const struct option cmd_set_event_interrupt_policy_options[] = { ++ INT_POLICY_BASE_OPTIONS(), ++ SET_INTERRUPT_POLICY_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_timestamp_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ts_params { ++ u64 timestamp; ++ bool verbose; ++} ts_params; ++ ++#define TS_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &ts_params.verbose, "turn on debug") ++ ++#define SET_TIMESTAMP_OPTIONS() \ ++OPT_U64('t', "timestamp", &ts_params.timestamp, "Set the timestamp on the device") ++ ++static const struct option cmd_set_timestamp_options[] = { ++ TS_BASE_OPTIONS(), ++ SET_TIMESTAMP_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_alert_config_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _alert_config_params { ++ u32 alert_prog_threshold; ++ u32 device_temp_threshold; ++ u32 mem_error_threshold; ++ bool verbose; ++} alert_config_params; ++ ++#define SET_ALERT_CONFIG_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &interrupt_policy_params.verbose, "turn on debug") ++ ++#define SET_ALERT_CONFIG_OPTIONS() \ ++OPT_UINTEGER('a', "alert_prog_threshold", &alert_config_params.alert_prog_threshold, "Set valid, enable alert actions and life used programmable threshold. Fields: Valid Alert Actions (1B), Enable Alert Actions (1B), Life Used Programmable Warning Threshold (1B)"), \ ++OPT_UINTEGER('d', "device_temp_threshold", &alert_config_params.device_temp_threshold, "Set device over/under temp thresholds. Fields: Device Over-Temperature Programmable Warning Threshold (2B), Device Under-Temperature Programmable Warning Threshold (2B)"), \ ++OPT_UINTEGER('m', "mem_error_threshold", &alert_config_params.mem_error_threshold, "Set memory corrected thresholds. 
Fields: Corrected Volatile Memory Error Programmable Warning Threshold (2B), Corrected Persistent Memory Error Programmable Warning Threshold (2B)") ++ ++static const struct option cmd_set_alert_config_options[] = { ++ SET_ALERT_CONFIG_BASE_OPTIONS(), ++ SET_ALERT_CONFIG_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_health_info_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _get_ld_info_params { ++ bool verbose; ++} get_ld_info_params; ++ ++#define GET_LD_INFO_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &get_ld_info_params.verbose, "turn on debug") ++ ++static const struct option cmd_get_ld_info_options[] = { ++ GET_LD_INFO_BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ddr_info_params { ++ bool verbose; ++ int ddr_id; ++} ddr_info_params; ++ ++#define DDR_INFO_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &ddr_info_params.verbose, "turn on debug") ++ ++#define DDR_INFO_OPTIONS() \ ++OPT_INTEGER('i', "ddr_id", &ddr_info_params.ddr_id, "DDR instance id") ++ ++ ++static const struct option cmd_ddr_info_options[] = { ++ DDR_INFO_BASE_OPTIONS(), ++ DDR_INFO_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _get_event_records_params { ++ int event_log_type; /* 00 - information, 01 - warning, 02 - failure, 03 - fatal */ ++ bool verbose; ++} get_event_records_params; ++ ++#define GET_EVENT_RECORDS_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &get_event_records_params.verbose, "turn on debug") ++ ++#define GET_EVENT_RECORDS_OPTIONS() \ ++OPT_INTEGER('t', "log_type", &get_event_records_params.event_log_type, "Event log type (00 - information (default), 01 - warning, 02 - failure, 03 - fatal)") ++ ++static const struct option cmd_get_event_records_options[] = { ++ GET_EVENT_RECORDS_BASE_OPTIONS(), ++ GET_EVENT_RECORDS_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _clear_event_records_params { ++ int event_log_type; /* 00 - information, 01 - warning, 02 - failure, 03 - fatal */ ++ int clear_event_flags; /* bit 0 - when set, clears all events */ ++ unsigned event_record_handle; /* only one is supported */ ++ bool verbose; ++} clear_event_records_params; ++ ++#define CLEAR_EVENT_RECORDS_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &clear_event_records_params.verbose, "turn on debug") ++ ++#define CLEAR_EVENT_RECORDS_OPTIONS() \ ++OPT_INTEGER('t', "log_type", &clear_event_records_params.event_log_type, "Event log type (00 - information (default), 01 - warning, 02 - failure, 03 - fatal)"), \ ++OPT_INTEGER('f', "event_flag", &clear_event_records_params.clear_event_flags, "Clear Event Flags: 1 - clear all events, 0 (default) - clear specific event record"), \ ++OPT_UINTEGER('i', "event_record_handle", &clear_event_records_params.event_record_handle, "Clear Specific Event specific by Event Record Handle") ++ ++static const struct option cmd_clear_event_records_options[] = { ++ CLEAR_EVENT_RECORDS_BASE_OPTIONS(), ++ CLEAR_EVENT_RECORDS_OPTIONS(), ++ OPT_END(), ++}; ++ ++/* insert here params options */ ++ ++static int action_cmd_clear_event_records(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ u16 record_handle; ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort clear_event_records\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ if (clear_event_records_params.clear_event_flags) { ++ record_handle = 0; ++ return cxl_memdev_clear_event_records(memdev, clear_event_records_params.event_log_type, ++ clear_event_records_params.clear_event_flags, 0, &record_handle); ++ } ++ else { ++ 
record_handle = (u16) clear_event_records_params.event_record_handle; ++ return cxl_memdev_clear_event_records(memdev, clear_event_records_params.event_log_type, ++ clear_event_records_params.clear_event_flags, 1, &record_handle); ++ } ++} ++ ++static int action_cmd_get_event_records(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort get_event_records\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++#if 0 ++ if (get_event_records_params.event_log_type < 0 || get_event_records_params.event_log_type > 3) { ++ fprintf(stderr, "%s: Invalid Event Log type: %d, Allowed values Event log type " ++ "(00 - information (default), 01 - warning, 02 - failure, 03 - fatal)\n", ++ cxl_memdev_get_devname(memdev), get_event_records_params.event_log_type); ++ return -EINVAL; ++ } ++#endif ++ ++ return cxl_memdev_get_event_records(memdev, get_event_records_params.event_log_type); ++} ++ ++static int action_cmd_get_ld_info(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort get_ld_info\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_ld_info(memdev); ++} ++ ++static int action_cmd_ddr_info(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ddr_info\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ fprintf(stdout, "memdev id: %d", cxl_memdev_get_id(memdev)); ++ return cxl_memdev_ddr_info(memdev, ddr_info_params.ddr_id); ++} ++ ++static int action_cmd_get_health_info(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort get_health_info\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_health_info(memdev); ++} ++ ++static int action_cmd_get_alert_config(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort get_alert_config\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_alert_config(memdev); ++} ++ ++static int action_cmd_set_alert_config(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort set_alert_config\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_set_alert_config(memdev, alert_config_params.alert_prog_threshold, ++ alert_config_params.device_temp_threshold, alert_config_params.mem_error_threshold); ++} ++ ++static int action_cmd_get_timestamp(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, get_timestamp\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_timestamp(memdev); ++} ++ ++static int action_cmd_set_timestamp(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, set_timestamp\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ printf("timestamp: 0x%lx (%ld)\n", ts_params.timestamp, ts_params.timestamp); ++ return cxl_memdev_set_timestamp(memdev, ts_params.timestamp); ++} ++ ++static int action_cmd_get_event_interrupt_policy(struct 
cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, get_event_interrupt_policy\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_event_interrupt_policy(memdev); ++} ++ ++static int action_cmd_set_event_interrupt_policy(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, set_event_interrupt_policy\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_set_event_interrupt_policy(memdev, interrupt_policy_params.policy); ++} ++ ++static int action_cmd_get_cel_log(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, get_cel_log\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_cel_log(memdev); ++} ++ ++static int action_cmd_get_supported_logs(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, get_supported_logs\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_supported_logs(memdev); ++} ++ ++static int action_cmd_identify(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, cmd_identify\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_cmd_identify(memdev); ++} ++ ++/* insert here action */ ++ ++static int action_zero(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ int rc; ++ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort label write\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ rc = cxl_memdev_zero_lsa(memdev); ++ if (rc < 0) ++ fprintf(stderr, "%s: label zeroing failed: %s\n", ++ cxl_memdev_get_devname(memdev), strerror(-rc)); ++ ++ return rc; ++} ++ ++static int action_write(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ size_t size = param.len, read_len; ++ unsigned char *buf; ++ int rc; ++ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s is active, abort label write\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ if (!size) { ++ size_t lsa_size = cxl_memdev_get_lsa_size(memdev); ++ ++ fseek(actx->f_in, 0L, SEEK_END); ++ size = ftell(actx->f_in); ++ fseek(actx->f_in, 0L, SEEK_SET); ++ ++ if (size > lsa_size) { ++ fprintf(stderr, ++ "File size (%zu) greater than LSA size (%zu), aborting\n", ++ size, lsa_size); ++ return -EINVAL; ++ } ++ } ++ ++ buf = calloc(1, size); ++ if (!buf) ++ return -ENOMEM; ++ ++ read_len = fread(buf, 1, size, actx->f_in); ++ if (read_len != size) { ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ rc = cxl_memdev_set_lsa(memdev, buf, size, param.offset); ++ if (rc < 0) ++ fprintf(stderr, "%s: label write failed: %s\n", ++ cxl_memdev_get_devname(memdev), strerror(-rc)); ++ ++out: ++ free(buf); ++ return rc; ++} ++ ++static int action_read(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ size_t size = param.len, write_len; ++ char *buf; ++ int rc; ++ ++ if (!size) ++ size = cxl_memdev_get_lsa_size(memdev); ++ ++ buf = calloc(1, size); ++ if (!buf) ++ return -ENOMEM; ++ ++ rc = cxl_memdev_get_lsa(memdev, buf, size, param.offset); ++ if (rc < 0) { ++ fprintf(stderr, "%s: label read failed: %s\n", ++ cxl_memdev_get_devname(memdev), 
strerror(-rc)); ++ goto out; ++ } ++ ++ write_len = fwrite(buf, 1, size, actx->f_out); ++ if (write_len != size) { ++ rc = -ENXIO; ++ goto out; ++ } ++ fflush(actx->f_out); ++ ++out: ++ free(buf); ++ return rc; ++} ++ ++static int memdev_action(int argc, const char **argv, struct cxl_ctx *ctx, ++ int (*action)(struct cxl_memdev *memdev, struct action_context *actx), ++ const struct option *options, const char *usage) ++{ ++ struct cxl_memdev *memdev, *single = NULL; ++ struct action_context actx = { 0 }; ++ int i, rc = 0, count = 0, err = 0; ++ const char * const u[] = { ++ usage, ++ NULL ++ }; ++ unsigned long id; ++ ++ argc = parse_options(argc, argv, options, u, 0); ++ ++ if (argc == 0) ++ usage_with_options(u, options); ++ for (i = 0; i < argc; i++) { ++ if (strcmp(argv[i], "all") == 0) { ++ argv[0] = "all"; ++ argc = 1; ++ break; ++ } ++ ++ if (sscanf(argv[i], "mem%lu", &id) != 1) { ++ fprintf(stderr, "'%s' is not a valid memdev name\n", ++ argv[i]); ++ err++; ++ } ++ } ++ ++ if (err == argc) { ++ usage_with_options(u, options); ++ return -EINVAL; ++ } ++ ++ if (!param.outfile) ++ actx.f_out = stdout; ++ else { ++ actx.f_out = fopen(param.outfile, "w+"); ++ if (!actx.f_out) { ++ fprintf(stderr, "failed to open: %s: (%s)\n", ++ param.outfile, strerror(errno)); ++ rc = -errno; ++ goto out; ++ } ++ } ++ ++ if (!param.infile) { ++ actx.f_in = stdin; ++ } else { ++ actx.f_in = fopen(param.infile, "r"); ++ if (!actx.f_in) { ++ fprintf(stderr, "failed to open: %s: (%s)\n", ++ param.infile, strerror(errno)); ++ rc = -errno; ++ goto out_close_fout; ++ } ++ } ++ ++ if (param.verbose) ++ cxl_set_log_priority(ctx, LOG_DEBUG); ++ ++ rc = 0; ++ err = 0; ++ count = 0; ++ ++ for (i = 0; i < argc; i++) { ++ if (sscanf(argv[i], "mem%lu", &id) != 1 ++ && strcmp(argv[i], "all") != 0) ++ continue; ++ ++ cxl_memdev_foreach (ctx, memdev) { ++ if (!util_cxl_memdev_filter(memdev, argv[i])) ++ continue; ++ ++ if (action == action_write) { ++ single = memdev; ++ rc = 0; ++ } else ++ rc = action(memdev, &actx); ++ ++ if (rc == 0) ++ count++; ++ else if (rc && !err) ++ err = rc; ++ } ++ } ++ rc = err; ++ ++ if (action == action_write) { ++ if (count > 1) { ++ error("write-labels only supports writing a single memdev\n"); ++ usage_with_options(u, options); ++ return -EINVAL; ++ } else if (single) { ++ rc = action(single, &actx); ++ if (rc) ++ count = 0; ++ } ++ } ++ ++ if (actx.f_in != stdin) ++ fclose(actx.f_in); ++ ++ out_close_fout: ++ if (actx.f_out != stdout) ++ fclose(actx.f_out); ++ ++ out: ++ /* ++ * count if some actions succeeded, 0 if none were attempted, ++ * negative error code otherwise. ++ */ ++ if (count > 0) ++ return count; ++ return rc; ++} ++ ++int cmd_write_labels(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int count = memdev_action(argc, argv, ctx, action_write, write_options, ++ "cxl write-labels [-i ]"); ++ ++ fprintf(stderr, "wrote %d mem%s\n", count >= 0 ? count : 0, ++ count > 1 ? "s" : ""); ++ return count >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_read_labels(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int count = memdev_action(argc, argv, ctx, action_read, read_options, ++ "cxl read-labels [..] [-o ]"); ++ ++ fprintf(stderr, "read %d mem%s\n", count >= 0 ? count : 0, ++ count > 1 ? "s" : ""); ++ return count >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_zero_labels(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int count = memdev_action(argc, argv, ctx, action_zero, zero_options, ++ "cxl zero-labels [..] 
[]"); ++ ++ fprintf(stderr, "zeroed %d mem%s\n", count >= 0 ? count : 0, ++ count > 1 ? "s" : ""); ++ return count >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_identify(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_identify, cmd_identify_options, ++ "cxl id-cmd [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_supported_logs(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_supported_logs, cmd_get_supported_logs_options, ++ "cxl get-supported-logs [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_cel_log(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_cel_log, cmd_get_cel_log_options, ++ "cxl get-cel-log [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_event_interrupt_policy(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_event_interrupt_policy, cmd_get_event_interrupt_policy_options, ++ "cxl get-event-interrupt-policy [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_set_event_interrupt_policy(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_set_event_interrupt_policy, cmd_set_event_interrupt_policy_options, ++ "cxl set-event-interrupt-policy [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_timestamp(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_timestamp, cmd_get_timestamp_options, ++ "cxl get-timestamp [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_set_timestamp(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_set_timestamp, cmd_set_timestamp_options, ++ "cxl set-timestamp [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_alert_config(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_alert_config, cmd_get_alert_config_options, ++ "cxl get-alert-config [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_set_alert_config(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_set_alert_config, cmd_set_alert_config_options, ++ "cxl set-alert-config [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_health_info(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_health_info, cmd_get_health_info_options, ++ "cxl get-health-info [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_event_records(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_event_records, cmd_get_event_records_options, ++ "cxl get-event-records [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_ld_info(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_ld_info, cmd_get_ld_info_options, ++ "cxl get-ld-info [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ddr_info(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ddr_info, cmd_ddr_info_options, ++ "cxl ddr-info [..] [-i ]"); ++ ++ return rc >= 0 ? 
0 : EXIT_FAILURE; ++} ++ ++int cmd_clear_event_records(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_clear_event_records, cmd_clear_event_records_options, ++ "cxl clear-event-records [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++/* insert here cmd */ +diff --git a/cligen/gen/cci_vendor_cmds.yaml b/cligen/gen/cci_vendor_cmds.yaml +new file mode 100644 +index 0000000..e6c4e59 +--- /dev/null ++++ b/cligen/gen/cci_vendor_cmds.yaml +@@ -0,0 +1,5029 @@ ++################################################################################ ++# Copyright 2022 Microchip Technology Inc. and its subsidiaries. ++# Subject to your compliance with these terms, you may use Microchip ++# software and any derivatives exclusively with Microchip products. It is ++# your responsibility to comply with third party license terms applicable to ++# your use of third party software (including open source software) that may ++# accompany Microchip software. ++# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER ++# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY ++# IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A ++# PARTICULAR PURPOSE. IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, ++# SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR ++# EXPENSE OF ANY KIND WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, ++# EVEN IF MICROCHIP HAS BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE ++# FORESEEABLE. TO THE FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL ++# LIABILITY ON ALL CLAIMS IN ANY WAY RELATED TO THIS SOFTWARE WILL NOT ++# EXCEED THE AMOUNT OF FEES, IF ANY, THAT YOU HAVE PAID DIRECTLY TO ++# MICROCHIP FOR THIS SOFTWARE. ++################################################################################ ++ ++--- # CCI vendor commands ++ ++command_sets: ++- command_set_name: Device Info ++ command_set_description: Fetches general Device Information ++ ++ command_set_opcodes: ++ - opcode_name: Read Device Information ++ mnemonic: DEVICE_INFO_GET ++ opcode: 0xC000 ++ ++ output_payload: ++ - opl_name: Output parameters for Device Info Get ++ opl_mnemonic: DEVICE_INFO_GET_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ - opl_par_name: Device ID ++ opl_par_mnemonic: DEVICE_ID ++ opl_offset: 0x00 ++ opl_length: 2 ++ ++ - opl_par_name: Chip Info Release Major ++ opl_par_mnemonic: CHIPINFO_REL_MAJOR ++ opl_offset: 0x02 ++ opl_length: 1 ++ opl_description: > ++ Major: Rev A = 0, Rev B = 1, Rev C = 2, etc. ++ ++ - opl_par_name: Chip Info Release Minor ++ opl_par_mnemonic: CHIPINFO_REL_MINOR ++ opl_offset: 0x03 ++ opl_length: 1 ++ opl_description: > ++ Minor: Rev A.0 = 0, Rev A.1 = 1, Rev B.0 = 0, Rev B.1 = 1, etc. ++ ++ - opl_par_name: Device Revision ++ opl_par_mnemonic: DEVICE_REV ++ opl_offset: 0x04 ++ opl_length: 1 ++ opl_description: > ++ Incremented for every device revision. ++ Mapping to major/minor revisions is specific to the device lineage. 
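++      # Taken together with the ConfigFile version fields below, the six
++      # parameters account for the full 8-byte DEVICE_INFO_GET_OUT_PL:
++      # DEVICE_ID (2B) + CHIPINFO_REL_MAJOR/MINOR (1B each) + DEVICE_REV (1B)
++      # + CONFIGFILE_VER_MAJOR (1B) + CONFIGFILE_VER_MINOR (2B).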
++ ++ - opl_par_name: ConfigFile version Major ++ opl_par_mnemonic: CONFIGFILE_VER_MAJOR ++ opl_offset: 0x05 ++ opl_length: 1 ++ ++ - opl_par_name: ConfigFile version Minor ++ opl_par_mnemonic: CONFIGFILE_VER_MINOR ++ opl_offset: 0x06 ++ opl_length: 2 ++ ++- command_set_name: Flex BIST ++ command_set_description: > ++ 0xC20x: General API ++ 0xC21x: Error and Performance monitoring API ++ 0xC22x: Polling API (Collection of useful interrupt-flags) ++ 0xC23x: Test API (Predefined standard testcases) ++ ++ command_set_opcodes: ++ ++ - opcode_name: Configure error stop conditions ++ mnemonic: FBIST_STOPCONFIG_SET ++ opcode: 0xC207 ++ opcode_description: > ++ Note: Affects both Transaction Generators ++ ++ input_payload: ++ - ipl_name: Input parameters for StopConfig register Set ++ ipl_mnemonic: FBIST_STOPCONFIG_SET_IN_PL ++ ipl_size_bytes: 7 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: Stop on Write Response ++ ipl_par_mnemonic: STOP_ON_WRESP ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ - ipl_par_name: Stop on Read Response ++ ipl_par_mnemonic: STOP_ON_RRESP ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ - ipl_par_name: Stop on Read Data Error ++ ipl_par_mnemonic: STOP_ON_RDATAERR ++ ipl_offset: 0x06 ++ ipl_length: 1 ++ ++ - opcode_name: Configure maximum cyclecount ++ mnemonic: FBIST_CYCLECOUNT_SET ++ opcode: 0xC208 ++ opcode_description: > ++ Note: Must not be accessed while a TXG is in run state ++ Setting cyclecount to '0' disables this stop condition ++ ++ input_payload: ++ - ipl_name: Input parameters for CycleCnt register Set ++ ipl_mnemonic: FBIST_CYCLECOUNT_SET_IN_PL ++ ipl_size_bytes: 16 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: TXG Nr ++ ipl_par_mnemonic: TXG_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x05 ++ ipl_length: 3 ++ ++ - ipl_par_name: cyclecount ++ ipl_par_mnemonic: CYCLECOUNT ++ ipl_offset: 0x08 ++ ipl_length: 8 ++ ++ - opcode_name: Set Reset flags of TXG[0|1] ++ mnemonic: FBIST_RESET_SET ++ opcode: 0xC209 ++ opcode_description: > ++ Note: Activating a reset will also clear the run flag ++ ++ input_payload: ++ - ipl_name: Input parameters for Reset flags Set ++ ipl_mnemonic: FBIST_RESET_SET_IN_PL ++ ipl_size_bytes: 6 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: TXG0 Reset ++ ipl_par_mnemonic: TXG0_RESET ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ - ipl_par_name: TXG1 Reset ++ ipl_par_mnemonic: TXG1_RESET ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ - opcode_name: Set Run flags of TXG[0|1] ++ mnemonic: FBIST_RUN_SET ++ opcode: 0xC20A ++ ++ input_payload: ++ - ipl_name: Input parameters for Run flags Set ++ ipl_mnemonic: FBIST_RUN_SET_IN_PL ++ ipl_size_bytes: 6 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: TXG0 Run ++ ipl_par_mnemonic: TXG0_RUN ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ - ipl_par_name: TXG1 Run ++ ipl_par_mnemonic: TXG1_RUN ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ - opcode_name: Read Run flags of TXG[0|1] ++ mnemonic: FBIST_RUN_GET ++ opcode: 0xC20B ++ ++ input_payload: ++ - ipl_name: Input parameters for Run flags Get ++ ipl_mnemonic: FBIST_RUN_GET_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - 
ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ output_payload: ++ - opl_name: Output parameters for Run flags Get ++ opl_mnemonic: FBIST_RUN_GET_OUT_PL ++ opl_size_bytes: 2 ++ ++ parameters: ++ - opl_par_name: TXG0 Run ++ opl_par_mnemonic: TXG0_RUN ++ opl_offset: 0x00 ++ opl_length: 1 ++ ++ - opl_par_name: TXG1 Run ++ opl_par_mnemonic: TXG1_RUN ++ opl_offset: 0x01 ++ opl_length: 1 ++ ++ ++ - opcode_name: Read a thread's Remaining Transfer Counts ++ mnemonic: FBIST_XFER_REM_CNT_GET ++ opcode: 0xC210 ++ opcode_description: > ++ Note: Must not be accessed while a TXG is in run state ++ ++ input_payload: ++ - ipl_name: Input parameters for XFER remaining counter Get ++ ipl_mnemonic: FBIST_XFER_REM_CNT_GET_IN_PL ++ ipl_size_bytes: 5 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: Thread Nr ++ ipl_par_mnemonic: THREAD_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: Output parameters for XFER remaining counter Get ++ opl_mnemonic: FBIST_XFER_REM_CNT_GET_OUT_PL ++ opl_size_bytes: 2 ++ ++ parameters: ++ - opl_par_name: XFER Remaining ++ opl_par_mnemonic: XFER_REM ++ opl_offset: 0x00 ++ opl_length: 2 ++ ++ - opcode_name: Reads last and expected data ++ mnemonic: FBIST_LAST_EXP_READ_DATA_GET ++ opcode: 0xC211 ++ opcode_description: > ++ Note: Must not be accessed while a TXG is in run state ++ ++ input_payload: ++ - ipl_name: Input parameters for Last Exp Read Data Get ++ ipl_mnemonic: FBIST_LAST_EXP_READ_DATA_GET_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ output_payload: ++ - opl_name: Output parameters for Input parameters for Last Exp Read Data Get ++ opl_mnemonic: FBIST_LAST_EXP_READ_DATA_GET_OUT_PL ++ opl_size_bytes: 128 ++ ++ parameters: ++ - opl_par_name: last_rd_data ++ opl_par_mnemonic: LAST_RD_DATA ++ opl_offset: 0x00 ++ opl_unit_size: 4 ++ opl_units_num: 16 ++ opl_length: 64 ++ opl_description: Array of 16 DWords ++ ++ - opl_par_name: exp_rd_data ++ opl_par_mnemonic: EXP_RD_DATA ++ opl_offset: 0x40 ++ opl_unit_size: 4 ++ opl_units_num: 16 ++ opl_length: 64 ++ opl_description: Array of 16 DWords ++ ++ - opcode_name: Read a TXG's current cycle count ++ mnemonic: FBIST_CURR_CYCLE_CNT_GET ++ opcode: 0xC212 ++ ++ input_payload: ++ - ipl_name: Input parameters for Current Cycle Count Get ++ ipl_mnemonic: FBIST_CURR_CYCLE_CNT_GET_IN_PL ++ ipl_size_bytes: 5 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: TXG Nr ++ ipl_par_mnemonic: TXG_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: Output parameters for Current Cycle Count Get ++ opl_mnemonic: FBIST_CURR_CYCLE_CNT_GET_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ - opl_par_name: Current Cycle Count ++ opl_par_mnemonic: CURR_CYCLE_CNT ++ opl_offset: 0x00 ++ opl_length: 8 ++ ++ - opcode_name: Read a TXG's thread status ++ mnemonic: FBIST_THREAD_STATUS_GET ++ opcode: 0xC213 ++ ++ input_payload: ++ - ipl_name: Input parameters for Thread Status Get ++ ipl_mnemonic: FBIST_THREAD_STATUS_GET_IN_PL ++ ipl_size_bytes: 6 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: TXG Nr ++ ipl_par_mnemonic: TXG_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ 
++ - ipl_par_name: Thread Nr ++ ipl_par_mnemonic: THREAD_NR ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: Output parameters for Thread Status Get ++ opl_mnemonic: FBIST_THREAD_STATUS_GET_OUT_PL ++ opl_size_bytes: 4 ++ ++ parameters: ++ - opl_par_name: Thread State ++ opl_par_mnemonic: THREAD_STATE ++ opl_offset: 0x00 ++ opl_length: 1 ++ ++ - opl_par_name: Reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 0x01 ++ opl_length: 1 ++ ++ - opl_par_name: curr_thread_desc_index ++ opl_par_mnemonic: CURR_THREAD_DESC_INDEX ++ opl_offset: 0x02 ++ opl_length: 2 ++ ++ - opcode_name: Read a TXG's thread transaction count ++ mnemonic: FBIST_THREAD_TRANS_CNT_GET ++ opcode: 0xC214 ++ ++ input_payload: ++ - ipl_name: Input parameters for Thread Trans Count Get ++ ipl_mnemonic: FBIST_THREAD_TRANS_CNT_GET_IN_PL ++ ipl_size_bytes: 6 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: TXG Nr ++ ipl_par_mnemonic: TXG_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ - ipl_par_name: Thread Nr ++ ipl_par_mnemonic: THREAD_NR ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: Output parameters for Thread Trans Count Get ++ opl_mnemonic: FBIST_THREAD_TRANS_CNT_GET_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ - opl_par_name: Transaction Count ++ opl_par_mnemonic: TRANSACTION_CNT ++ opl_offset: 0x00 ++ opl_length: 8 ++ ++ - opcode_name: Read a TXG's thread Rd/Wr bandwidth counters ++ mnemonic: FBIST_THREAD_BANDWIDTH_GET ++ opcode: 0xC215 ++ ++ input_payload: ++ - ipl_name: Input parameters for Thread Bandwidth Get ++ ipl_mnemonic: FBIST_THREAD_BANDWIDTH_GET_IN_PL ++ ipl_size_bytes: 6 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: TXG Nr ++ ipl_par_mnemonic: TXG_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ - ipl_par_name: Thread Nr ++ ipl_par_mnemonic: THREAD_NR ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: Output parameters for Thread Bandwidth Get ++ opl_mnemonic: FBIST_THREAD_BANDWIDTH_GET_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ - opl_par_name: Read BW Count ++ opl_par_mnemonic: READ_BW_CNT ++ opl_offset: 0x00 ++ opl_length: 4 ++ ++ - opl_par_name: Write BW Count ++ opl_par_mnemonic: WRITE_BW_CNT ++ opl_offset: 0x04 ++ opl_length: 4 ++ ++ - opcode_name: Read a TXG's thread Rd/Wr latency counters ++ mnemonic: FBIST_THREAD_LATENCY_GET ++ opcode: 0xC216 ++ ++ input_payload: ++ - ipl_name: Input parameters for Thread Latency Get ++ ipl_mnemonic: FBIST_THREAD_LATENCY_GET_IN_PL ++ ipl_size_bytes: 6 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: TXG Nr ++ ipl_par_mnemonic: TXG_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ - ipl_par_name: Thread Nr ++ ipl_par_mnemonic: THREAD_NR ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: Output parameters for Thread Latency Get ++ opl_mnemonic: FBIST_THREAD_LATENCY_GET_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ - opl_par_name: Read Latency Count ++ opl_par_mnemonic: READ_LATENCY_CNT ++ opl_offset: 0x00 ++ opl_length: 4 ++ ++ - opl_par_name: Write Latency Count ++ opl_par_mnemonic: WRITE_LATENCY_CNT ++ opl_offset: 0x04 ++ opl_length: 4 ++ ++ - opcode_name: Set up TXG's/thread's performance monitor ++ mnemonic: FBIST_THREAD_PERF_MON_SET ++ opcode: 0xC217 ++ ++ input_payload: ++ - 
ipl_name: Input parameters for Thread Performance Monitor Set ++ ipl_mnemonic: FBIST_THREAD_PERF_MON_SET_IN_PL ++ ipl_size_bytes: 10 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: TXG Nr ++ ipl_par_mnemonic: TXG_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ - ipl_par_name: Thread Nr ++ ipl_par_mnemonic: THREAD_NR ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ - ipl_par_name: Performance Monitor Preset Enable ++ ipl_par_mnemonic: PMON_PRESET_EN ++ ipl_offset: 0x06 ++ ipl_length: 1 ++ ++ - ipl_par_name: Performance Monitor Clear Enable ++ ipl_par_mnemonic: PMON_CLEAR_EN ++ ipl_offset: 0x07 ++ ipl_length: 1 ++ ++ - ipl_par_name: Performance Monitor Rollover ++ ipl_par_mnemonic: PMON_ROLLOVER ++ ipl_offset: 0x08 ++ ipl_length: 1 ++ ++ - ipl_par_name: Performance Monitor Thread lclk ++ ipl_par_mnemonic: PMON_THREAD_LCLK ++ ipl_offset: 0x09 ++ ipl_length: 1 ++ ++ - opcode_name: Read the top read status0 ++ mnemonic: FBIST_TOP_READ_STATUS0_GET ++ opcode: 0xC218 ++ ++ input_payload: ++ - ipl_name: Input parameters for Top Read Status0 Get ++ ipl_mnemonic: FBIST_TOP_READ_STATUS0_GET_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ output_payload: ++ - opl_name: Output parameters for Top Read Status0 Get ++ opl_mnemonic: FBIST_TOP_READ_STATUS0_GET_OUT_PL ++ opl_size_bytes: 3 ++ ++ parameters: ++ - opl_par_name: tag_id_err_idx ++ opl_par_mnemonic: TAG_ID_ERR_IDX ++ opl_offset: 0x00 ++ opl_length: 2 ++ ++ - opl_par_name: thread_err_idx ++ opl_par_mnemonic: THREAD_ERR_IDX ++ opl_offset: 0x02 ++ opl_length: 1 ++ ++ - opcode_name: Read Read-Dataframe, Read-Response and Write-Response error counters ++ mnemonic: FBIST_TOP_ERR_CNT_GET ++ opcode: 0xC219 ++ ++ input_payload: ++ - ipl_name: Input parameters for Top Error Count Get ++ ipl_mnemonic: FBIST_TOP_ERR_CNT_GET_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ output_payload: ++ - opl_name: Output parameters for Top Error Count Get ++ opl_mnemonic: FBIST_TOP_ERR_CNT_GET_OUT_PL ++ opl_size_bytes: 12 ++ ++ parameters: ++ - opl_par_name: Read Data Error Count ++ opl_par_mnemonic: RDATA_ERR_CNT ++ opl_offset: 0x00 ++ opl_length: 4 ++ ++ - opl_par_name: Read Response Error Count ++ opl_par_mnemonic: RRESP_ERR_CNT ++ opl_offset: 0x04 ++ opl_length: 4 ++ ++ - opl_par_name: Write Response Error Count ++ opl_par_mnemonic: WRESP_ERR_CNT ++ opl_offset: 0x08 ++ opl_length: 4 ++ ++ - opcode_name: Read the last read address ++ mnemonic: FBIST_LAST_READ_ADDR_GET ++ opcode: 0xC21A ++ opcode_description: > ++ Note: Must not be accessed while a TXG is in run state ++ ++ input_payload: ++ - ipl_name: Input parameters for Last Read Address Get ++ ipl_mnemonic: FBIST_LAST_READ_ADDR_GET_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ output_payload: ++ - opl_name: Output parameters for Last Read Address Get ++ opl_mnemonic: FBIST_LAST_READ_ADDR_GET_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ - opl_par_name: last_read_addr ++ opl_par_mnemonic: LAST_READ_ADDR ++ opl_offset: 0x00 ++ opl_length: 8 ++ ++ - opcode_name: Sets up a predefined configuration from the SimpleData group ++ mnemonic: FBIST_TEST_SIMPLEDATA ++ opcode: 0xC230 ++ opcode_description: > ++ This 
function sets up a complete testcase. ++ User then need to run TXG[0] and poll/handle interrupt flags for errors. ++ ++ input_payload: ++ - ipl_name: Parameters for SimpleData configuration Input Payload ++ ipl_mnemonic: FBIST_TEST_SIMPLEDATA_IN_PL ++ ipl_size_bytes: 24 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: Test number to be setup ++ ipl_par_mnemonic: TEST_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ enumerators: ++ - ipl_en_name: Wr Sweep, Rd Sweep ++ ipl_en_mnemonic: FBIST_WRSWEEP_RDSWEEP ++ ipl_value: 0x00 ++ ++ - ipl_en_name: Wr/Rd Interleave ++ ipl_en_mnemonic: FBIST_WR_RD_INTERLEAVE ++ ipl_value: 0x01 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x05 ++ ipl_length: 3 ++ ++ - ipl_par_name: Start Address ++ ipl_par_mnemonic: START_ADDRESS ++ ipl_offset: 0x08 ++ ipl_length: 8 ++ ++ - ipl_par_name: Size of memory to operate on ++ ipl_par_mnemonic: NUM_BYTES ++ ipl_offset: 0x10 ++ ipl_length: 8 ++ ipl_description: Rounded down to a multiple of 64 ++ ++ - opcode_name: Sets up a predefined configuration from the AddressTest group ++ mnemonic: FBIST_TEST_ADDRESSTEST ++ opcode: 0xC231 ++ opcode_description: > ++ This function sets up a complete testcase. ++ User then need to run TXG[0] and poll/handle interrupt flags for errors. ++ ++ input_payload: ++ - ipl_name: Parameters for AddressTest configuration Input Payload ++ ipl_mnemonic: FBIST_TEST_ADDRESSTEST_IN_PL ++ ipl_size_bytes: 28 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: Test number to be setup ++ ipl_par_mnemonic: TEST_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ enumerators: ++ - ipl_en_name: Walking Ones ++ ipl_en_mnemonic: FBIST_WALKING_ONES ++ ipl_value: 0x00 ++ ++ - ipl_en_name: Own Address Sequential ++ ipl_en_mnemonic: FBIST_OWN_ADDR_SEQUENTIAL ++ ipl_value: 0x01 ++ ++ - ipl_en_name: Own Address Parallel ++ ipl_en_mnemonic: FBIST_OWN_ADDR_PARALLEL ++ ipl_value: 0x02 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x05 ++ ipl_length: 3 ++ ++ - ipl_par_name: Start Address ++ ipl_par_mnemonic: START_ADDRESS ++ ipl_offset: 0x08 ++ ipl_length: 8 ++ ++ - ipl_par_name: Size of memory to operate on ++ ipl_par_mnemonic: NUM_BYTES ++ ipl_offset: 0x10 ++ ipl_length: 8 ++ ipl_description: Rounded down to a multiple of 64 ++ ++ - ipl_par_name: Inital Seed ++ ipl_par_mnemonic: SEED ++ ipl_offset: 0x18 ++ ipl_length: 4 ++ ++ - opcode_name: Sets up a predefined configuration from the MovingInversion group ++ mnemonic: FBIST_TEST_MOVINGINVERSION ++ opcode: 0xC232 ++ opcode_description: > ++ This function sets up one of the phases (A/B/C/D/E/F) of a testcase. ++ User then need to run both TXG[0/1] and poll/handle interrupt flags for errors. 
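++      # Sketch of the sequence implied by the description above: issue
++      # FBIST_TEST_MOVINGINVERSION (0xC232) once per phase A..F, start both
++      # transaction generators with FBIST_RUN_SET (0xC20A), watch progress via
++      # FBIST_RUN_GET (0xC20B) or the 0xC22x polling API, and read the error
++      # counters with FBIST_TOP_ERR_CNT_GET (0xC219) after each phase.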
++ ++ input_payload: ++ - ipl_name: Parameters for MovingInversion configuration Input Payload ++ ipl_mnemonic: FBIST_TEST_MOVINGINVERSION_IN_PL ++ ipl_size_bytes: 28 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: Test number to be setup ++ ipl_par_mnemonic: TEST_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ enumerators: ++ - ipl_en_name: MemTest86 #3: Ones & Zeros sequential ++ ipl_en_mnemonic: FBIST_MEMTEST86_3 ++ ipl_value: 0x00 ++ ++ - ipl_en_name: MemTest86 #4: 8-bit pattern ++ ipl_en_mnemonic: FBIST_MEMTEST86_4 ++ ipl_value: 0x01 ++ ++ - ipl_en_name: MemTest86 #5: Random Pattern ++ ipl_en_mnemonic: FBIST_MEMTEST86_5 ++ ipl_value: 0x02 ++ ++ - ipl_en_name: MemTest86 #7: 32-bit Pattern ++ ipl_en_mnemonic: FBIST_MEMTEST86_7 ++ ipl_value: 0x03 ++ ++ - ipl_par_name: Testphase to be setup ++ ipl_par_mnemonic: PHASE_NR ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ enumerators: ++ - ipl_en_name: Test Phase A ++ ipl_en_mnemonic: FBIST_PHASE_A ++ ipl_value: 0x00 ++ ++ - ipl_en_name: Test Phase B ++ ipl_en_mnemonic: FBIST_PHASE_B ++ ipl_value: 0x01 ++ ++ - ipl_en_name: Test Phase C ++ ipl_en_mnemonic: FBIST_PHASE_C ++ ipl_value: 0x02 ++ ++ - ipl_en_name: Test Phase D ++ ipl_en_mnemonic: FBIST_PHASE_D ++ ipl_value: 0x03 ++ ++ - ipl_en_name: Test Phase E ++ ipl_en_mnemonic: FBIST_PHASE_E ++ ipl_value: 0x04 ++ ++ - ipl_en_name: Test Phase F ++ ipl_en_mnemonic: FBIST_PHASE_F ++ ipl_value: 0x05 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x06 ++ ipl_length: 2 ++ ++ - ipl_par_name: Start Address ++ ipl_par_mnemonic: START_ADDRESS ++ ipl_offset: 0x08 ++ ipl_length: 8 ++ ++ - ipl_par_name: Size of memory to operate on ++ ipl_par_mnemonic: NUM_BYTES ++ ipl_offset: 0x10 ++ ipl_length: 8 ++ ipl_description: Rounded down to a multiple of 64 ++ ++ - ipl_par_name: DDR Page size ++ ipl_par_mnemonic: DDRPAGE_SIZE ++ ipl_offset: 0x18 ++ ipl_length: 4 ++ ++ - opcode_name: Sets up a predefined configuration from the RandomSequence group ++ mnemonic: FBIST_TEST_RANDOMSEQUENCE ++ opcode: 0xC233 ++ opcode_description: > ++ This function sets up one of the phases (A/B) of the random sequence test. ++ User then need to run both TXG[0/1] and poll/handle interrupt flags for error. 
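++      # Error stop behaviour for these predefined tests can be configured with
++      # FBIST_STOPCONFIG_SET (0xC207), which sets the stop-on-write-response,
++      # stop-on-read-response and stop-on-read-data-error conditions for both
++      # transaction generators.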
++ ++ input_payload: ++ - ipl_name: Parameters for RandomSequence configuration Input Payload ++ ipl_mnemonic: FBIST_TEST_RANDOMSEQUENCE_IN_PL ++ ipl_size_bytes: 36 ++ ++ parameters: ++ - ipl_par_name: Flex BIST Instance ++ ipl_par_mnemonic: FBIST_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - ipl_par_name: Testphase to be setup ++ ipl_par_mnemonic: PHASE_NR ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ enumerators: ++ - ipl_en_name: Test Phase A ++ ipl_en_mnemonic: FBIST_PHASE_A ++ ipl_value: 0x00 ++ ++ - ipl_en_name: Test Phase B ++ ipl_en_mnemonic: FBIST_PHASE_B ++ ipl_value: 0x01 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x05 ++ ipl_length: 3 ++ ++ - ipl_par_name: Start Address ++ ipl_par_mnemonic: START_ADDRESS ++ ipl_offset: 0x08 ++ ipl_length: 8 ++ ++ - ipl_par_name: Size of memory to operate on ++ ipl_par_mnemonic: NUM_BYTES ++ ipl_offset: 0x10 ++ ipl_length: 8 ++ ipl_description: Rounded down to a multiple of 64 ++ ++ - ipl_par_name: DDR Page size ++ ipl_par_mnemonic: DDRPAGE_SIZE ++ ipl_offset: 0x18 ++ ipl_length: 4 ++ ++ - ipl_par_name: Seed_DR0 ++ ipl_par_mnemonic: SEED_DR0 ++ ipl_offset: 0x1C ++ ipl_length: 4 ++ ++ - ipl_par_name: Seed DR1 ++ ipl_par_mnemonic: SEED_DR1 ++ ipl_offset: 0x20 ++ ipl_length: 4 ++ ++- command_set_name: DDR Commands ++ command_set_description: DDR Subcommands ++ ++ command_set_opcodes: ++ - opcode_name: DDR Info ++ mnemonic: DDR_INFO ++ opcode: 0xC500 ++ opcode_description: > ++ This opcode reads basic DDR memory information ++ ++ input_payload: ++ - ipl_name: DDR Info Input Payload ++ ipl_mnemonic: DDR_INFO_IN_PL ++ ipl_size_bytes: 1 ++ ++ parameters: ++ - ipl_par_name: DDR instance ++ ipl_par_mnemonic: DDR_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: DDR Info Output Payload ++ opl_mnemonic: DDR_INFO_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ - opl_par_name: DDR controller MSTR register ++ opl_par_mnemonic: MSTR_REG ++ opl_offset: 0x00 ++ opl_length: 4 ++ ++ - opl_par_name: DRAM width derived from DEVICE_CONFIG ++ opl_par_mnemonic: DRAM_WIDTH ++ opl_offset: 0x04 ++ opl_length: 4 ++ ++ enumerators: ++ - opl_en_name: DRAM Width x4 device ++ opl_en_mnemonic: X4_DEVICE ++ opl_value: 0x00 ++ ++ - opl_en_name: DRAM Width x8 device ++ opl_en_mnemonic: X8_DEVICE ++ opl_value: 0x01 ++ ++ - opl_en_name: DRAM Width x16 device ++ opl_en_mnemonic: X16_DEVICE ++ opl_value: 0x02 ++ ++ - opl_en_name: DRAM Width x32 device ++ opl_en_mnemonic: X32_DEVICE ++ opl_value: 0x03 ++ ++- command_set_name: HIF/CXL Trace Buffer ++ command_set_description: | ++ Manage and operate HIF and CXL Trace buffer. ++ Prepare device to capture HIF or CXL traffic ++ ++ 1) enable trace buffer ++ 2) configure trace buffer ++ - trigger pattern ++ - trigger masks ++ - post trigger depth ++ 3) set trigger mode and arm the trace buffer ++ ++ command_set_opcodes: ++ - opcode_name: Get HIF/CXL Trace Buffer Platform Parameters ++ mnemonic: HCT_GET_PLAT_PARAMS ++ opcode: 0xC600 ++ opcode_description: > ++ Returns the number of trace buffer instances and the type (HIF or CXL) ++ of each instance. ++ ++ output_payload: ++ - opl_name: Get Platform Parameter Output Payload ++ opl_mnemonic: HCT_GET_PLAT_PARAMS_OUT_PL ++ opl_size_bytes: 1+ ++ ++ parameters: ++ - opl_par_name: Number of Trace buffer instances ++ opl_par_mnemonic: NUM_INST ++ opl_offset: 0x00 ++ opl_length: 1 ++ opl_description: > ++ This parameter represents the number of available trace buffer instances. 
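++        # The "1+" payload size therefore resolves to 1 + NUM_INST bytes: this
++        # count byte followed by one type byte per instance (see TYPE below,
++        # 0 = FLIT/CXL, 1 = HIF).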
++ ++ - opl_par_name: Trace buffer instance type ++ opl_par_mnemonic: TYPE ++ opl_offset: 0x01 ++ opl_unit_size: 1 ++ opl_units_num: VARIES ++ opl_length: VARIES ++ opl_description: | ++ This parameter is an array of length NUM_INST and represents the type ++ (HIF or CXL) of the available trace buffer instances. ++ A type field containing a 0 indicates a FLIT/CXL trace buffer. ++ A type field containing a 1 indicates a HIF trace buffer. ++ ++ enumerators: ++ - opl_en_name: Trace Buffer Type FLIT ++ opl_en_mnemonic: FLIT ++ opl_value: 0x00 ++ opl_en_description: Indicates a trace buffer instance of type FLIT/CXL. ++ ++ - opl_en_name: Trace Buffer Type HIF ++ opl_en_mnemonic: HIF ++ opl_value: 0x01 ++ opl_en_description: Indicates a trace buffer instance of type HIF. ++ ++ - opcode_name: Get HIF/CXL Trace Buffer Configuration ++ mnemonic: HCT_GET_CONFIG ++ opcode: 0xC601 ++ opcode_description: | ++ There are 8 registers for both, trigger pattern and trigger mask configuration ++ for the HIF trace buffer and 16 each for the CXL variant. The configuration values ++ returned by this opcode have the following structure. ++ ++ HIF output payload format ++ |PTD |Ignr val|rsvd|rsvd|Config Trig |Config Mask | ++ |[00]|[01] |[02]|[03]|[04][05]...[35]|[36][37]...[67]| ++ ++ CXL output payload format ++ |PTD |Ignr val|rsvd|rsvd|Config Trig |Config Mask | ++ |[00]|[01] |[02]|[03]|[04][05]...[67]|[68][69]...[131]| ++ ++ input_payload: ++ - ipl_name: Get Configuration Input Payload ++ ipl_mnemonic: HCT_GET_CONFIG_IN_PL ++ ipl_size_bytes: 1 ++ ++ parameters: ++ - ipl_par_name: HCT Instance ++ ipl_par_mnemonic: HCT_INST ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ The trace buffer instance we want to retrieve the configuration from. ++ ++ output_payload: ++ - opl_name: Get Configuration Output Payload ++ opl_mnemonic: HCT_GET_CONFIG_OUT_PL ++ opl_size_bytes: 4+ ++ ++ parameters: ++ - opl_par_name: Post Trigger Depth ++ opl_par_mnemonic: POST_TRIG_DEPTH ++ opl_offset: 0x00 ++ opl_length: 1 ++ opl_description: > ++ The Post-Trigger Depth controls the number of entries to capture in the trace buffer after the trigger ++ occurs. This can be any value from 0 (0% post-trigger) to 127 (100% post-trigger). ++ ++ - opl_par_name: Ignore Valid ++ opl_par_mnemonic: IGNORE_VALID ++ opl_offset: 0x01 ++ opl_length: 1 ++ opl_description: | ++ The Ignore Valid bit controls whether the non-valid flits are stored in the trace buffer. ++ ++ When set to logic: ++ 1: Invalid flits are captured in the trace buffer. ++ 0: Only valid flits are stored in the trace buffer. ++ ++ This register bit should only be modified when the trace buffer is in idle state. ++ ++ - opl_par_name: Reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 0x02 ++ opl_length: 1 ++ ++ - opl_par_name: Reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 0x03 ++ opl_length: 1 ++ ++ - opl_par_name: Trigger Config ++ opl_par_mnemonic: TRIG_CONFIG ++ opl_offset: 0x04 ++ opl_unit_size: 4 ++ opl_units_num: VARIES ++ opl_length: VARIES ++ opl_description: > ++ Array that contains the trigger pattern configuration followed by the trigger mask configuration. ++ ++ - opcode_name: Set HIF/CXL Trace Buffer Configuration ++ mnemonic: HCT_SET_CONFIG ++ opcode: 0xC602 ++ opcode_description: | ++ There are 8 registers for both, trigger pattern and trigger mask ++ configuration for the HIF trace buffer and 16 each for the CXL variant. ++ The passed configuration values must have the following structure. 
++ ++ HIF input payload format ++ |HCT Index|Flags|rsvd|rsvd|PTD |Ignr val|rsvd|rsvd|Config Trig |Config Mask | ++ |[00] |[01] |[02]|[03]|[04]|[05] |[06]|[07]|[08][09]...[39]|[40][41]...[71]| ++ ++ CXL input payload format ++ |HCT Index|Flags|rsvd|rsvd|PTD |Ignr val|rsvd|rsvd|Config Trig |Config Mask | ++ |[00] |[01] |[02]|[03]|[04]|[05] |[06]|[07]|[08][09]...[71]|[72][73]...[135]| ++ ++ input_payload: ++ - ipl_name: Set Configuration Input Payload ++ ipl_mnemonic: HCT_SET_CONFIG_IN_PL ++ ipl_size_bytes: 8+ ++ ++ parameters: ++ - ipl_par_name: HCT Instance ++ ipl_par_mnemonic: HCT_INST ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: Config Flags ++ ipl_par_mnemonic: CONFIG_FLAGS ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ipl_description: | ++ This parameter indicates what configuration values are present in the ++ input payload. ++ ++ 3'b001 - capture config (post-trigger-depth and ignore-valid) is present ++ 3'b010 - config trigger is present ++ 3'b100 - config trigger mask is present ++ ++ enumerators: ++ - ipl_en_name: Capture Configuration Present ++ ipl_en_mnemonic: CAPT_CFG ++ ipl_value: 0x01 ++ ipl_en_description: Signals that the PTD and Ignore_Valid flags are present in input payload. ++ ++ - ipl_en_name: Trigger Config Present ++ ipl_en_mnemonic: TRIGGER_CFG ++ ipl_value: 0x02 ++ ipl_en_description: Signals that the trigger configuration is present in input payload. ++ ++ - ipl_en_name: Trigger Mask Present ++ ipl_en_mnemonic: TRIGGER_MASK ++ ipl_value: 0x04 ++ ipl_en_description: Signals that the trigger mask is present in input payload. ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - ipl_par_name: Post Trigger Depth ++ ipl_par_mnemonic: POST_TRIG_DEPTH ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ipl_description: > ++ The Post-Trigger Depth controls the number of entries to capture in ++ the trace buffer after the trigger occurs. This can be any value from ++ 0 (0% post-trigger) to 127 (100% post-trigger). ++ ++ - ipl_par_name: Ignore Valid ++ ipl_par_mnemonic: IGNORE_VALID ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ipl_description: | ++ The Ignore Valid bit controls whether the non-valid flits are stored in the trace buffer. ++ ++ When set to logic: ++ ++ 1: Invalid flits are captured in the trace buffer. ++ 0: Only valid flits are stored in the trace buffer. ++ ++ This register bit should only be modified when the trace buffer is in idle state. ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x06 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x07 ++ ipl_length: 1 ++ ++ - ipl_par_name: Trigger Config ++ ipl_par_mnemonic: TRIG_CONFIG ++ ipl_offset: 0x08 ++ ipl_length: VARIES ++ ipl_description: > ++ Array that contains the trigger pattern configuration followed by the ++ trigger mask configuration. Every incoming HIF command (or CXL flit) ++ is compared against the pattern bits and pattern mask. If the bits of ++ the command or flit where the pattern mask bit is 1 match the pattern ++ bits, the trigger is asserted and the trace buffer begins the ++ post-trigger capture phase. The post-trigger capture phase ends when ++ the post-trigger capture depth is reached, and at that point no new ++ data is written to the circular buffer. For the HIF variant, separate ++ trigger pattern registers are defined for HIF read commands and HIF ++ write commands. 
Also note that the pattern trigger cannot be
++                configured to trigger on the sideband bits in the HIF variant.
++
++    - opcode_name: Start Stop Trigger the HIF/CXL Trace Buffer
++      mnemonic: HCT_START_STOP_TRIGGER
++      opcode: 0xC603
++      opcode_description: >
++        Configure the capture starting event.
++
++      input_payload:
++        - ipl_name: Start Stop Trigger Input Payload
++          ipl_mnemonic: HCT_START_STOP_TRIGGER_IN_PL
++          ipl_size_bytes: 2
++
++          parameters:
++            - ipl_par_name: HCT Instance
++              ipl_par_mnemonic: HCT_INST
++              ipl_offset: 0x00
++              ipl_length: 1
++              ipl_description: >
++                The number of the trace buffer instance that is going to be used.
++
++            - ipl_par_name: Buffer Control
++              ipl_par_mnemonic: BUF_CONTROL
++              ipl_offset: 0x01
++              ipl_length: 1
++              ipl_description: >
++                Operational mode of the trace buffer.
++
++              enumerators:
++                - ipl_en_name: Stop
++                  ipl_en_mnemonic: STOP
++                  ipl_value: 0x00
++                  ipl_en_description: Stop capturing
++
++                - ipl_en_name: Pre-Trigger
++                  ipl_en_mnemonic: PRE_TRIGGER
++                  ipl_value: 0x01
++                  ipl_en_description: Arm Trigger (waiting for pattern to occur)
++
++                - ipl_en_name: Post-Trigger
++                  ipl_en_mnemonic: POST_TRIGGER
++                  ipl_value: 0x02
++                  ipl_en_description: Manual Trigger (kicks off capturing immediately)
++
++    - opcode_name: Get HIF/CXL Trace Buffer Status
++      mnemonic: HCT_GET_BUFFER_STATUS
++      opcode: 0xC604
++      opcode_description: >
++        Retrieve the current state the trace buffer is in.
++
++      input_payload:
++        - ipl_name: Get HIF/CXL Trace Buffer Status Input Payload
++          ipl_mnemonic: HCT_GET_BUFFER_STATUS_IN_PL
++          ipl_size_bytes: 1
++
++          parameters:
++            - ipl_par_name: HCT Instance
++              ipl_par_mnemonic: HCT_INST
++              ipl_offset: 0x00
++              ipl_length: 1
++              ipl_description: >
++                The number of the trace buffer instance the caller is interested in.
++
++      output_payload:
++        - opl_name: Get HIF/CXL Trace Buffer Status Output Payload
++          opl_mnemonic: HCT_GET_BUFFER_STATUS_OUT_PL
++          opl_size_bytes: 2
++
++          parameters:
++            - opl_par_name: Buffer Status
++              opl_par_mnemonic: BUF_STATUS
++              opl_offset: 0x00
++              opl_length: 1
++              opl_description: >
++                Operational mode of the trace buffer.
++
++              enumerators:
++                - opl_en_name: Stop
++                  opl_en_mnemonic: STOP
++                  opl_value: 0x00
++                  opl_en_description: Stop capturing
++
++                - opl_en_name: Pre-Trigger
++                  opl_en_mnemonic: PRE_TRIGGER
++                  opl_value: 0x01
++                  opl_en_description: Arm Trigger (waiting for pattern to occur)
++
++                - opl_en_name: Post-Trigger
++                  opl_en_mnemonic: POST_TRIGGER
++                  opl_value: 0x02
++                  opl_en_description: Manual Trigger (kicks off capturing immediately)
++
++            - opl_par_name: Fill Level
++              opl_par_mnemonic: FILL_LEVEL
++              opl_offset: 0x01
++              opl_length: 1
++              opl_description: >
++                The current fill level of the trace buffer, i.e. the number of
++                buffer entries it currently holds. It is updated automatically.
++
++    - opcode_name: Read HIF/CXL Trace Buffer
++      mnemonic: HCT_READ_BUFFER
++      opcode: 0xC605
++      opcode_description: >
++        Retrieve one entry line of the trace buffer, starting at the buffer start.
++        Each subsequent read retrieves the next buffer entry until the buffer end
++        is reached, as indicated by the 'BUF_END' flag in the output payload.
++        From that point on, subsequent reads return the last entry line until the
++        buffer is started again.
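++    # Illustrative usage sketch (informative only): drain the trace buffer by
++    # issuing HCT_READ_BUFFER until the BUF_END flag in the output payload is
++    # set. cxl_send_vendor_cmd() is a hypothetical mailbox transport helper,
++    # not an existing ndctl/cxl API; the payload offsets follow the
++    # definitions below.
++    #
++    #   #include <stddef.h>
++    #   #include <stdint.h>
++    #
++    #   /* hypothetical transport: returns < 0 on failure */
++    #   extern int cxl_send_vendor_cmd(int fd, uint16_t opcode,
++    #                                  const void *in, size_t in_len,
++    #                                  void *out, size_t out_len);
++    #
++    #   static int hct_drain(int fd, uint8_t inst)
++    #   {
++    #           uint8_t in[2] = { inst, 1 };  /* HCT_INST, NUM_ENTRIES_TO_READ */
++    #           uint8_t out[4096];
++    #
++    #           do {
++    #                   if (cxl_send_vendor_cmd(fd, 0xC605, in, sizeof(in),
++    #                                           out, sizeof(out)) < 0)
++    #                           return -1;
++    #                   /* out[0] = BUF_END, out[1] = NUM_BUF_ENTRIES,
++    #                    * entries start at offset 0x04 */
++    #           } while (!out[0]);
++    #           return 0;
++    #   }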
++
++      input_payload:
++        - ipl_name: Read Buffer Input Payload
++          ipl_mnemonic: HCT_READ_BUFFER_IN_PL
++          ipl_size_bytes: 2
++
++          parameters:
++            - ipl_par_name: HCT Instance
++              ipl_par_mnemonic: HCT_INST
++              ipl_offset: 0x00
++              ipl_length: 1
++              ipl_description: >
++                The number of the trace buffer instance the caller is interested in.
++
++            - ipl_par_name: Number of buffer entries to read
++              ipl_par_mnemonic: NUM_ENTRIES_TO_READ
++              ipl_offset: 0x01
++              ipl_length: 1
++              ipl_description: >
++                The number of buffer entries the caller wants to read.
++
++      output_payload:
++        - opl_name: Read Buffer Output Payload
++          opl_mnemonic: HCT_READ_BUFFER_OUT_PL
++          opl_size_bytes: 4+
++
++          parameters:
++            - opl_par_name: Buffer End Reached
++              opl_par_mnemonic: BUF_END
++              opl_offset: 0x00
++              opl_length: 1
++              opl_description: >
++                This flag signals that the buffer end has been reached.
++
++            - opl_par_name: Number of buffer entries
++              opl_par_mnemonic: NUM_BUF_ENTRIES
++              opl_offset: 0x01
++              opl_length: 1
++              opl_description: >
++                Number of buffer entries that follow in the payload.
++
++            - opl_par_name: Reserved
++              opl_par_mnemonic: RSVD
++              opl_offset: 0x02
++              opl_length: 2
++
++            - opl_par_name: Buffer Entry
++              opl_par_mnemonic: BUF_ENTRY
++              opl_offset: 0x04
++              opl_length: VARIES
++              opl_description: |
++                Depending on the 'NUM_ENTRIES_TO_READ' parameter of the input payload
++                or the provided output payload buffer size, this section of the
++                payload can occur multiple times.
++
++                The structure of an entry is as follows:
++
++                HIF buffer entry data:
++                The trace buffer RAM is 256 bits (32 bytes) wide, so bits [4:2] of the
++                address are used to address inside the 256-bit word read from the RAM.
++
++                RAM data [30:0]    31-bit Timestamp
++                RAM data [31]      Post Trigger Flag
++                RAM data [63:32]   HIF Write Command (wdata_ptr, autopre fields)
++                RAM data [95:64]   HIF Write Command (addr, pri, latency, length, wdata_ptr fields)
++                RAM data [127:96]  HIF Write Command (cmd_type, addr fields)
++                RAM data [159:128] HIF Write Command (valid flag), HIF Read Command (read token, length, autopre fields)
++                RAM data [191:160] HIF Read Command (read token field)
++                RAM data [223:192] HIF Read Command (addr, pri, latency, read token fields)
++                RAM data [255:224] HIF Read Command (valid flag, cmd_type, addr fields)
++
++                FLIT buffer entry data:
++
++                The trace buffer RAM is 560 bits (70 bytes) wide; however, in
++                order to address it in 32-bit slices, the RAM is assumed to be
++                576 bits (72 bytes) wide. The 16 MSB bits [575:560] are ignored.
++ ++ RAM data [31:0] CXL Flit Slot0, Bytes 3-0 ++ RAM data [63:32] CXL Flit Slot0, Bytes 7-4 ++ RAM data [95:64] CXL Flit Slot0, Bytes 11-8 ++ RAM data [127:96] CXL Flit Slot0, Bytes 15-12 ++ RAM data [159:128] CXL Flit Slot1, Bytes 3-0 ++ RAM data [191:160] CXL Flit Slot1, Bytes 7-4 ++ RAM data [223:192] CXL Flit Slot1, Bytes 11-8 ++ RAM data [255:224] CXL Flit Slot1, Bytes 15-12 ++ RAM data [287:256] CXL Flit Slot2, Bytes 3-0 ++ RAM data [319:288] CXL Flit Slot2, Bytes 7-4 ++ RAM data [351:320] CXL Flit Slot2, Bytes 11-8 ++ RAM data [383:352] CXL Flit Slot2, Bytes 15-12 ++ RAM data [415:384] CXL Flit Slot3, Bytes 3-0 ++ RAM data [447:416] CXL Flit Slot3, Bytes 7-4 ++ RAM data [480:448] CXL Flit Slot3, Bytes 11-8 ++ RAM data [511:480] CXL Flit Slot3, Bytes 15-12 ++ RAM data [527:512] CXL FLIT 16-bit CRC ++ RAM data [558:528] 31-bit Timestamp ++ RAM data [559] Post Trigger Flag ++ RAM data [575:560] (16-bits) 16-bit Unused ++ ++ - opcode_name: Enable HIF/CXL Trace Buffer Instance ++ mnemonic: HCT_ENABLE ++ opcode: 0xC606 ++ opcode_description: > ++ This opcode is used to enable and activate a trace buffer instance. This ++ action is mandatory in order to be able to use a trace buffer. ++ ++ input_payload: ++ - ipl_name: Enable Trace Buffer Input Payload ++ ipl_mnemonic: HCT_ENABLE_IN_PL ++ ipl_size_bytes: 1 ++ ++ parameters: ++ - ipl_par_name: HCT Instance ++ ipl_par_mnemonic: HCT_INST ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ The number of the trace buffer instance that is to be enabled. ++ ++- command_set_name: LTMON Commands ++ command_set_description: LTSSM Monitor Commands ++ command_set_opcodes: ++ - opcode_name: LTMON Capture Clear ++ mnemonic: LTMON_CAPTURE_CLEAR ++ opcode: 0xC70A ++ ++ input_payload: ++ - ipl_name: LTMON Capture Clear Input Payload ++ ipl_mnemonic: LTMON_CAPT_CLEAR_IN_PL ++ ipl_size_bytes: 2 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - opcode_name: LTMON Capture ++ mnemonic: LTMON_CAPTURE ++ opcode: 0xC70C ++ ++ input_payload: ++ - ipl_name: LTMON Capture Input Payload ++ ipl_mnemonic: LTMON_CAPT_IN_PL ++ ipl_size_bytes: 8 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Capture Mode ++ ipl_par_mnemonic: CAPT_MODE ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: Ignore Sub Change ++ ipl_par_mnemonic: IGNORE_SUB_CHG ++ ipl_offset: 0x03 ++ ipl_length: 2 ++ ++ - ipl_par_name: Ignore Receiver L0 Change ++ ipl_par_mnemonic: IGNORE_RXL0_CHG ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ - ipl_par_name: Trigger Source Selection ++ ipl_par_mnemonic: TRIG_SRC_SEL ++ ipl_offset: 0x06 ++ ipl_length: 1 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x07 ++ ipl_length: 1 ++ ++ - opcode_name: LTMON Capture Freeze and Restore ++ mnemonic: LTMON_CAPTURE_FREEZE_AND_RESTORE ++ opcode: 0xC70E ++ ++ input_payload: ++ - ipl_name: LTMON Capture Freeze Input Payload ++ ipl_mnemonic: LTMON_CAPT_FREEZE_RESTORE_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Freeze Restore ++ 
ipl_par_mnemonic: FREEZE_RESTORE ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - opcode_name: LTMON L2R Count Dump ++ mnemonic: LTMON_L2R_COUNT_DUMP ++ opcode: 0xC710 ++ ++ input_payload: ++ - ipl_name: LTMON L2R Count Dump Input Payload ++ ipl_mnemonic: LTMON_L2R_CNT_DUMP_IN_PL ++ ipl_size_bytes: 2 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: LTMON L2R Count Dump Output Payload ++ opl_mnemonic: LTMON_L2R_CNT_DUMP_OUT_PL ++ opl_size_bytes: 4 ++ ++ parameters: ++ - opl_par_name: Dump Count ++ opl_par_mnemonic: DUMP_CNT ++ opl_offset: 0x00 ++ opl_length: 4 ++ ++ - opcode_name: LTMON L2R Count Clear ++ mnemonic: LTMON_L2R_COUNT_CLEAR ++ opcode: 0xC711 ++ ++ input_payload: ++ - ipl_name: LTMON L2R Count Clear Input Payload ++ ipl_mnemonic: LTMON_L2R_CNT_CLEAR_IN_PL ++ ipl_size_bytes: 2 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - opcode_name: LTMON Basic Configuration ++ mnemonic: LTMON_BASIC_CFG ++ opcode: 0xC712 ++ ++ input_payload: ++ - ipl_name: LTMON Basic Configuration Input Payload ++ ipl_mnemonic: LTMON_BASE_CFG_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Tick Count ++ ipl_par_mnemonic: TICK_CNT ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: Global Time Stamp ++ ipl_par_mnemonic: GLOBAL_TS ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - opcode_name: LTMON Watch ++ mnemonic: LTMON_WATCH ++ opcode: 0xC713 ++ ++ input_payload: ++ - ipl_name: LTMON Watch Input Payload ++ ipl_mnemonic: LTMON_WATCH_IN_PL ++ ipl_size_bytes: 12 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Watch ID ++ ipl_par_mnemonic: WATCH_ID ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: Watch Mode ++ ipl_par_mnemonic: WATCH_MODE ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - ipl_par_name: Source Maj State ++ ipl_par_mnemonic: SRC_MAJ_ST ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ - ipl_par_name: Source Min State ++ ipl_par_mnemonic: SRC_MIN_ST ++ ipl_offset: 0x05 ++ ipl_length: 1 ++ ++ - ipl_par_name: Source L0 State ++ ipl_par_mnemonic: SRC_L0_ST ++ ipl_offset: 0x06 ++ ipl_length: 1 ++ ++ - ipl_par_name: Destination Maj State ++ ipl_par_mnemonic: DST_MAJ_ST ++ ipl_offset: 0x07 ++ ipl_length: 1 ++ ++ - ipl_par_name: Destination Min State ++ ipl_par_mnemonic: DST_MIN_ST ++ ipl_offset: 0x08 ++ ipl_length: 1 ++ ++ - ipl_par_name: Destination L0 State ++ ipl_par_mnemonic: DST_L0_ST ++ ipl_offset: 0x09 ++ ipl_length: 1 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x0A ++ ipl_length: 2 ++ ++ - opcode_name: LTMON Capture Status ++ mnemonic: LTMON_CAPTURE_STAT ++ opcode: 0xC714 ++ ++ input_payload: ++ - ipl_name: LTMON Capture Status Input Payload ++ ipl_mnemonic: LTMON_CAPT_STAT_IN_PL ++ ipl_size_bytes: 2 ++ ++ parameters: ++ - 
ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: LTMON Capture Status Output Payload ++ opl_mnemonic: LTMON_CAPT_STAT_OUT_PL ++ opl_size_bytes: 12 ++ ++ parameters: ++ - opl_par_name: Trigger Count ++ opl_par_mnemonic: TRIG_CNT ++ opl_offset: 0x00 ++ opl_length: 2 ++ ++ - opl_par_name: Watch 0 Trigger Count ++ opl_par_mnemonic: WATCH0_TRIG_CNT ++ opl_offset: 0x02 ++ opl_length: 2 ++ ++ - opl_par_name: Watch 1 Trigger Count ++ opl_par_mnemonic: WATCH1_TRIG_CNT ++ opl_offset: 0x04 ++ opl_length: 2 ++ ++ - opl_par_name: Time Stamp ++ opl_par_mnemonic: TIME_STAMP ++ opl_offset: 0x06 ++ opl_length: 2 ++ ++ - opl_par_name: Trigger Source Status ++ opl_par_mnemonic: TRIG_SRC_STAT ++ opl_offset: 0x08 ++ opl_length: 1 ++ ++ - opl_par_name: reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 0x09 ++ opl_length: 3 ++ ++ - opcode_name: LTMON Capture Log Dump ++ mnemonic: LTMON_CAPTURE_LOG_DMP ++ opcode: 0xC715 ++ ++ input_payload: ++ - ipl_name: LTMON Capture Log Dump Input Payload ++ ipl_mnemonic: LTMON_CAPT_LOG_DUMP_IN_PL ++ ipl_size_bytes: 8 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Dump Index ++ ipl_par_mnemonic: DUMP_IDX ++ ipl_offset: 0x02 ++ ipl_length: 2 ++ ++ - ipl_par_name: Dump Count ++ ipl_par_mnemonic: DUMP_CNT ++ ipl_offset: 0x04 ++ ipl_length: 2 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x06 ++ ipl_length: 2 ++ ++ output_payload: ++ - opl_name: LTMON Capture Log Dump Output Payload ++ opl_mnemonic: LTMON_CAPT_LOG_DUMP_OUT_PL ++ opl_size_bytes: 16 ++ ++ parameters: ++ - opl_par_name: LTMON Data ++ opl_par_mnemonic: data ++ opl_offset: 0 ++ opl_length: 16 ++ ++ bit_fields: ++ ++ # LTSSM_RAM_REG_1 ++ - opl_bf_name: rx l0s state ++ opl_bf_mnemonic: RX_L0S_SUBSTATE ++ opl_bit: "2:0" ++ ++ - opl_bf_name: Monitored LTSSM Substate ++ opl_bf_mnemonic: SUBSTATE ++ opl_bit: "6:3" ++ ++ - opl_bf_name: Monitored LTSSM Main State ++ opl_bf_mnemonic: MAIN_STATE ++ opl_bit: "12:7" ++ ++ - opl_bf_name: "link rate, 0: 2.5G, 1: 5G, 2: 8G, 3: 16G, 4: 32G" ++ opl_bf_mnemonic: LINK_RATE ++ opl_bit: "15:13" ++ ++ - opl_bf_name: indication for roll over of 37-bit timer counter ++ opl_bf_mnemonic: TIMESTAMP_ROLLOVER ++ opl_bit: "16:16" ++ ++ - opl_bf_name: "link width 0: 0, 1: 1, 2: 2, 3: 4, 4: 8, 5: 16" ++ opl_bf_mnemonic: LINK_WIDTH ++ opl_bit: "19:17" ++ ++ - opl_bf_name: unused bits ++ opl_bf_mnemonic: RSVD ++ opl_bit: "31:20" ++ ++ # LTSSM_RAM_REG_2 ++ - opl_bf_name: 32 higher time counter bits ++ opl_bf_mnemonic: TIMESTAMP_31_0 ++ opl_bit: "31:0" ++ ++ # LTSSM_RAM_REG_3 ++ - opl_bf_name: time stamp high bits ++ opl_bf_mnemonic: TIMESTAMP_36_32 ++ opl_bit: "4:0" ++ ++ - opl_bf_name: unused bits ++ opl_bf_mnemonic: RSVD ++ opl_bit: "31:5" ++ ++ # LTSSM_RAM_REG_4 ++ - opl_bf_name: conditions lead to recorded state ++ opl_bf_mnemonic: ARC ++ opl_bit: "31:0" ++ ++ - opcode_name: LTMON Capture Trigger ++ mnemonic: LTMON_CAPTURE_TRIGGER ++ opcode: 0xC716 ++ ++ input_payload: ++ - ipl_name: LTMON Capture Trigger Input Payload ++ ipl_mnemonic: LTMON_TRIG_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ 
ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Trigger Source ++ ipl_par_mnemonic: TRIG_SRC ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - opcode_name: LTMON Enable ++ mnemonic: LTMON_ENABLE ++ opcode: 0xC780 ++ ++ input_payload: ++ - ipl_name: LTMON Enable Input Payload ++ ipl_mnemonic: LTMON_ENABLE_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Enable ++ ipl_par_mnemonic: ENABLE ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++- command_set_name: Ordered Set Analyzer ++ command_set_description: OSA Commands ++ command_set_opcodes: ++ - opcode_name: OSA Type Trigger Configuration ++ mnemonic: OSA_OS_TYPE_TRIG_CFG ++ opcode: 0xC800 ++ ++ input_payload: ++ - ipl_name: OSA Type Trigger Config Input Payload ++ ipl_mnemonic: OSA_OS_TYPE_TRIG_CFG_IN_PL ++ ipl_size_bytes: 12 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x02 ++ ipl_length: 2 ++ ++ - ipl_par_name: Lane Mask ++ ipl_par_mnemonic: LANE_MASK ++ ipl_offset: 0x04 ++ ipl_length: 2 ++ ++ - ipl_par_name: Lane Direction Mask (see OSA_LANE_DIR_BITMSK_*) ++ ipl_par_mnemonic: LANE_DIR_MASK ++ ipl_offset: 0x06 ++ ipl_length: 1 ++ ++ - ipl_par_name: Link Rate mask (see OSA_LINK_RATE_BITMSK_*) ++ ipl_par_mnemonic: RATE_MASK ++ ipl_offset: 0x07 ++ ipl_length: 1 ++ ++ - ipl_par_name: OS Type mask (see OSA_OS_TYPE_TRIG_BITMSK_*) ++ ipl_par_mnemonic: OS_TYPE_MASK ++ ipl_offset: 0x08 ++ ipl_length: 2 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x0A ++ ipl_length: 2 ++ ++ - opcode_name: OSA Pattern Trigger Configuration ++ mnemonic: OSA_OS_PATT_TRIG_CFG ++ opcode: 0xC801 ++ ++ input_payload: ++ - ipl_name: OSA Pattern Trigger Config Input Payload ++ ipl_mnemonic: OSA_PATT_TRIG_CFG_IN_PL ++ ipl_size_bytes: 40 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x02 ++ ipl_length: 2 ++ ++ - ipl_par_name: Lane Mask ++ ipl_par_mnemonic: LANE_MASK ++ ipl_offset: 0x04 ++ ipl_length: 2 ++ ++ - ipl_par_name: Lane Direction mask (see OSA_LANE_DIR_BITMSK_*) ++ ipl_par_mnemonic: LANE_DIR_MASK ++ ipl_offset: 0x06 ++ ipl_length: 1 ++ ++ - ipl_par_name: Link Rate mask (see OSA_LINK_RATE_BITMSK_*) ++ ipl_par_mnemonic: RATE_MASK ++ ipl_offset: 0x07 ++ ipl_length: 1 ++ ++ - ipl_par_name: Pattern Match Value [CXL_MEM_OSA_DATA_LEN_DW] ++ ipl_par_mnemonic: PATT_VAL ++ ipl_offset: 0x08 ++ ipl_unit_size: 4 ++ ipl_units_num: 4 ++ ipl_length: 16 ++ ++ - ipl_par_name: Pattern Match mask [CXL_MEM_OSA_DATA_LEN_DW] ++ ipl_par_mnemonic: PATT_MASK ++ ipl_offset: 0x18 ++ ipl_unit_size: 4 ++ ipl_units_num: 4 ++ ipl_length: 16 ++ ++ - opcode_name: OSA Miscellaneous Trigger Configuration ++ mnemonic: OSA_MISC_TRIG_CFG ++ opcode: 0xC802 ++ ++ input_payload: ++ - ipl_name: OSA 
Miscellaneous Trigger Configuration Input Payload ++ ipl_mnemonic: OSA_MISC_TRIG_CFG_IN_PL ++ ipl_size_bytes: 8 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x02 ++ ipl_length: 2 ++ ++ - ipl_par_name: Trigger Enable Mask. ++ ipl_par_mnemonic: TRIG_EN_MASK ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ipl_description: > ++ Bitmask of trigger types to enable. ++ Use cxl_mem_osa_misc_trig_type_enum values for bit positions. ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x05 ++ ipl_length: 3 ++ ++ - opcode_name: OSA Capture Control ++ mnemonic: OSA_CAP_CTRL ++ opcode: 0xC803 ++ ++ input_payload: ++ - ipl_name: OSA Capture Control Input Payload ++ ipl_mnemonic: OSA_CAP_CTRL_IN_PL ++ ipl_size_bytes: 16 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x02 ++ ipl_length: 2 ++ ++ - ipl_par_name: Lane Mask ++ ipl_par_mnemonic: LANE_MASK ++ ipl_offset: 0x04 ++ ipl_length: 2 ++ ++ - ipl_par_name: Lane Direction Mask (see OSA_LANE_DIR_BITMSK_*) ++ ipl_par_mnemonic: LANE_DIR_MASK ++ ipl_offset: 0x06 ++ ipl_length: 1 ++ ++ - ipl_par_name: Drop Single OS's (TS1/TS2/FTS/CTL_SKP) ++ ipl_par_mnemonic: DROP_SINGLE_OS ++ ipl_offset: 0x07 ++ ipl_length: 1 ++ ++ - ipl_par_name: Capture Stop Mode (see osa_cap_stop_mode_enum) ++ ipl_par_mnemonic: STOP_MODE ++ ipl_offset: 0x08 ++ ipl_length: 1 ++ ++ - ipl_par_name: Snapshot Mode Enable ++ ipl_par_mnemonic: SNAPSHOT_MODE ++ ipl_offset: 0x09 ++ ipl_length: 1 ++ ++ - ipl_par_name: Number of post-trigger entries ++ ipl_par_mnemonic: POST_TRIG_NUM ++ ipl_offset: 0x0A ++ ipl_length: 2 ++ ipl_description: > ++ Number of post-trigger entries to capture. ++ This is multiplied by the number of RAMs allocated to the lane. 
++ ++ - ipl_par_name: OS Type mask (see OSA_OS_TYPE_CAP_BITMSK_*) ++ ipl_par_mnemonic: OS_TYPE_MASK ++ ipl_offset: 0x0C ++ ipl_length: 2 ++ ++ - ipl_par_name: Lane Mux ++ ipl_par_mnemonic: LANE_MUX ++ ipl_offset: 0x0E ++ ipl_length: 1 ++ ipl_description: > ++ 00: Captures RX and TX of Lanes 0 to 7 ++ 01: Captures All Lanes TX only ++ 10: Captures All Lanes RX only ++ 11: Captures RX and TX of Lanes 8 to 15 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x0F ++ ipl_length: 1 ++ ++ - opcode_name: OSA Configuration Dump ++ mnemonic: OSA_CFG_DUMP ++ opcode: 0xC804 ++ ++ input_payload: ++ - ipl_name: OSA Configuration Dump Input Payload ++ ipl_mnemonic: OSA_CFG_DUMP_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x02 ++ ipl_length: 2 ++ ++ output_payload: ++ - opl_name: OSA Configuration Dump Output Payload ++ opl_mnemonic: OSA_CFG_DUMP_OUT_PL ++ opl_size_bytes: 60 ++ ++ parameters: ++ - opl_par_name: OS type triggering - lane mask ++ opl_par_mnemonic: os_type_trig_cfg_lane_mask ++ opl_offset: 0 ++ opl_length: 2 ++ ++ - opl_par_name: OS type triggering - lane direction mask (see OSA_LANE_DIR_BITMSK_*) ++ opl_par_mnemonic: os_type_trig_cfg_lane_dir_mask ++ opl_offset: 2 ++ opl_length: 1 ++ ++ - opl_par_name: OS type triggering - link rate mask (see OSA_LINK_RATE_BITMSK_*) ++ opl_par_mnemonic: os_type_trig_cfg_rate_mask ++ opl_offset: 3 ++ opl_length: 1 ++ ++ - opl_par_name: OS type triggering - OS type mask (see OSA_OS_TYPE_TRIG_BITMSK_*) ++ opl_par_mnemonic: os_type_trig_cfg_os_type_mask ++ opl_offset: 4 ++ opl_length: 2 ++ ++ - opl_par_name: Reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 6 ++ opl_length: 2 ++ ++ - opl_par_name: OS pattern triggering - lane mask ++ opl_par_mnemonic: os_patt_trig_cfg_lane_mask ++ opl_offset: 8 ++ opl_length: 2 ++ ++ - opl_par_name: OS pattern triggering - lane direction mask (see OSA_LANE_DIR_BITMSK_*) ++ opl_par_mnemonic: os_patt_trig_cfg_lane_dir_mask ++ opl_offset: 10 ++ opl_length: 1 ++ ++ - opl_par_name: OS pattern triggering - link rate mask (see OSA_LINK_RATE_BITMSK_*) ++ opl_par_mnemonic: os_patt_trig_cfg_rate_mask ++ opl_offset: 11 ++ opl_length: 1 ++ ++ - opl_par_name: OS pattern triggering - pattern match value ++ opl_par_mnemonic: os_patt_trig_cfg_val ++ opl_offset: 12 ++ opl_length: 16 ++ opl_unit_size: 4 ++ opl_units_num: 4 ++ ++ - opl_par_name: OS pattern triggering - pattern match mask ++ opl_par_mnemonic: os_patt_trig_cfg_mask ++ opl_offset: 28 ++ opl_length: 16 ++ opl_unit_size: 4 ++ opl_units_num: 4 ++ ++ - opl_par_name: miscellaneous triggering ++ opl_par_mnemonic: misc_trig_cfg_trig_en_mask ++ opl_offset: 44 ++ opl_length: 1 ++ opl_description: > ++ bitmask of enabled trigger types ++ (see cxl_mem_osa_misc_trig_type_enum values for bit positions) ++ ++ - opl_par_name: Reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 45 ++ opl_length: 3 ++ ++ - opl_par_name: capture control - lane mask ++ opl_par_mnemonic: cap_ctrl_lane_mask ++ opl_offset: 48 ++ opl_length: 2 ++ ++ - opl_par_name: capture control - lane direction mask (see OSA_LANE_DIR_BITMSK_*) ++ opl_par_mnemonic: cap_ctrl_lane_dir_mask ++ opl_offset: 50 ++ opl_length: 1 ++ ++ - opl_par_name: capture control - drop single OS's (TS1/TS2/FTS/CTL_SKP) ++ opl_par_mnemonic: cap_ctrl_drop_single_os ++ 
opl_offset: 51 ++ opl_length: 1 ++ ++ - opl_par_name: capture control - capture stop mode ++ opl_par_mnemonic: cap_ctrl_stop_mode ++ opl_offset: 52 ++ opl_length: 1 ++ ++ - opl_par_name: capture control - snapshot mode enable ++ opl_par_mnemonic: cap_ctrl_snapshot_mode ++ opl_offset: 53 ++ opl_length: 1 ++ ++ - opl_par_name: capture control ++ opl_par_mnemonic: cap_ctrl_post_trig_num ++ opl_offset: 54 ++ opl_length: 2 ++ opl_description: > ++ Number of post-trigger entries to capture. ++ This is multiplied by the number of RAMs allocated to the lane. ++ ++ - opl_par_name: capture control - OS type mask (see OSA_OS_TYPE_CAP_BITMSK_*) ++ opl_par_mnemonic: cap_ctrl_os_type_mask ++ opl_offset: 56 ++ opl_length: 2 ++ ++ - opl_par_name: Reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 58 ++ opl_length: 2 ++ ++ - opcode_name: OSA Analyzer Operation ++ mnemonic: OSA_ANA_OP ++ opcode: 0xC805 ++ ++ input_payload: ++ - ipl_name: OSA Analyzer Input Payload ++ ipl_mnemonic: OSA_ANA_OP_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Operation (see osa_op_enum) ++ ipl_par_mnemonic: OP ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - opcode_name: OSA Status Query ++ mnemonic: OSA_STATUS_QUERY ++ opcode: 0xC806 ++ ++ input_payload: ++ - ipl_name: OSA Status Query Input Payload ++ ipl_mnemonic: OSA_STAT_QUERY_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x02 ++ ipl_length: 2 ++ ++ output_payload: ++ ++ - opl_name: OSA Status Query Output Payload ++ opl_mnemonic: OSA_STAT_QUERY_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ ++ - opl_par_name: OSA state (see osa_state_enum) ++ opl_par_mnemonic: state ++ opl_offset: 0x00 ++ opl_length: 1 ++ ++ - opl_par_name: lane that caused the trigger ++ opl_par_mnemonic: lane_id ++ opl_offset: 0x01 ++ opl_length: 1 ++ ++ - opl_par_name: direction of lane that caused the trigger (see osa_lane_dir_enum) ++ opl_par_mnemonic: lane_dir ++ opl_offset: 0x02 ++ opl_length: 1 ++ ++ - opl_par_name: reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 0x03 ++ opl_length: 1 ++ ++ - opl_par_name: trigger reason mask (see OSA_TRIG_REASON_BITMSK_*) ++ opl_par_mnemonic: trig_reason_mask ++ opl_offset: 0x04 ++ opl_length: 2 ++ ++ - opl_par_name: reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 0x06 ++ opl_length: 2 ++ ++ - opcode_name: OSA Data Read ++ mnemonic: OSA_DATA_READ ++ opcode: 0xC807 ++ ++ input_payload: ++ - ipl_name: OSA Data Read Input Payload ++ ipl_mnemonic: OSA_DATA_READ_IN_PL ++ ipl_size_bytes: 8 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Lane ID ++ ipl_par_mnemonic: lane_id ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: lane direction (see osa_lane_dir_enum) ++ ipl_par_mnemonic: lane_dir ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - ipl_par_name: index of the first entry to read ++ ipl_par_mnemonic: start_entry ++ 
ipl_offset: 0x04 ++ ipl_length: 2 ++ ++ - ipl_par_name: maximum number of entries to read ++ ipl_par_mnemonic: num_entries ++ ipl_offset: 0x06 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x07 ++ ipl_length: 1 ++ ++ output_payload: ++ - opl_name: OSA Data Read Output Payload ++ opl_mnemonic: OSA_DATA_READ_OUT_PL ++ opl_size_bytes: 12+ ++ ++ parameters: ++ - opl_par_name: total number of entries read ++ opl_par_mnemonic: entries_read ++ opl_offset: 0 ++ opl_length: 1 ++ ++ - opl_par_name: CXL.MEM ID ++ opl_par_mnemonic: cxl_mem_id ++ opl_offset: 1 ++ opl_length: 1 ++ ++ - opl_par_name: lane ID ++ opl_par_mnemonic: lane_id ++ opl_offset: 2 ++ opl_length: 1 ++ ++ - opl_par_name: lane direction (see osa_lane_dir_enum) ++ opl_par_mnemonic: lane_dir ++ opl_offset: 3 ++ opl_length: 1 ++ ++ - opl_par_name: index of the next entry to read ++ opl_par_mnemonic: next_entry ++ opl_offset: 4 ++ opl_length: 2 ++ ++ - opl_par_name: number of entries remaining ++ opl_par_mnemonic: entries_rem ++ opl_offset: 6 ++ opl_length: 2 ++ ++ - opl_par_name: wrap indicator ++ opl_par_mnemonic: wrap ++ opl_offset: 8 ++ opl_length: 1 ++ opl_description: > ++ Indicates whether any older entries may have been discarded due to a wrap ++ in the allocated RAM(s). Only applicable when Snapshot Mode is disabled. ++ ++ - opl_par_name: reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 9 ++ opl_length: 3 ++ ++ - opl_par_name: data ++ opl_par_mnemonic: data ++ opl_offset: 12 ++ opl_unit_size: 4 ++ opl_units_num: VARIES ++ opl_length: VARIES ++ ++ - opcode_name: OSA Access Release ++ mnemonic: OSA_ACCESS_REL ++ opcode: 0xC808 ++ ++ input_payload: ++ - ipl_name: OSA Access Release Input Payload ++ ipl_mnemonic: OSA_ACC_REL_IN_PL ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CXL.MEM ID ++ ipl_par_mnemonic: cxl_mem_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: Reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x02 ++ ipl_length: 2 ++ ++- command_set_name: Performance Counters TL/AL ++ command_set_description: > ++ To configure and operate the performance counters the commands/opcodes ++ in this section can be used to: ++ ++ - Setup LTIF performance counter ++ - Setup HIF perf counter ++ - Read a specific counter value ++ - Clear a specific counter ++ - Latch the current value of a specific counter ++ - Read the latched value of a specific counter ++ ++ Please find a detailed description in the opcode section below about the ++ usage of the available commands. ++ ++ command_set_opcodes: ++ - opcode_name: MTA set LTIF performance counter ++ mnemonic: PERFCNT_MTA_LTIF_SET ++ opcode: 0xCA00 ++ opcode_description: > ++ This opcode provides access to the configuration field of the LTIF ++ performance counter. ++ The counter is set to increment when the event signal is high. ++ ++ input_payload: ++ - ipl_name: MTA set LTIF perf counter Input Payload ++ ipl_mnemonic: PERFCNT_MTA_LTIF_SET_IN_PL ++ ipl_size_bytes: 20 ++ ++ parameters: ++ - ipl_par_name: Counter ++ ipl_par_mnemonic: COUNTER ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ipl_description: > ++ Selects counter instances. 
++ ++ enumerators: ++ - ipl_en_name: PMON0 is implemented on LTIF_RCMD0 interface ++ ipl_en_mnemonic: PMON0_LTIF_RCMD0 ++ ipl_value: 0x00 ++ ++ - ipl_en_name: PMON1 is implemented on LTIF_RCMD1 interface ++ ipl_en_mnemonic: PMON1_LTIF_RCMD1 ++ ipl_value: 0x01 ++ ++ - ipl_en_name: PMON2 is implemented on LTIF_WCMD interface ++ ipl_en_mnemonic: PMON2_LTIF_WCMD ++ ipl_value: 0x02 ++ ++ - ipl_par_name: Match Value ++ ipl_par_mnemonic: MATCH_VALUE ++ ipl_offset: 0x04 ++ ipl_length: 4 ++ ipl_description: > ++ This field specifies which signal out of 3 signals should be monitored and trigger the event counter. ++ Value 0 allows to pass events unfiltered and to count all types of LTIF commands. ++ ++ Bit 0 - enable filter for CXL opcode ++ Bit 1 - enable filter for CXL meta field ++ Bit 2 - enable filter for CXL meta value ++ ++ bit_fields: ++ ++ - ipl_bf_name: Count event trigger when programmed CXL OPCODE matches ++ ipl_bf_mnemonic: opcode ++ ipl_bit: "0" ++ ++ - ipl_bf_name: Count event trigger when programmed META_FIELD matches ++ ipl_bf_mnemonic: meta_field ++ ipl_bit: "1" ++ ++ - ipl_bf_name: Count event trigger when programmed META_VALUE matches ++ ipl_bf_mnemonic: meta_value ++ ipl_bit: "2" ++ ++ - ipl_par_name: Opcode ++ ipl_par_mnemonic: OPCODE ++ ipl_offset: 0x08 ++ ipl_length: 4 ++ ipl_description: > ++ Monitor an event for a programmed CXL OPCODE ++ ++ - ipl_par_name: Meta Field ++ ipl_par_mnemonic: META_FIELD ++ ipl_offset: 0x0c ++ ipl_length: 4 ++ ipl_description: > ++ Monitor an event for a programmed CXL meta field. ++ ++ - ipl_par_name: Meta Value ++ ipl_par_mnemonic: META_VALUE ++ ipl_offset: 0x10 ++ ipl_length: 4 ++ ipl_description: > ++ Monitor an event for a programmed CXL meta value. ++ ++ - opcode_name: MTA get performance counter ++ mnemonic: PERFCNT_MTA_GET ++ opcode: 0xCA01 ++ opcode_description: > ++ Reads current value of a counter instance. ++ ++ input_payload: ++ - ipl_name: MTA get performance counter Input Payload ++ ipl_mnemonic: PERFCNT_MTA_GET_IN_PL ++ ipl_size_bytes: 5 ++ ++ parameters: ++ - ipl_par_name: Type ++ ipl_par_mnemonic: TYPE ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ Selects the interface type. ++ ++ enumerators: ++ - ipl_en_name: LTIF ++ ipl_en_mnemonic: LTIF ++ ipl_value: 0x00 ++ ++ - ipl_en_name: HIF ++ ipl_en_mnemonic: HIF ++ ipl_value: 0x01 ++ ++ - ipl_par_name: Counter ++ ipl_par_mnemonic: COUNTER ++ ipl_offset: 0x01 ++ ipl_length: 4 ++ ipl_description: > ++ Selects the counter instance to read the current counter value from. ++ ++ output_payload: ++ - opl_name: MTA get configuration output Payload ++ opl_mnemonic: PERFCNT_MTA_GET_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ - opl_par_name: Counter ++ opl_par_mnemonic: COUNTER ++ opl_offset: 0x00 ++ opl_length: 8 ++ ++ - opcode_name: MTA get latch value ++ mnemonic: PERFCNT_MTA_LATCH_VAL_GET ++ opcode: 0xCA02 ++ opcode_description: > ++ Reads a latched value of a counter instance. ++ ++ input_payload: ++ - ipl_name: MTA get latch Input Payload ++ ipl_mnemonic: PERFCNT_MTA_LATCH_VAL_GET_IN_PL ++ ipl_size_bytes: 5 ++ ++ parameters: ++ - ipl_par_name: Type ++ ipl_par_mnemonic: TYPE ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ Selects the interface type. 
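++    # Worked example (informative only): reading back LTIF counter instance 2
++    # with PERFCNT_MTA_GET means sending the 5-byte input payload described
++    # here (TYPE at offset 0x00, 4-byte COUNTER at offset 0x01) and receiving
++    # the 8-byte counter value in the output payload. Little-endian encoding
++    # and a little-endian host are assumed; cxl_send_vendor_cmd() is a
++    # hypothetical transport helper, not an existing ndctl/cxl API.
++    #
++    #   #include <stddef.h>
++    #   #include <stdint.h>
++    #
++    #   extern int cxl_send_vendor_cmd(int fd, uint16_t opcode,
++    #                                  const void *in, size_t in_len,
++    #                                  void *out, size_t out_len);
++    #
++    #   static int mta_read_ltif_counter(int fd, uint32_t counter, uint64_t *val)
++    #   {
++    #           uint8_t in[5] = { 0x00 };   /* TYPE = LTIF at offset 0x00 */
++    #
++    #           /* COUNTER occupies offsets 0x01..0x04 */
++    #           in[1] = counter & 0xff;
++    #           in[2] = (counter >> 8) & 0xff;
++    #           in[3] = (counter >> 16) & 0xff;
++    #           in[4] = (counter >> 24) & 0xff;
++    #           /* output payload is the raw 8-byte counter value */
++    #           return cxl_send_vendor_cmd(fd, 0xCA01, in, sizeof(in),
++    #                                      val, sizeof(*val));
++    #   }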
++ ++ enumerators: ++ - ipl_en_name: LTIF ++ ipl_en_mnemonic: LTIF ++ ipl_value: 0x00 ++ ++ - ipl_en_name: HIF ++ ipl_en_mnemonic: HIF ++ ipl_value: 0x01 ++ ++ - ipl_par_name: Counter ++ ipl_par_mnemonic: COUNTER ++ ipl_offset: 0x01 ++ ipl_length: 4 ++ ipl_description: > ++ Selects the counter instance to read the latched counter value from. ++ ++ output_payload: ++ - opl_name: MTA get latch output Payload ++ opl_mnemonic: PERFCNT_MTA_LATCH_VAL_GET_OUT_PL ++ opl_size_bytes: 8 ++ ++ parameters: ++ - opl_par_name: Latch value ++ opl_par_mnemonic: LATCH_VAL ++ opl_offset: 0x00 ++ opl_length: 8 ++ ++ - opcode_name: MTA clear counter ++ mnemonic: PERFCNT_MTA_COUNTER_CLEAR ++ opcode: 0xCA03 ++ opcode_description: > ++ Clears value of a given counter instance. ++ ++ input_payload: ++ - ipl_name: MTA clear counter Input Payload ++ ipl_mnemonic: PERFCNT_MTA_COUNTER_CLEAR_IN_PL ++ ipl_size_bytes: 5 ++ ++ parameters: ++ - ipl_par_name: Type ++ ipl_par_mnemonic: TYPE ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ Selects the interface type. ++ ++ enumerators: ++ - ipl_en_name: LTIF ++ ipl_en_mnemonic: LTIF ++ ipl_value: 0x00 ++ ++ - ipl_en_name: HIF ++ ipl_en_mnemonic: HIF ++ ipl_value: 0x01 ++ ++ - ipl_par_name: Counter ++ ipl_par_mnemonic: COUNTER ++ ipl_offset: 0x01 ++ ipl_length: 4 ++ ipl_description: > ++ Selects the counter instance to be cleared. ++ ++ - opcode_name: MTA latch counter value ++ mnemonic: PERFCNT_MTA_CNT_VAL_LATCH ++ opcode: 0xCA04 ++ opcode_description: > ++ Latches the current value of a given counter instance. ++ ++ input_payload: ++ - ipl_name: MTA latch counter value Input Payload ++ ipl_mnemonic: PERFCNT_MTA_CNT_VAL_LATCH_IN_PL ++ ipl_size_bytes: 5 ++ ++ parameters: ++ - ipl_par_name: Type ++ ipl_par_mnemonic: TYPE ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ Selects the interface type. ++ ++ enumerators: ++ - ipl_en_name: LTIF ++ ipl_en_mnemonic: LTIF ++ ipl_value: 0x00 ++ ++ - ipl_en_name: HIF ++ ipl_en_mnemonic: HIF ++ ipl_value: 0x01 ++ ++ - ipl_par_name: Counter ++ ipl_par_mnemonic: COUNTER ++ ipl_offset: 0x01 ++ ipl_length: 4 ++ ipl_description: > ++ Selects counter instance which value is to be latched. ++ ++ - opcode_name: MTA set HIF performance counter ++ mnemonic: PERFCNT_MTA_HIF_SET ++ opcode: 0xCA05 ++ ++ input_payload: ++ - ipl_name: MTA set HIF performance counter Input Payload ++ ipl_mnemonic: PERFCNT_MTA_HIF_SET_IN_PL ++ ipl_size_bytes: 20 ++ ++ parameters: ++ - ipl_par_name: Counter ++ ipl_par_mnemonic: COUNTER ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ipl_description: > ++ Selects the counter instance. ++ ++ enumerators: ++ - ipl_en_name: PMON0 is implemented on HIF_RCMD0 interface ++ ipl_en_mnemonic: PMON0_HIF_RCMD0 ++ ipl_value: 0x00 ++ ++ - ipl_en_name: PMON1 is implemented on HIF_RCMD1 interface ++ ipl_en_mnemonic: PMON1_HIF_RCMD1 ++ ipl_value: 0x01 ++ ++ - ipl_en_name: PMON2 is implemented on HIF_RCMD2 interface ++ ipl_en_mnemonic: PMON2_HIF_RCMD0 ++ ipl_value: 0x02 ++ ++ - ipl_en_name: PMON3 is implemented on HIF_RCMD3 interface ++ ipl_en_mnemonic: PMON3_HIF_RCMD1 ++ ipl_value: 0x03 ++ ++ - ipl_en_name: PMON4 is implemented on HIF_WCMD0 interface ++ ipl_en_mnemonic: PMON4_HIF_WCMD ++ ipl_value: 0x04 ++ ++ - ipl_en_name: PMON5 is implemented on HIF_WCMD1 interface ++ ipl_en_mnemonic: PMON5_HIF_WCMD ++ ipl_value: 0x05 ++ ++ - ipl_en_name: PMON6 is implemented on HIF_WCMD2 interface ++ ipl_en_mnemonic: PMON6_HIF_WCMD ++ ipl_value: 0x06 ++ ++ - ipl_en_name: PMON7 is implemented on HIF_WCMD3 interface. 
++ ipl_en_mnemonic: PMON7_HIF_WCMD ++ ipl_value: 0x07 ++ ++ - ipl_par_name: Match Value ++ ipl_par_mnemonic: MATCH_VALUE ++ ipl_offset: 0x04 ++ ipl_length: 4 ++ ipl_description: > ++ This field specifies which signal out of 3 signals should be monitored and trigger the event counter. ++ Value 0 allows to pass events unfiltered and to count all types of HIF commands. ++ ++ Bit 0 - enable filter for address ++ Bit 1 - enable filter for request type ++ Bit 2 - enable filter for scrubbing type ++ ++ bit_fields: ++ ++ - ipl_bf_name: Count event trigger when programmed ADDR matches ++ ipl_bf_mnemonic: addr ++ ipl_bit: "0" ++ ++ - ipl_bf_name: Count event trigger when programmed REQ_TYPE matches ++ ipl_bf_mnemonic: req_type ++ ipl_bit: "1" ++ ++ - ipl_bf_name: Count event trigger when programmed SC_TYPE matches ++ ipl_bf_mnemonic: sc_type ++ ipl_bit: "2" ++ ++ - ipl_par_name: Address ++ ipl_par_mnemonic: ADDR ++ ipl_offset: 0x08 ++ ipl_length: 4 ++ ipl_description: > ++ Monitor an event for a programmed upper 12 bits of byte aligned HIF channel address. ++ ++ - ipl_par_name: Req Type ++ ipl_par_mnemonic: REQ_TY ++ ipl_offset: 0x0c ++ ipl_length: 4 ++ ipl_description: > ++ Monitor an event for a programmed value of REQ_TYPE. ++ ++ enumerators: ++ - ipl_en_name: CXL request ++ ipl_en_mnemonic: CXL ++ ipl_value: 0x00 ++ ++ - ipl_en_name: FBE request ++ ipl_en_mnemonic: FBE ++ ipl_value: 0x02 ++ ++ - ipl_en_name: RMW request ++ ipl_en_mnemonic: RMW ++ ipl_value: 0x03 ++ ++ - ipl_par_name: Scrub Req ++ ipl_par_mnemonic: SC_TY ++ ipl_offset: 0x10 ++ ipl_length: 4 ++ ipl_description: > ++ Monitor an event for a programmed scrubbing type request. ++ ++- command_set_name: Performance Counters DDR ++ command_set_description: Performance Counters DDR command set ++ ++ command_set_opcodes: ++ - opcode_name: DDR Generic Select ++ mnemonic: PERFCNT_DDR_GENERIC_SELECT ++ opcode: 0xCA10 ++ opcode_description: > ++ This function is used to setup specific filters and/or events ++ to be captured by PERFCNT_DDR_GENERIC_CAPTURE function. 
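++    # Illustrative flow (informative only): program the filters and events with
++    # PERFCNT_DDR_GENERIC_SELECT, then run PERFCNT_DDR_GENERIC_CAPTURE for a
++    # poll period and read back the eight 32-bit counters. The payload layouts
++    # are the ones defined below; cxl_send_vendor_cmd() is a hypothetical
++    # transport helper and little-endian encoding is assumed.
++    #
++    #   #include <stddef.h>
++    #   #include <stdint.h>
++    #
++    #   extern int cxl_send_vendor_cmd(int fd, uint16_t opcode,
++    #                                  const void *in, size_t in_len,
++    #                                  void *out, size_t out_len);
++    #
++    #   static int ddr_count_rd_wr(int fd, uint8_t ddr_id, uint32_t result[8])
++    #   {
++    #           uint8_t sel[13] = { 0 }, cap[8] = { 0 };
++    #
++    #           sel[0] = ddr_id;   /* DDR_ID */
++    #           sel[1] = 0x0F;     /* CID: no filtering */
++    #           sel[2] = 0x0F;     /* RANK: no filtering */
++    #           sel[3] = 0x0F;     /* BANK: no filtering */
++    #           sel[4] = 0xFF;     /* BANKGROUP: no filtering */
++    #           sel[5] = 0x18;     /* EVENT[0] = perf_op_is_rd */
++    #           sel[6] = 0x19;     /* EVENT[1] = perf_op_is_wr */
++    #           /* EVENT[2..7] stay 0x00 (RAS on PHY0) in this sketch */
++    #           if (cxl_send_vendor_cmd(fd, 0xCA10, sel, sizeof(sel), NULL, 0) < 0)
++    #                   return -1;
++    #
++    #           cap[0] = ddr_id;   /* DDR_ID */
++    #           cap[4] = 100;      /* POLL_PERIOD_MS = 100 ms, little-endian */
++    #           return cxl_send_vendor_cmd(fd, 0xCA11, cap, sizeof(cap),
++    #                                      result, 8 * sizeof(uint32_t));
++    #   }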
++ ++ input_payload: ++ - ipl_name: DDR Generic Select Input Payload ++ ipl_mnemonic: PERFCNT_DDR_GENERIC_SELECT_IN_PL ++ ipl_size_bytes: 13 ++ ++ parameters: ++ - ipl_par_name: DDR instance ++ ipl_par_mnemonic: DDR_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: CID selection ++ ipl_par_mnemonic: CID ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ enumerators: ++ - ipl_en_name: CID 0 filter ++ ipl_en_mnemonic: CID0 ++ ipl_value: 0x01 ++ ++ - ipl_en_name: CID 1 filter ++ ipl_en_mnemonic: CID1 ++ ipl_value: 0x02 ++ ++ - ipl_en_name: CID 2 filter ++ ipl_en_mnemonic: CID2 ++ ipl_value: 0x04 ++ ++ - ipl_en_name: CID 3 filter ++ ipl_en_mnemonic: CID3 ++ ipl_value: 0x08 ++ ++ - ipl_en_name: No filtering by CID ++ ipl_en_mnemonic: NO_FILTERING ++ ipl_value: 0x0F ++ ++ - ipl_par_name: Rank selection ++ ipl_par_mnemonic: RANK ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ enumerators: ++ - ipl_en_name: Rank 0 filter ++ ipl_en_mnemonic: RANK0 ++ ipl_value: 0x01 ++ ++ - ipl_en_name: Rank 1 filter ++ ipl_en_mnemonic: RANK1 ++ ipl_value: 0x02 ++ ++ - ipl_en_name: Rank 2 filter ++ ipl_en_mnemonic: RANK2 ++ ipl_value: 0x04 ++ ++ - ipl_en_name: Rank 3 filter ++ ipl_en_mnemonic: RANK3 ++ ipl_value: 0x08 ++ ++ - ipl_en_name: No filtering by Rank ++ ipl_en_mnemonic: NO_FILTERING ++ ipl_value: 0x0F ++ ++ - ipl_par_name: Bank selection ++ ipl_par_mnemonic: BANK ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ enumerators: ++ - ipl_en_name: Bank 0 filter ++ ipl_en_mnemonic: BA0 ++ ipl_value: 0x01 ++ ++ - ipl_en_name: Bank 1 filter ++ ipl_en_mnemonic: BA1 ++ ipl_value: 0x02 ++ ++ - ipl_en_name: Bank 2 filter ++ ipl_en_mnemonic: BA2 ++ ipl_value: 0x04 ++ ++ - ipl_en_name: Bank 3 filter ++ ipl_en_mnemonic: BA3 ++ ipl_value: 0x08 ++ ++ - ipl_en_name: No filtering by Bank ++ ipl_en_mnemonic: NO_FILTERING ++ ipl_value: 0x0F ++ ++ - ipl_par_name: Bank Group selection ++ ipl_par_mnemonic: BANKGROUP ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ++ enumerators: ++ - ipl_en_name: Bank Group 0 filter ++ ipl_en_mnemonic: BG0 ++ ipl_value: 0x01 ++ ++ - ipl_en_name: Bank Group 1 filter ++ ipl_en_mnemonic: BG1 ++ ipl_value: 0x02 ++ ++ - ipl_en_name: Bank Group 2 filter ++ ipl_en_mnemonic: BG2 ++ ipl_value: 0x04 ++ ++ - ipl_en_name: Bank Group 3 filter ++ ipl_en_mnemonic: BG3 ++ ipl_value: 0x08 ++ ++ - ipl_en_name: Bank Group 4 filter ++ ipl_en_mnemonic: BG4 ++ ipl_value: 0x10 ++ ++ - ipl_en_name: Bank Group 5 filter ++ ipl_en_mnemonic: BG5 ++ ipl_value: 0x20 ++ ++ - ipl_en_name: Bank Group 6 filter ++ ipl_en_mnemonic: BG6 ++ ipl_value: 0x40 ++ ++ - ipl_en_name: Bank Group 7 filter ++ ipl_en_mnemonic: BG7 ++ ipl_value: 0x80 ++ ++ - ipl_en_name: No filtering by Bank Group ++ ipl_en_mnemonic: NO_FILTERING ++ ipl_value: 0xFF ++ ++ - ipl_par_name: Events selection ++ ipl_par_mnemonic: EVENT ++ ipl_offset: 0x05 ++ ipl_unit_size: 1 ++ ipl_units_num: 8 ++ ipl_length: 8 ++ ++ enumerators: ++ - ipl_en_name: RAS on PHY0 ++ ipl_en_mnemonic: RAS_ON_PHY0 ++ ipl_value: 0x00 ++ ++ - ipl_en_name: CAS on PHY0 ++ ipl_en_mnemonic: CAS_ON_PHY0 ++ ipl_value: 0x01 ++ ++ - ipl_en_name: WE on PHY0 ++ ipl_en_mnemonic: WE_ON_PHY0 ++ ipl_value: 0x02 ++ ++ - ipl_en_name: CKE[0] on PHY0 ++ ipl_en_mnemonic: CKE0_ON_PHY0 ++ ipl_value: 0x03 ++ ++ - ipl_en_name: CKE[1] on PHY0 ++ ipl_en_mnemonic: CKE1_ON_PHY0 ++ ipl_value: 0x04 ++ ++ - ipl_en_name: CKE[2] on PHY0 ++ ipl_en_mnemonic: CKE2_ON_PHY0 ++ ipl_value: 0x05 ++ ++ - ipl_en_name: CKE[3] on PHY0 ++ ipl_en_mnemonic: CKE3_ON_PHY0 ++ ipl_value: 0x06 ++ ++ - ipl_en_name: controller_busy output ++ ipl_en_mnemonic: 
CONTROLLER_BUSY ++ ipl_value: 0x07 ++ ++ - ipl_en_name: controller_int output ++ ipl_en_mnemonic: CONTROLLER_INT ++ ipl_value: 0x08 ++ ++ - ipl_en_name: perf_hif_rd_or_wr ++ ipl_en_mnemonic: PERF_HIF_RD_OR_WR ++ ipl_value: 0x09 ++ ++ - ipl_en_name: perf_hif_wr ++ ipl_en_mnemonic: PERF_HIF_WR ++ ipl_value: 0x0A ++ ++ - ipl_en_name: perf_hif_rd ++ ipl_en_mnemonic: PERF_HIF_RD ++ ipl_value: 0x0B ++ ++ - ipl_en_name: perf_hif_rmw ++ ipl_en_mnemonic: PERF_HIF_RMW ++ ipl_value: 0x0C ++ ++ - ipl_en_name: perf_hif_hi_pri_rd ++ ipl_en_mnemonic: PERF_HIF_HI_PRI_RD ++ ipl_value: 0x0D ++ ++ - ipl_en_name: perf_read_bypass ++ ipl_en_mnemonic: PERF_READ_BYPASS ++ ipl_value: 0x0E ++ ++ - ipl_en_name: perf_act_bypass ++ ipl_en_mnemonic: PERF_ACT_BYPASS ++ ipl_value: 0x0F ++ ++ - ipl_en_name: perf_dfi_wr_data_cycles ++ ipl_en_mnemonic: PERF_DFI_WR_DATA_CYCLES ++ ipl_value: 0x10 ++ ++ - ipl_en_name: perf_dfi_rd_data_cycles ++ ipl_en_mnemonic: PERF_DFI_RD_DATA_CYCLES ++ ipl_value: 0x11 ++ ++ - ipl_en_name: perf_hpr_xact_when_critical ++ ipl_en_mnemonic: PERF_HPR_XACT_WHEN_CRITICAL ++ ipl_value: 0x12 ++ ++ - ipl_en_name: perf_lpr_xact_when_critical ++ ipl_en_mnemonic: PERF_LPR_XACT_WHEN_CRITICAL ++ ipl_value: 0x13 ++ ++ - ipl_en_name: perf_wr_xact_when_critical ++ ipl_en_mnemonic: PERF_WR_XACT_WHEN_CRITICAL ++ ipl_value: 0x14 ++ ++ - ipl_en_name: perf_op_is_activate ++ ipl_en_mnemonic: PERF_OP_IS_ACTIVATE ++ ipl_value: 0x15 ++ ++ - ipl_en_name: perf_op_is_rd_or_wr ++ ipl_en_mnemonic: PERF_OP_IS_RD_OR_WR ++ ipl_value: 0x16 ++ ++ - ipl_en_name: perf_op_is_rd_activate ++ ipl_en_mnemonic: PERF_OP_IS_RD_ACTIVATE ++ ipl_value: 0x17 ++ ++ - ipl_en_name: perf_op_is_rd ++ ipl_en_mnemonic: PERF_OP_IS_RD ++ ipl_value: 0x18 ++ ++ - ipl_en_name: perf_op_is_wr ++ ipl_en_mnemonic: PERF_OP_IS_WR ++ ipl_value: 0x19 ++ ++ - ipl_en_name: perf_op_is_precharge ++ ipl_en_mnemonic: PERF_OP_IS_PRECHARGE ++ ipl_value: 0x1A ++ ++ - ipl_en_name: perf_precharge_for_rdwr ++ ipl_en_mnemonic: PERF_PRECHARGE_FOR_RDWR ++ ipl_value: 0x1B ++ ++ - ipl_en_name: perf_precharge_for_other ++ ipl_en_mnemonic: PERF_PRECHARGE_FOR_OTHER ++ ipl_value: 0x1C ++ ++ - ipl_en_name: perf_rdwr_transitions ++ ipl_en_mnemonic: PERF_RDWR_TRANSITIONS ++ ipl_value: 0x1D ++ ++ - ipl_en_name: perf_write_combine ++ ipl_en_mnemonic: PERF_WRITE_COMBINE ++ ipl_value: 0x1E ++ ++ - ipl_en_name: perf_war_hazard ++ ipl_en_mnemonic: PERF_WAR_HAZARD ++ ipl_value: 0x1F ++ ++ - ipl_en_name: perf_raw_hazard ++ ipl_en_mnemonic: PERF_RAW_HAZARD ++ ipl_value: 0x20 ++ ++ - ipl_en_name: perf_waw_hazard ++ ipl_en_mnemonic: PERF_WAW_HAZARD ++ ipl_value: 0x21 ++ ++ - ipl_en_name: perf_op_is_enter_selfref ++ ipl_en_mnemonic: PERF_OP_IS_ENTER_SELFREF ++ ipl_value: 0x22 ++ ++ - ipl_en_name: perf_op_is_enter_powerdown ++ ipl_en_mnemonic: PERF_OP_IS_ENTER_POWERDOWN ++ ipl_value: 0x23 ++ ++ - ipl_en_name: perf_op_is_enter_mpsm ++ ipl_en_mnemonic: PERF_OP_IS_ENTER_MPSM ++ ipl_value: 0x24 ++ ++ - ipl_en_name: perf_selfref_mode ++ ipl_en_mnemonic: PERF_SELFREF_MODE ++ ipl_value: 0x25 ++ ++ - ipl_en_name: perf_op_is_refresh ++ ipl_en_mnemonic: PERF_OP_IS_REFRESH ++ ipl_value: 0x26 ++ ++ - ipl_en_name: perf_op_is_load_mode ++ ipl_en_mnemonic: PERF_OP_IS_LOAD_MODE ++ ipl_value: 0x27 ++ ++ - ipl_en_name: perf_op_is_zqcl ++ ipl_en_mnemonic: PERF_OP_IS_ZQCL ++ ipl_value: 0x28 ++ ++ - ipl_en_name: perf_op_is_zqcs ++ ipl_en_mnemonic: PERF_OP_IS_ZQCS ++ ipl_value: 0x29 ++ ++ - ipl_en_name: perf_hpr_req_with_nocredit ++ ipl_en_mnemonic: PERF_HPR_REQ_WITH_NOCREDIT ++ ipl_value: 0x2A ++ ++ - ipl_en_name: 
perf_lpr_req_with_nocredit ++ ipl_en_mnemonic: PERF_LPR_REQ_WITH_NOCREDIT ++ ipl_value: 0x2B ++ ++ - ipl_en_name: RAS on PHY1 ++ ipl_en_mnemonic: RAS_ON_PHY1 ++ ipl_value: 0x2C ++ ++ - ipl_en_name: CAS on PHY1 ++ ipl_en_mnemonic: CAS_ON_PHY1 ++ ipl_value: 0x2D ++ ++ - ipl_en_name: WE on PHY1 ++ ipl_en_mnemonic: WE_ON_PHY1 ++ ipl_value: 0x2E ++ ++ - ipl_en_name: CKE[0] on PHY1 ++ ipl_en_mnemonic: CKE0_ON_PHY1 ++ ipl_value: 0x2F ++ ++ - ipl_en_name: CKE[1] on PHY1 ++ ipl_en_mnemonic: CKE1_ON_PHY1 ++ ipl_value: 0x30 ++ ++ - ipl_en_name: CKE[2] on PHY1 ++ ipl_en_mnemonic: CKE2_ON_PHY1 ++ ipl_value: 0x31 ++ ++ - ipl_en_name: CKE[3] on PHY1 ++ ipl_en_mnemonic: CKE3_ON_PHY1 ++ ipl_value: 0x32 ++ ++ - ipl_en_name: controller_busy_ch1 ++ ipl_en_mnemonic: CONTROLLER_BUSY_CH1 ++ ipl_value: 0x33 ++ ++ - ipl_en_name: perf_hif_rd_or_wr_dch1 ++ ipl_en_mnemonic: PERF_HIF_RD_OR_WR_DCH1 ++ ipl_value: 0x34 ++ ++ - ipl_en_name: perf_hif_wr_dch1 ++ ipl_en_mnemonic: PERF_HIF_WR_DCH1 ++ ipl_value: 0x35 ++ ++ - ipl_en_name: perf_hif_rd_dch1 ++ ipl_en_mnemonic: PERF_HIF_RD_DCH1 ++ ipl_value: 0x36 ++ ++ - ipl_en_name: perf_hif_rmw_dch1 ++ ipl_en_mnemonic: PERF_HIF_RMW_DCH1 ++ ipl_value: 0x37 ++ ++ - ipl_en_name: perf_hif_hi_pri_rd_dch1 ++ ipl_en_mnemonic: PERF_HIF_HI_PRI_RD_DCH1 ++ ipl_value: 0x38 ++ ++ - ipl_en_name: perf_read_bypass_dch1 ++ ipl_en_mnemonic: PERF_READ_BYPASS_DCH1 ++ ipl_value: 0x39 ++ ++ - ipl_en_name: perf_act_bypass_dch1 ++ ipl_en_mnemonic: PERF_ACT_BYPASS_DCH1 ++ ipl_value: 0x3A ++ ++ - ipl_en_name: perf_dfi_wr_data_cycles_dch1 ++ ipl_en_mnemonic: PERF_DFI_WR_DATA_CYCLES_DCH1 ++ ipl_value: 0x3B ++ ++ - ipl_en_name: perf_dfi_rd_data_cycles_dch1 ++ ipl_en_mnemonic: PERF_DFI_RD_DATA_CYCLES_DCH1 ++ ipl_value: 0x3C ++ ++ - ipl_en_name: perf_hpr_xact_when_critical_dch1 ++ ipl_en_mnemonic: PERF_HPR_XACT_WHEN_CRITICAL_DCH1 ++ ipl_value: 0x3D ++ ++ - ipl_en_name: perf_lpr_xact_when_critical_dch1 ++ ipl_en_mnemonic: PERF_LPR_XACT_WHEN_CRITICAL_DCH1 ++ ipl_value: 0x3E ++ ++ - ipl_en_name: perf_wr_xact_when_critical_dch1 ++ ipl_en_mnemonic: PERF_WR_XACT_WHEN_CRITICAL_DCH1 ++ ipl_value: 0x3F ++ ++ - ipl_en_name: perf_op_is_activate_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_ACTIVATE_DCH1 ++ ipl_value: 0x40 ++ ++ - ipl_en_name: perf_op_is_rd_or_wr_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_RD_OR_WR_DCH1 ++ ipl_value: 0x41 ++ ++ - ipl_en_name: perf_op_is_rd_activate_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_RD_ACTIVATE_DCH1 ++ ipl_value: 0x42 ++ ++ - ipl_en_name: perf_op_is_rd_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_RD_DCH1 ++ ipl_value: 0x43 ++ ++ - ipl_en_name: perf_op_is_wr_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_WR_DCH1 ++ ipl_value: 0x44 ++ ++ - ipl_en_name: perf_op_is_precharge_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_PRECHARGE_DCH1 ++ ipl_value: 0x45 ++ ++ - ipl_en_name: perf_precharge_for_rdwr_dch1 ++ ipl_en_mnemonic: PERF_PRECHARGE_FOR_RDWR_DCH1 ++ ipl_value: 0x46 ++ ++ - ipl_en_name: perf_precharge_for_other_dch1 ++ ipl_en_mnemonic: PERF_PRECHARGE_FOR_OTHER_DCH1 ++ ipl_value: 0x47 ++ ++ - ipl_en_name: perf_rdwr_transitions_dch1 ++ ipl_en_mnemonic: PERF_RDWR_TRANSITIONS_DCH1 ++ ipl_value: 0x48 ++ ++ - ipl_en_name: perf_write_combine_dch1 ++ ipl_en_mnemonic: PERF_WRITE_COMBINE_DCH1 ++ ipl_value: 0x49 ++ ++ - ipl_en_name: perf_war_hazard_dch1 ++ ipl_en_mnemonic: PERF_WAR_HAZARD_DCH1 ++ ipl_value: 0x4A ++ ++ - ipl_en_name: perf_raw_hazard_dch1 ++ ipl_en_mnemonic: PERF_RAW_HAZARD_DCH1 ++ ipl_value: 0x4B ++ ++ - ipl_en_name: perf_waw_hazard_dch1 ++ ipl_en_mnemonic: PERF_WAW_HAZARD_DCH1 ++ ipl_value: 0x4C ++ ++ - ipl_en_name: 
perf_op_is_enter_selfref_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_ENTER_SELFREF_DCH1 ++ ipl_value: 0x4D ++ ++ - ipl_en_name: perf_op_is_enter_powerdown_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_ENTER_POWERDOWN_DCH1 ++ ipl_value: 0x4E ++ ++ - ipl_en_name: perf_op_is_enter_mpsm_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_ENTER_MPSM_DCH1 ++ ipl_value: 0x4F ++ ++ - ipl_en_name: perf_selfref_mode_dch1 ++ ipl_en_mnemonic: PERF_SELFREF_MODE_DCH1 ++ ipl_value: 0x50 ++ ++ - ipl_en_name: perf_op_is_refresh_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_REFRESH_DCH1 ++ ipl_value: 0x51 ++ ++ - ipl_en_name: perf_op_is_load_mode_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_LOAD_MODE_DCH1 ++ ipl_value: 0x52 ++ ++ - ipl_en_name: perf_op_is_zqcl_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_ZQCL_DCH1 ++ ipl_value: 0x53 ++ ++ - ipl_en_name: perf_op_is_zqcs_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_ZQCS_DCH1 ++ ipl_value: 0x54 ++ ++ - ipl_en_name: perf_hpr_req_with_nocredit_dch1 ++ ipl_en_mnemonic: PERF_HPR_REQ_WITH_NOCREDIT_DCH1 ++ ipl_value: 0x55 ++ ++ - ipl_en_name: perf_lpr_req_with_nocredit_dch1 ++ ipl_en_mnemonic: PERF_LPR_REQ_WITH_NOCREDIT_DCH1 ++ ipl_value: 0x56 ++ ++ - ipl_en_name: perf_op_is_mwr ++ ipl_en_mnemonic: PERF_OP_IS_MWR ++ ipl_value: 0x57 ++ ++ - ipl_en_name: perf_op_is_crit_ref ++ ipl_en_mnemonic: PERF_OP_IS_CRIT_REF ++ ipl_value: 0x58 ++ ++ - ipl_en_name: perf_op_is_spec_ref ++ ipl_en_mnemonic: PERF_OP_IS_SPEC_REF ++ ipl_value: 0x59 ++ ++ - ipl_en_name: perf_visible_window_limit_reached_rd ++ ipl_en_mnemonic: PERF_VISIBLE_WINDOW_LIMIT_REACHED_RD ++ ipl_value: 0x5A ++ ++ - ipl_en_name: perf_visible_window_limit_reached_wr ++ ipl_en_mnemonic: PERF_VISIBLE_WINDOW_LIMIT_REACHED_WR ++ ipl_value: 0x5B ++ ++ - ipl_en_name: perf_op_is_dqsosc_mpc ++ ipl_en_mnemonic: PERF_OP_IS_DQSOSC_MPC ++ ipl_value: 0x5C ++ ++ - ipl_en_name: perf_op_is_dqsosc_mrr ++ ipl_en_mnemonic: PERF_OP_IS_DQSOSC_MRR ++ ipl_value: 0x5D ++ ++ - ipl_en_name: perf_op_is_tcr_mrr ++ ipl_en_mnemonic: PERF_OP_IS_TCR_MRR ++ ipl_value: 0x5E ++ ++ - ipl_en_name: perf_op_is_zqstart ++ ipl_en_mnemonic: PERF_OP_IS_ZQSTART ++ ipl_value: 0x5F ++ ++ - ipl_en_name: perf_op_is_zqlatch ++ ipl_en_mnemonic: PERF_OP_IS_ZQLATCH ++ ipl_value: 0x60 ++ ++ - ipl_en_name: perf_op_is_mwr_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_MWR_DCH1 ++ ipl_value: 0x61 ++ ++ - ipl_en_name: perf_op_is_crit_ref_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_CRIT_REF_DCH1 ++ ipl_value: 0x62 ++ ++ - ipl_en_name: perf_op_is_spec_ref_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_SPEC_REF_DCH1 ++ ipl_value: 0x63 ++ ++ - ipl_en_name: perf_visible_window_limit_reached_rd_dch1 ++ ipl_en_mnemonic: PERF_VISIBLE_WINDOW_LIMIT_REACHED_RD_DCH1 ++ ipl_value: 0x64 ++ ++ - ipl_en_name: perf_visible_window_limit_reached_wr_dch1 ++ ipl_en_mnemonic: PERF_VISIBLE_WINDOW_LIMIT_REACHED_WR_DCH1 ++ ipl_value: 0x65 ++ ++ - ipl_en_name: perf_op_is_dqsosc_mpc_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_DQSOSC_MPC_DCH1 ++ ipl_value: 0x66 ++ ++ - ipl_en_name: perf_op_is_dqsosc_mrr_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_DQSOSC_MRR_DCH1 ++ ipl_value: 0x67 ++ ++ - ipl_en_name: perf_op_is_tcr_mrr_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_TCR_MRR_DCH1 ++ ipl_value: 0x68 ++ ++ - ipl_en_name: perf_op_is_zqstart_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_ZQSTART_DCH1 ++ ipl_value: 0x69 ++ ++ - ipl_en_name: perf_op_is_zqlatch_dch1 ++ ipl_en_mnemonic: PERF_OP_IS_ZQLATCH_DCH1 ++ ipl_value: 0x6A ++ ++ - opcode_name: DDR Generic Capture ++ mnemonic: PERFCNT_DDR_GENERIC_CAPTURE ++ opcode: 0xCA11 ++ opcode_description: > ++ This function is used to capture 8 generic events. 
++ ++ By default, no filtering will be applied ++ and the 8 events are preset to: ++ ++ event0 - RAS on PHY0 ++ event1 - CAS on PHY0 ++ event2 - perf_op_is_activate ++ event3 - perf_op_is_rd ++ event4 - perf_op_is_wr ++ event5 - perf_op_is_precharge ++ event6 - perf_op_is_enter_selfref ++ event7 - perf_op_is_refresh ++ ++ To set a specific filtering or to capture other events, ++ use function PERFCNT_DDR_GENERIC_SELECT upfront. ++ ++ input_payload: ++ - ipl_name: DDR Generic Capture Input Payload ++ ipl_mnemonic: PERFCNT_DDR_GENERIC_CAPTURE_IN_PL ++ ipl_size_bytes: 8 ++ ++ parameters: ++ - ipl_par_name: DDR instance ++ ipl_par_mnemonic: DDR_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x01 ++ ipl_length: 3 ++ ++ - ipl_par_name: Capture-time in ms ++ ipl_par_mnemonic: POLL_PERIOD_MS ++ ipl_offset: 0x04 ++ ipl_length: 4 ++ ++ output_payload: ++ - opl_name: DDR Generic Capture Output Payload ++ opl_mnemonic: PERFCNT_DDR_GENERIC_CAPTURE_OUT_PL ++ opl_size_bytes: 32 ++ ++ parameters: ++ - opl_par_name: Generic Counter Readings ++ opl_par_mnemonic: RESULT ++ opl_offset: 0x00 ++ opl_unit_size: 4 ++ opl_units_num: 8 ++ opl_length: 32 ++ ++ - opcode_name: DDR DFI Capture ++ mnemonic: PERFCNT_DDR_DFI_CAPTURE ++ opcode: 0xCA12 ++ opcode_description: > ++ This function is used to capture the following DFI special counters: ++ ++ DFI special counter #17 ++ DFI special counter #20 ++ DFI special counter #21 ++ ++ input_payload: ++ - ipl_name: DDR DFI Capture Input Payload ++ ipl_mnemonic: PERFCNT_DDR_DFI_CAPTURE_IN_PL ++ ipl_size_bytes: 8 ++ ++ parameters: ++ - ipl_par_name: DDR instance ++ ipl_par_mnemonic: DDR_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x01 ++ ipl_length: 3 ++ ++ - ipl_par_name: Capture-time in ms ++ ipl_par_mnemonic: POLL_PERIOD_MS ++ ipl_offset: 0x04 ++ ipl_length: 4 ++ ++ output_payload: ++ - opl_name: DDR DFI Capture Output Payload ++ opl_mnemonic: PERFCNT_DDR_DFI_CAPTURE_OUT_PL ++ opl_size_bytes: 24 ++ ++ parameters: ++ - opl_par_name: Special Counter 17 Reading ++ opl_par_mnemonic: DFI_COUNTER_17 ++ opl_offset: 0x00 ++ opl_length: 4 ++ ++ - opl_par_name: Special Counter 20 Reading ++ opl_par_mnemonic: DFI_COUNTER_20 ++ opl_offset: 0x04 ++ opl_length: 4 ++ ++ - opl_par_name: Special Counter 21 Reading ++ opl_par_mnemonic: DFI_COUNTER_21 ++ opl_offset: 0x08 ++ opl_length: 4 ++ ++ - opl_par_name: Special Counter 17 Reading (second channel) ++ opl_par_mnemonic: DFI_CH1_COUNTER_17 ++ opl_offset: 0x0C ++ opl_length: 4 ++ ++ - opl_par_name: Special Counter 20 Reading (second channel) ++ opl_par_mnemonic: DFI_CH1_COUNTER_20 ++ opl_offset: 0x10 ++ opl_length: 4 ++ ++ - opl_par_name: Special Counter 21 Reading (second channel) ++ opl_par_mnemonic: DFI_CH1_COUNTER_21 ++ opl_offset: 0x14 ++ opl_length: 4 ++ ++- command_set_name: Error Injection ++ command_set_description: > ++ Injection of dedicated error conditions at several places ++ of ongoing memory traffic flow: ++ - HIF wdata Poison ++ - HIF wdata ECC ++ - DRS Poison ++ - DRS ECC ++ - RxFlit CRC ++ - TxFlit CRC ++ - Viral indication ++ ++ command_set_opcodes: ++ - opcode_name: Error Injection HIF Poison ++ mnemonic: ERR_INJ_HIF_POISON ++ opcode: 0xCB00 ++ opcode_description: > ++ Injects Poison bit to HIF wdata. 
++ ++ input_payload: ++ - ipl_name: Error injection HIF Poison input ++ ipl_mnemonic: ERR_INJ_HIF_POISON_IN_PL ++ ipl_size_bytes: 9 ++ ++ parameters: ++ - ipl_par_name: HIF channel ++ ipl_par_mnemonic: CH_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ HIF channel 0..3 selection. ++ ++ - ipl_par_name: Duration ++ ipl_par_mnemonic: DURATION ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ipl_description: > ++ 0x00: Turn continuous error generation OFF ++ 0x01: Turn continuous error generation ON ++ 0x02: SingleShot - Generate a single error ++ ++ - ipl_par_name: Injection mode ++ ipl_par_mnemonic: INJ_MODE ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ipl_description: > ++ 0x00: Inject poison bit on any transaction going to HIF wdata ++ 0x01: Inject poison bit on address given by ADDRESS parameter ++ ++ Please note that the ADDRESS parameter is shared by HIF Poison and HIF ECC functionality! ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - ipl_par_name: Address ++ ipl_par_mnemonic: ADDRESS ++ ipl_offset: 0x04 ++ ipl_length: 5 ++ opcode_description: > ++ 40-bit Address field, only used for some injection modes. ++ ++ Please note that the ADDRESS parameter is shared by HIF Poison and HIF ECC functionality! ++ ++ - opcode_name: Error Injection HIF ECC ++ mnemonic: ERR_INJ_HIF_ECC ++ opcode: 0xCB01 ++ opcode_description: > ++ Injects single or double bit ECC to HIF wdata. ++ ++ input_payload: ++ - ipl_name: Error injection HIF ECC input ++ ipl_mnemonic: ERR_INJ_HIF_ECC_IN_PL ++ ipl_size_bytes: 9 ++ ++ parameters: ++ - ipl_par_name: HIF channel ++ ipl_par_mnemonic: CH_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ HIF channel 0..3 selection. ++ ++ - ipl_par_name: Duration ++ ipl_par_mnemonic: DURATION ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ipl_description: > ++ 0x00: Turn continuous error generation OFF ++ 0x01: Turn continuous error generation ON ++ 0x02: SingleShot - Generate a single error ++ ++ - ipl_par_name: Injection Mode ++ ipl_par_mnemonic: INJ_MODE ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ opcode_description: > ++ 0x00: Inject single bit ECC error on any transaction going to HIF wdata ++ 0x01: Inject single bit ECC error on address given by ADDRESS parameter ++ 0x02: Inject double bit ECC error on any transaction going to HIF wdata ++ 0x03: Inject double bit ECC error on address given by ADDRESS parameter ++ ++ Please note that the ADDRESS parameter is shared by HIF Poison and HIF ECC functionality! ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - ipl_par_name: Address ++ ipl_par_mnemonic: ADDRESS ++ ipl_offset: 0x04 ++ ipl_length: 5 ++ opcode_description: > ++ 40-bit Address field, only used for some injection modes. ++ ++ Please note that the ADDRESS parameter is shared by HIF Poison and HIF ECC functionality! ++ ++ - opcode_name: Error Injection DRS Poison ++ mnemonic: ERR_INJ_DRS_POISON ++ opcode: 0xCB02 ++ opcode_description: > ++ Injects Poison bit on DRS going to Link-Layer. ++ ++ input_payload: ++ - ipl_name: Error injection DRS Poison input ++ ipl_mnemonic: ERR_INJ_DRS_POISON_IN_PL ++ ipl_size_bytes: 6 ++ ++ parameters: ++ - ipl_par_name: DRS channel ++ ipl_par_mnemonic: CH_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ CXL DRS channel 0..3 selection. 
++ ++ - ipl_par_name: Duration ++ ipl_par_mnemonic: DURATION ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ipl_description: > ++ 0x00: Turn continuous error generation OFF ++ 0x01: Turn continuous error generation ON ++ 0x02: SingleShot - Generate a single error ++ ++ - ipl_par_name: Injection mode ++ ipl_par_mnemonic: INJ_MODE ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ipl_description: > ++ 0x00: Inject poison bit on any DRS going to Link layer ++ 0x01: Inject poison bit on tag given by TAG parameter ++ ++ Please note that the TAG parameter is shared by DRS Poison and DRS ECC functionality! ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - ipl_par_name: Tag ++ ipl_par_mnemonic: TAG ++ ipl_offset: 0x04 ++ ipl_length: 2 ++ opcode_description: > ++ 16-bit Tag field, only used for some injection modes. ++ ++ Please note that the TAG parameter is shared by DRS Poison and DRS ECC functionality! ++ ++ - opcode_name: Error Injection DRS ECC ++ mnemonic: ERR_INJ_DRS_ECC ++ opcode: 0xCB03 ++ opcode_description: > ++ Injects single or double bit ECC on DRS going to Link-Layer. ++ ++ input_payload: ++ - ipl_name: Error injection DRS ECC input ++ ipl_mnemonic: ERR_INJ_DRS_ECC_IN_PL ++ ipl_size_bytes: 6 ++ ++ parameters: ++ - ipl_par_name: DRS channel ++ ipl_par_mnemonic: CH_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ CXL DRS channel 0..3 selection. ++ ++ - ipl_par_name: Duration ++ ipl_par_mnemonic: DURATION ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ipl_description: > ++ 0x00: Turn continuous error generation OFF ++ 0x01: Turn continuous error generation ON ++ 0x02: SingleShot - Generate a single error ++ ++ - ipl_par_name: Injection mode ++ ipl_par_mnemonic: INJ_MODE ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ipl_description: > ++ 0x00: Inject single bit ECC error on any DRS going to Link-Layer ++ 0x01: Inject single bit ECC error on tag given by TAG parameter ++ 0x02: Inject double bit ECC error on any DRS going to Link-Layer ++ 0x03: Inject double bit ECC error on tag given by TAG parameter ++ ++ Please note that the TAG parameter is shared by DRS Poison and DRS ECC functionality! ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ++ - ipl_par_name: Tag ++ ipl_par_mnemonic: TAG ++ ipl_offset: 0x04 ++ ipl_length: 2 ++ opcode_description: > ++ 16-bit Tag field, only used for some injection modes. ++ ++ Please note that the TAG parameter is shared by DRS Poison and DRS ECC functionality! ++ ++ - opcode_name: Error Injection RX-Flit CRC ++ mnemonic: ERR_INJ_RXFLIT_CRC ++ opcode: 0xCB04 ++ opcode_description: > ++ Injects CRC error onto RX-Flit. ++ ++ input_payload: ++ - ipl_name: Error injection RX-Flit CRC input ++ ipl_mnemonic: ERR_INJ_RXFLIT_CRC_IN_PL ++ ipl_size_bytes: 1 ++ ++ parameters: ++ - ipl_par_name: CXL.mem instance ++ ipl_par_mnemonic: CXL_MEM_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ CXL.mem instance selection. ++ ++ - opcode_name: Error Injection TX-Flit CRC ++ mnemonic: ERR_INJ_TXFLIT_CRC ++ opcode: 0xCB05 ++ opcode_description: > ++ Injects CRC error onto TX-Flit. ++ ++ input_payload: ++ - ipl_name: Error injection TX-Flit CRC input ++ ipl_mnemonic: ERR_INJ_TXFLIT_CRC_IN_PL ++ ipl_size_bytes: 1 ++ ++ parameters: ++ - ipl_par_name: CXL.mem instance ++ ipl_par_mnemonic: CXL_MEM_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ CXL.mem instance selection. 
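All of the error-injection opcodes above take small fixed-layout input payloads, so issuing one is mostly a matter of packing the documented offsets into a byte buffer before sending the raw command. The following sketch packs the 6-byte DRS Poison/ECC payload exactly as laid out above; the struct, the helper name, and the little-endian ordering assumed for the 16-bit TAG field are illustrative rather than taken from the firmware specification.

#include <stdint.h>
#include <string.h>

/* Field values for ERR_INJ_DRS_POISON (0xCB02) / ERR_INJ_DRS_ECC (0xCB03). */
struct drs_inj_params {
	uint8_t  ch_id;      /* CXL DRS channel 0..3 */
	uint8_t  duration;   /* 0x00 off, 0x01 continuous, 0x02 single shot */
	uint8_t  inj_mode;   /* poison: 0..1, ECC: 0..3 */
	uint16_t tag;        /* only consulted by the tag-based modes */
};

/* Pack the 6-byte input payload using the offsets documented above. */
static void pack_drs_inj_payload(const struct drs_inj_params *p, uint8_t out[6])
{
	memset(out, 0, 6);
	out[0] = p->ch_id;              /* CH_ID,    offset 0x00, length 1 */
	out[1] = p->duration;           /* DURATION, offset 0x01, length 1 */
	out[2] = p->inj_mode;           /* INJ_MODE, offset 0x02, length 1 */
	/* offset 0x03 is reserved */
	out[4] = p->tag & 0xff;         /* TAG, offset 0x04, length 2, LE assumed */
	out[5] = (p->tag >> 8) & 0xff;
}

The 9-byte HIF Poison/ECC variants follow the same pattern, with the 40-bit ADDRESS field occupying offsets 0x04 through 0x08.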
++ ++ - opcode_name: Error Injection Viral ++ mnemonic: ERR_INJ_VIRAL ++ opcode: 0xCB06 ++ opcode_description: > ++ Indication of a Viral error to Link-Layer. ++ ++ input_payload: ++ - ipl_name: Error injection Viral input ++ ipl_mnemonic: ERR_INJ_VIRAL_IN_PL ++ ipl_size_bytes: 1 ++ ++ parameters: ++ - ipl_par_name: ld_id ++ ipl_par_mnemonic: LD_ID ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ipl_description: > ++ 4-bit LD_ID value to be used while sending viral interrupt to Link-layer. ++ ++- command_set_name: EH Eye Capture ++ command_set_description: EH Eye Capture Commands ++ command_set_opcodes: ++ ++ - opcode_name: EH Eye capture Enable Timeout ++ mnemonic: EH_EYE_CAP_TIMEOUT_ENABLE ++ opcode: 0xCC0A ++ ++ input_payload: ++ ++ - ipl_name: EH Eye capture Enable Timeout ++ ipl_mnemonic: eh_eye_cap_timeout_en_in_pl ++ ipl_size_bytes: 2 ++ ++ parameters: ++ ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: enable ++ ipl_par_mnemonic: enable ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ipl_description: > ++ 0: disable timeout ++ 1: enable timeout ++ ++ - opcode_name: EH Eye capture Run ++ mnemonic: EH_EYE_CAP_RUN ++ opcode: 0xCC00 ++ ++ input_payload: ++ ++ - ipl_name: EH Eye capture Run input ++ ipl_mnemonic: eh_eye_cap_run_in_pl ++ ipl_size_bytes: 8 ++ ++ parameters: ++ ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: capture depth (BT_DEPTH_MIN to BT_DEPTH_MAX) ++ ipl_par_mnemonic: depth ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x02 ++ ipl_length: 2 ++ ++ - ipl_par_name: lane mask ++ ipl_par_mnemonic: lane_mask ++ ipl_offset: 0x04 ++ ipl_length: 4 ++ ++ - opcode_name: EH Eye capture Status ++ mnemonic: EH_EYE_CAP_STATUS ++ opcode: 0xCC01 ++ ++ input_payload: ++ ++ - ipl_name: EH Eye capture Status input ++ ipl_mnemonic: eh_eye_cap_stat_in_pl ++ ipl_size_bytes: 4 ++ ++ parameters: ++ ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: reserved ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x01 ++ ipl_length: 3 ++ ++ output_payload: ++ ++ - opl_name: EH Eye capture Status output ++ opl_mnemonic: eh_eye_cap_stat_out_pl ++ opl_size_bytes: 4 ++ ++ parameters: ++ ++ - opl_par_name: Status (see eye_cap_state_enum) ++ opl_par_mnemonic: stat ++ opl_offset: 0x00 ++ opl_length: 1 ++ ++ - opl_par_name: reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 0x01 ++ opl_length: 3 ++ ++ - opcode_name: EH Eye capture Read ++ mnemonic: EH_EYE_CAP_READ ++ opcode: 0xCC02 ++ ++ input_payload: ++ ++ - ipl_name: EH Eye capture Read input ++ ipl_mnemonic: eh_eye_cap_read_in_pl ++ ipl_size_bytes: 5 ++ ++ parameters: ++ ++ - ipl_par_name: unused ++ ipl_par_mnemonic: RSVD ++ ipl_offset: 0x00 ++ ipl_length: 1 ++ ++ - ipl_par_name: Lane ID ++ ipl_par_mnemonic: lane_id ++ ipl_offset: 0x01 ++ ipl_length: 1 ++ ++ - ipl_par_name: bin number [0 .. BT_BIN_TOT - 1] ++ ipl_par_mnemonic: bin_num ++ ipl_offset: 0x02 ++ ipl_length: 1 ++ ++ - ipl_par_name: Phase Offset ++ ipl_par_mnemonic: phase_offset ++ ipl_offset: 0x03 ++ ipl_length: 1 ++ ipl_description: > ++ Offset of the first phase to be returned. ++ ++ - ipl_par_name: Requested number of phases in ber_data ++ ipl_par_mnemonic: num_phases ++ ipl_offset: 0x04 ++ ipl_length: 1 ++ ipl_description: > ++ The output payload for requested number of phases cannot exceed CCI buffer size. 
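Because the data returned by one read cannot exceed the CCI buffer size, per-lane eye data is retrieved in pages: request a window of phases, then advance the phase offset by however many phases actually came back (see the output payload defined next) until the reported total has been covered. A rough paging loop follows; the send_eh_eye_cap_read() transport wrapper and the 32-phase page size are assumptions for illustration.

#include <stdint.h>

/* Shape of the EH_EYE_CAP_READ output payload as documented below;
 * a fixed 32-phase page is assumed for this sketch. */
struct eye_read_out {
	uint8_t  total_num_phases;   /* total phases available in ber_data */
	uint8_t  num_phases;         /* phases actually returned this call */
	uint8_t  phase_offset;       /* first phase covered by ber_data */
	uint64_t ber_data[32];       /* scaled by EYE_CAP_ERROR_CNT_MULT */
};

/* Hypothetical transport wrapper for opcode 0xCC02. */
int send_eh_eye_cap_read(uint8_t lane_id, uint8_t bin_num, uint8_t phase_offset,
			 uint8_t num_phases, struct eye_read_out *out);

/* Page through all phases for one lane/bin, 32 phases per command. */
static int read_all_phases(uint8_t lane_id, uint8_t bin_num,
			   uint64_t *ber, unsigned int max_phases)
{
	struct eye_read_out out;
	unsigned int offset = 0, total = 1;

	while (offset < total) {
		int rc = send_eh_eye_cap_read(lane_id, bin_num, offset, 32, &out);

		if (rc || out.num_phases == 0)
			return rc ? rc : -1;
		total = out.total_num_phases;
		for (unsigned int i = 0; i < out.num_phases; i++)
			if (offset + i < max_phases)
				ber[offset + i] = out.ber_data[i];
		offset += out.num_phases;
	}
	return 0;
}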
++ ++ output_payload: ++ ++ - opl_name: EH Eye capture Read output ++ opl_mnemonic: eh_eye_cap_read_out_pl ++ opl_size_bytes: 8+ ++ ++ parameters: ++ ++ - opl_par_name: Total number of phases in ber_data ++ opl_par_mnemonic: total_num_phases ++ opl_offset: 0x00 ++ opl_length: 1 ++ ++ - opl_par_name: Returned number of phases in ber_data ++ opl_par_mnemonic: num_phases ++ opl_offset: 0x01 ++ opl_length: 1 ++ opl_description: > ++ if returned number of phases is less than the requested number of phases, ++ then the output payload contains data for the last phase. ++ ++ - opl_par_name: Phase Offset ++ opl_par_mnemonic: phase_offset ++ opl_offset: 0x02 ++ opl_length: 1 ++ opl_description: > ++ Offset of the first returned phase. ++ ++ - opl_par_name: reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 0x03 ++ opl_length: 5 ++ ++ - opl_par_name: Per-phase bit error rates (multiplied by EYE_CAP_ERROR_CNT_MULT) ++ opl_par_mnemonic: ber_data ++ opl_offset: 0x08 ++ opl_unit_size: 8 ++ opl_units_num: VARIES ++ opl_length: VARIES ++ ++- command_set_name: Elkhound Adaptation Objects ++ command_set_description: > ++ Elkhound Adaptation Objects Commands ++ ++ command_set_opcodes: ++ - opcode_name: EH Get Adaptation Data ++ mnemonic: EH_ADAPT_GET ++ opcode: 0xCC03 ++ opcode_description: > ++ Elkhound Get Adaptation Data Command ++ ++ input_payload: ++ ++ - ipl_name: Get Adaptation Data input ++ ipl_mnemonic: eh_get_adapt_data_in_pl ++ ipl_size_bytes: 4 ++ ++ parameters: ++ ++ - ipl_par_name: lane id ++ ipl_par_mnemonic: LANE_ID ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ output_payload: ++ ++ - opl_name: Get Adaptation Data output ++ opl_mnemonic: eh_get_adapt_data_out_pl ++ opl_size_bytes: 0x1c ++ ++ parameters: ++ ++ - opl_par_name: contain the current value of the object PGA_GAIN as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ opl_par_mnemonic: pga_gain ++ opl_offset: 0x00 ++ opl_length: 1 ++ ++ - opl_par_name: PGA Stage2 DC offset correction ++ opl_par_mnemonic: pga_off2 ++ opl_offset: 0x01 ++ opl_length: 1 ++ ++ - opl_par_name: PGA Stage1 DC offset correction ++ opl_par_mnemonic: pga_off1 ++ opl_offset: 0x02 ++ opl_length: 1 ++ ++ - opl_par_name: I_TAP2<7:0> 2's compliment ++ opl_par_mnemonic: cdfe_a2 ++ opl_offset: 0x03 ++ opl_length: 1 ++ ++ - opl_par_name: I_TAP3<6:0> 2's compliment ++ opl_par_mnemonic: cdfe_a3 ++ opl_offset: 0x04 ++ opl_length: 1 ++ ++ - opl_par_name: I_TAP4<6:0> 2's compliment ++ opl_par_mnemonic: cdfe_a4 ++ opl_offset: 0x05 ++ opl_length: 1 ++ ++ - opl_par_name: I_TAP5<6:0> 2's compliment ++ opl_par_mnemonic: cdfe_a5 ++ opl_offset: 0x06 ++ opl_length: 1 ++ ++ - opl_par_name: I_TAP6<6:0> 2's compliment ++ opl_par_mnemonic: cdfe_a6 ++ opl_offset: 0x07 ++ opl_length: 1 ++ ++ - opl_par_name: I_TAP7<6:0> 2's compliment ++ opl_par_mnemonic: cdfe_a7 ++ opl_offset: 0x08 ++ opl_length: 1 ++ ++ - opl_par_name: I_TAP8<6:0> 2's compliment ++ opl_par_mnemonic: cdfe_a8 ++ opl_offset: 0x09 ++ opl_length: 1 ++ ++ - opl_par_name: I_TAP9<5:0> 2's compliment ++ opl_par_mnemonic: cdfe_a9 ++ opl_offset: 0x0a ++ opl_length: 1 ++ ++ - opl_par_name: I_TAP10<5:0> 2's compliment ++ opl_par_mnemonic: cdfe_a10 ++ opl_offset: 0x0b ++ opl_length: 1 ++ ++ - opl_par_name: Zobel a_gain ++ opl_par_mnemonic: zobel_a_gain ++ opl_offset: 0x0c ++ opl_length: 1 ++ ++ - opl_par_name: zobel_b_gain ++ opl_par_mnemonic: zobel_b_gain ++ opl_offset: 0x0d ++ opl_length: 1 ++ ++ - opl_par_name: Zobel DC offset correction ++ opl_par_mnemonic: zobel_dc_offset ++ opl_offset: 0x0e ++ opl_length: 2 ++ 
++ - opl_par_name: contain the current value of the object UDFE_THR_0 as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ. ++ opl_par_mnemonic: udfe_thr_0 ++ opl_offset: 0x10 ++ opl_length: 2 ++ ++ - opl_par_name: contain the current value of the object UDFE_THR_1 as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ opl_par_mnemonic: udfe_thr_1 ++ opl_offset: 0x12 ++ opl_length: 2 ++ ++ - opl_par_name: contain the current value of the object DC_OFFSET as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ opl_par_mnemonic: dc_offset ++ opl_offset: 0x14 ++ opl_length: 2 ++ ++ - opl_par_name: contain the current value of the object PGA_GAIN as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ opl_par_mnemonic: median_amp ++ opl_offset: 0x16 ++ opl_length: 2 ++ ++ - opl_par_name: contain the current value of the object PH_OFS_T as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ opl_par_mnemonic: ph_ofs_t ++ opl_offset: 0x18 ++ opl_length: 1 ++ ++ - opl_par_name: reserved ++ opl_par_mnemonic: RSVD ++ opl_offset: 0x19 ++ opl_length: 3 ++ ++ - opcode_name: EH One-off Adaptation ++ mnemonic: EH_ADAPT_ONEOFF ++ opcode: 0xCC04 ++ opcode_description: > ++ Elkhound One-off Adaptation Command ++ ++ input_payload: ++ ++ - ipl_name: One-off Adaptation Data input ++ ipl_mnemonic: eh_one_off_adapt_data_in_pl ++ ipl_size_bytes: 0x10 ++ ++ parameters: ++ ++ - ipl_par_name: lane id ++ ipl_par_mnemonic: lane_id ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ - ipl_par_name: Adaption objects preload enable ++ ipl_par_mnemonic: preload ++ ipl_offset: 0x04 ++ ipl_length: 4 ++ - ipl_par_name: Adaptions loop ++ ipl_par_mnemonic: loops ++ ipl_offset: 0x08 ++ ipl_length: 4 ++ - ipl_par_name: Adaption objects enable ++ ipl_par_mnemonic: objects ++ ipl_offset: 0x0C ++ ipl_length: 4 ++ ++ - opcode_name: EH Adapt Force ++ mnemonic: EH_ADAPT_FORCE ++ opcode: 0xCC05 ++ opcode_description: > ++ Elkhound Force Adaptation Values Command ++ ++ input_payload: ++ ++ - ipl_name: Force Adaptation Data input ++ ipl_mnemonic: eh_force_adapt_data_in_pl ++ ipl_size_bytes: 0x28 ++ ++ parameters: ++ ++ - ipl_par_name: lane id ++ ipl_par_mnemonic: lane_id ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ - ipl_par_name: PCIe rate (0 - Gen1, 1 - Gen2, 2 - Gen3, 3 - Gen4, 4 - Gen5) ++ ipl_par_mnemonic: rate ++ ipl_offset: 0x04 ++ ipl_length: 4 ++ - ipl_par_name: vdd bias (0 = 0.82V, 1 = 0.952V) ++ ipl_par_mnemonic: vdd_bias ++ ipl_offset: 0x08 ++ ipl_length: 4 ++ - ipl_par_name: spread spectrum clocking enable (0 - SSC enable, 1 - SSC disable) ++ ipl_par_mnemonic: ssc ++ ipl_offset: 0x0C ++ ipl_length: 4 ++ - ipl_par_name: used to set the value of the PGA_GAIN object when preloading is enabled ++ ipl_par_mnemonic: pga_gain ++ ipl_offset: 0x10 ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of the PGA_A0 object when preloading is enabled ++ ipl_par_mnemonic: pga_a0 ++ ipl_offset: 0x11 ++ ipl_length: 1 ++ - ipl_par_name: PGA Stage1,2 offset preload value, split evenly between PGA Stage1 & Stage2 DC offset ++ ipl_par_mnemonic: pga_off ++ ipl_offset: 0x12 ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of CDFE_A2 (DFE Tap2) when preloading (CDFE_GRP0) is enabled ++ ipl_par_mnemonic: cdfe_a2 ++ ipl_offset: 0x13 ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of CDFE_A3 (DFE Tap3) when preloading (CDFE_GRP0) is enabled ++ ipl_par_mnemonic: cdfe_a3 ++ ipl_offset: 0x14 ++ ipl_length: 1 ++ - ipl_par_name: used to set 
the value of CDFE_A4 (DFE Tap4) when preloading (CDFE_GRP0) is enabled ++ ipl_par_mnemonic: cdfe_a4 ++ ipl_offset: 0x15 ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of CDFE_A5 (DFE Tap5) when preloading (CDFE_GRP1) is enabled ++ ipl_par_mnemonic: cdfe_a5 ++ ipl_offset: 0x16 ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of CDFE_A6 (DFE Tap6) when preloading (CDFE_GRP1) is enabled ++ ipl_par_mnemonic: cdfe_a6 ++ ipl_offset: 0x17 ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of CDFE_A7 (DFE Tap7) when preloading (CDFE_GRP1) is enabled ++ ipl_par_mnemonic: cdfe_a7 ++ ipl_offset: 0x18 ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of CDFE_A8 (DFE Tap8) when preloading (CDFE_GRP2) is enabled ++ ipl_par_mnemonic: cdfe_a8 ++ ipl_offset: 0x19 ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of CDFE_A9 (DFE Tap9) when preloading (CDFE_GRP2) is enabled ++ ipl_par_mnemonic: cdfe_a9 ++ ipl_offset: 0x1A ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of CDFE_A10 (DFE Tap10) when preloading (CDFE_GRP2) is enabled ++ ipl_par_mnemonic: cdfe_a10 ++ ipl_offset: 0x1B ++ ipl_length: 1 ++ - ipl_par_name: used to set the value of the DC_OFFSET object when preloading is enabled ++ ipl_par_mnemonic: dc_offset ++ ipl_offset: 0x1C ++ ipl_length: 2 ++ - ipl_par_name: Zobel DC offset preload value ++ ipl_par_mnemonic: zobel_dc_offset ++ ipl_offset: 0x1E ++ ipl_length: 2 ++ - ipl_par_name: used to set the value of the UDFE_THR_0 object when preloading is enabled ++ ipl_par_mnemonic: udfe_thr_0 ++ ipl_offset: 0x20 ++ ipl_length: 2 ++ - ipl_par_name: used to set the value of the UDFE_THR_1 object when preloading is enabled ++ ipl_par_mnemonic: udfe_thr_1 ++ ipl_offset: 0x22 ++ ipl_length: 2 ++ - ipl_par_name: used to set the value of the MEDIAN_AMP object when preloading is enabled ++ ipl_par_mnemonic: median_amp ++ ipl_offset: 0x24 ++ ipl_length: 2 ++ - ipl_par_name: Zobel a_gain preload ++ ipl_par_mnemonic: zobel_a_gain ++ ipl_offset: 0x26 ++ ipl_length: 1 ++ - ipl_par_name: Timing phase offset preload ++ ipl_par_mnemonic: ph_ofs_t ++ ipl_offset: 0x27 ++ ipl_length: 1 ++ ++- command_set_name: Elkhound Link Debug Commands ++ command_set_description: Elkhound Link Debug Configuration and Dump Command ++ command_set_opcodes: ++ ++ - opcode_name: Config EH Link Debug ++ mnemonic: EH_LINK_DBG_CFG ++ opcode: 0xCC06 ++ opcode_description: > ++ This command configures Elkhound Link Debug operation ++ ++ input_payload: ++ ++ - ipl_name: Link Debug Configuration Input Payload ++ ipl_mnemonic: link_dbg_cfg_in_pl ++ ipl_size_bytes: 13 ++ ++ parameters: ++ ++ - ipl_par_name: mode ++ ipl_par_mnemonic: mode ++ ipl_offset: 0 ++ ipl_length: 1 ++ ipl_description: > ++ Bit[1:0] port_id: Link Debug target port id ++ Bit[3:2] op_mode: 1 - L2R, 2 - Rate ++ Bit[7:4] Capture Types: b4-Doorbell, b5-Adaptation, b6-EH register ++ ++ bit_fields: ++ - ipl_bf_name: Target port ++ ipl_bf_mnemonic: port_id ++ ipl_bit: "1:0" ++ ++ - ipl_bf_name: Operation Mode ++ ipl_bf_mnemonic: op_mode ++ ipl_bit: "3:2" ++ ++ - ipl_bf_name: Capture Type ++ ipl_bf_mnemonic: cap_type ++ ipl_bit: "7:4" ++ ++ - ipl_par_name: Lane mask ++ ipl_par_mnemonic: lane_mask ++ ipl_offset: 1 ++ ipl_length: 2 ++ ipl_description: > ++ Each bit represents capturing lane (b0: lane 0 ~ b15: lane 15) ++ ++ - ipl_par_name: Rate mask ++ ipl_par_mnemonic: rate_mask ++ ipl_offset: 3 ++ ipl_length: 1 ++ ipl_description: > ++ Each bit represents link rate (b0: GEN1 ~ b4: Gen5) ++ ++ - ipl_par_name: Timer interval ++ 
ipl_par_mnemonic: timer_us ++ ipl_offset: 4 ++ ipl_length: 4 ++ ipl_description: > ++ Polling Timer value in microsecond (minimum 1) ++ ++ - ipl_par_name: Capture Timer delay ++ ipl_par_mnemonic: cap_delay_us ++ ipl_offset: 8 ++ ipl_length: 4 ++ ipl_description: > ++ Capture delay timer value in microsecond ++ ++ - ipl_par_name: Max Capture ++ ipl_par_mnemonic: max_cap ++ ipl_offset: 12 ++ ipl_length: 1 ++ ipl_description: > ++ Maximum number of capture (0: Wrap around) ++ ++ - opcode_name: EH Link Debug Entry Dump ++ mnemonic: EH_LINK_DBG_ENTRY_DUMP ++ opcode: 0xCC07 ++ opcode_description: > ++ This command dumps Link capture entry results ++ One entry has lanes capture data ++ ++ input_payload: ++ - ipl_name: Link Debug Entry Dump Input Payload ++ ipl_mnemonic: link_dbg_entry_dump_in_pl ++ ipl_size_bytes: 1 ++ ++ parameters: ++ - ipl_par_name: Entry index ++ ipl_par_mnemonic: entry_idx ++ ipl_offset: 0 ++ ipl_length: 1 ++ ipl_description: > ++ index of capture entries - 0: Oldest entry ++ ++ output_payload: ++ - opl_name: Link Debug Entry Dump Output Payload ++ opl_mnemonic: link_dbg_entry_dump_out_pl ++ opl_size_bytes: 34 ++ ++ parameters: ++ - opl_par_name: Capture Info ++ opl_par_mnemonic: cap_info ++ opl_offset: 0 ++ opl_length: 1 ++ opl_description: > ++ Bit[3:0] Entry index of following data ++ Bit[7:4] Total Captured Entry Number ++ ++ bit_fields: ++ - opl_bf_name: Entry index of following data ++ opl_bf_mnemonic: entry_idx ++ opl_bit: "3:0" ++ ++ - opl_bf_name: Total Captured Entry Number ++ opl_bf_mnemonic: entry_num ++ opl_bit: "7:4" ++ ++ - opl_par_name: Capture Reason ++ opl_par_mnemonic: cap_reason ++ opl_offset: 1 ++ opl_length: 1 ++ opl_description: > ++ Capture Reason ++ ++ - opl_par_name: L2R Reason ++ opl_par_mnemonic: l2r_reason ++ opl_offset: 2 ++ opl_length: 4 ++ opl_description: > ++ L2R Reason ++ ++ - opl_par_name: Capture Start Timestamp ++ opl_par_mnemonic: start_time ++ opl_offset: 6 ++ opl_length: 8 ++ opl_description: > ++ Capture Start Time (us) ++ ++ - opl_par_name: Capture End Timestamp ++ opl_par_mnemonic: end_time ++ opl_offset: 14 ++ opl_length: 8 ++ opl_description: > ++ Capture End Time (us) ++ ++ - opl_par_name: Capture Start Rate ++ opl_par_mnemonic: start_rate ++ opl_offset: 22 ++ opl_length: 1 ++ opl_description: > ++ Capture Start Rate ++ ++ - opl_par_name: Capture End Rate ++ opl_par_mnemonic: end_rate ++ opl_offset: 23 ++ opl_length: 1 ++ opl_description: > ++ Capture End Rate ++ ++ - opl_par_name: Capture Start State ++ opl_par_mnemonic: start_state ++ opl_offset: 24 ++ opl_length: 1 ++ opl_description: > ++ Capture Start LTSSM State ++ ++ - opl_par_name: Capture End State ++ opl_par_mnemonic: end_state ++ opl_offset: 25 ++ opl_length: 1 ++ opl_description: > ++ Capture End LTSSM State ++ ++ - opl_par_name: Capture Start Status ++ opl_par_mnemonic: start_status ++ opl_offset: 26 ++ opl_length: 4 ++ opl_description: > ++ Capture Start LTSSM Status (SHB_PHYDL_INT_STAT_REG_3) ++ ++ - opl_par_name: Capture End Status ++ opl_par_mnemonic: end_status ++ opl_offset: 30 ++ opl_length: 4 ++ opl_description: > ++ Capture End LTSSM status (SHB_PHYDL_INT_STAT_REG_3) ++ ++ - opcode_name: EH Link Debug Lane Dump ++ mnemonic: EH_LINK_DBG_LANE_DUMP ++ opcode: 0xCC08 ++ opcode_description: > ++ This command dumps each Lane capture results of a Entry ++ ++ input_payload: ++ - ipl_name: Link Debug Lane Dump Input Payload ++ ipl_mnemonic: link_dbg_lane_dump_in_pl ++ ipl_size_bytes: 2 ++ ++ parameters: ++ - ipl_par_name: Capture Entry index ++ ipl_par_mnemonic: 
entry_idx ++ ipl_offset: 0 ++ ipl_length: 1 ++ ipl_description: > ++ index of capture entry - 0: Oldest capture ++ ++ - ipl_par_name: Capture Lane ++ ipl_par_mnemonic: lane_idx ++ ipl_offset: 1 ++ ipl_length: 1 ++ ipl_description: > ++ lane number to read (0: Lane 0 ~ 15 : Lane 15) ++ ++ output_payload: ++ - opl_name: Link Debug Lane Dump Output Payload ++ opl_mnemonic: link_dbg_lane_dump_out_pl ++ opl_size_bytes: 59 ++ ++ parameters: ++ - opl_par_name: Capture Info ++ opl_par_mnemonic: cap_info ++ opl_offset: 0 ++ opl_length: 1 ++ opl_description: > ++ Bit[3:0] Capture lane ++ Bit[7:4] Capture Entry number. ++ ++ bit_fields: ++ - opl_bf_name: Capture Lane ++ opl_bf_mnemonic: lane_idx ++ opl_bit: "3:0" ++ ++ - opl_bf_name: Capture Entry Index ++ opl_bf_mnemonic: entry_idx ++ opl_bit: "7:4" ++ ++ - opl_par_name: PGA gain ++ opl_par_mnemonic: pga_gain ++ opl_offset: 1 ++ opl_length: 1 ++ opl_description: > ++ contain the current value of the object PGA_GAIN as captured ++ through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ ++ - opl_par_name: PGA offset 2 ++ opl_par_mnemonic: pga_off2 ++ opl_offset: 2 ++ opl_length: 1 ++ opl_description: > ++ PGA Stage2 DC offset correction ++ ++ - opl_par_name: PGA offset 1 ++ opl_par_mnemonic: pga_off1 ++ opl_offset: 3 ++ opl_length: 1 ++ opl_description: > ++ PGA Stage1 DC offset correction ++ ++ - opl_par_name: CDFE A2 ++ opl_par_mnemonic: cdfe_a2 ++ opl_offset: 4 ++ opl_length: 1 ++ opl_description: > ++ I_TAP2<7:0> 2's compliment ++ ++ - opl_par_name: CDFE A3 ++ opl_par_mnemonic: cdfe_a3 ++ opl_offset: 5 ++ opl_length: 1 ++ opl_description: > ++ I_TAP3<6:0> 2's compliment ++ ++ - opl_par_name: CDFE A4 ++ opl_par_mnemonic: cdfe_a4 ++ opl_offset: 6 ++ opl_length: 1 ++ opl_description: > ++ I_TAP4<6:0> 2's compliment ++ ++ - opl_par_name: CDFE A5 ++ opl_par_mnemonic: cdfe_a5 ++ opl_offset: 7 ++ opl_length: 1 ++ opl_description: > ++ I_TAP5<6:0> 2's compliment ++ ++ - opl_par_name: CDFE A6 ++ opl_par_mnemonic: cdfe_a6 ++ opl_offset: 8 ++ opl_length: 1 ++ opl_description: > ++ I_TAP6<6:0> 2's compliment ++ ++ - opl_par_name: CDFE A7 ++ opl_par_mnemonic: cdfe_a7 ++ opl_offset: 9 ++ opl_length: 1 ++ opl_description: > ++ I_TAP7<6:0> 2's compliment ++ ++ - opl_par_name: CDFE A8 ++ opl_par_mnemonic: cdfe_a8 ++ opl_offset: 10 ++ opl_length: 1 ++ opl_description: > ++ I_TAP8<6:0> 2's compliment ++ ++ - opl_par_name: CDFE A9 ++ opl_par_mnemonic: cdfe_a9 ++ opl_offset: 11 ++ opl_length: 1 ++ opl_description: > ++ I_TAP9<5:0> 2's compliment ++ ++ - opl_par_name: CDFE A10 ++ opl_par_mnemonic: cdfe_a10 ++ opl_offset: 12 ++ opl_length: 1 ++ opl_description: > ++ I_TAP10<5:0> 2's compliment ++ ++ - opl_par_name: Zobel A Gain ++ opl_par_mnemonic: zobel_a_gain ++ opl_offset: 13 ++ opl_length: 1 ++ opl_description: > ++ Zobel a_gain ++ ++ - opl_par_name: Zobel B Gain ++ opl_par_mnemonic: zobel_b_gain ++ opl_offset: 14 ++ opl_length: 1 ++ opl_description: > ++ zobel_b_gain ++ ++ - opl_par_name: Zobel DC Offset ++ opl_par_mnemonic: zobel_dc_offset ++ opl_offset: 15 ++ opl_length: 2 ++ opl_description: > ++ Zobel DC offset correction ++ ++ - opl_par_name: UDFE_THR_0 ++ opl_par_mnemonic: udfe_thr_0 ++ opl_offset: 17 ++ opl_length: 2 ++ opl_description: > ++ contain the current value of the object UDFE_THR_0 as ++ captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ. 
++ ++ - opl_par_name: UDFE_THR_1 ++ opl_par_mnemonic: udfe_thr_1 ++ opl_offset: 19 ++ opl_length: 2 ++ opl_description: > ++ contain the current value of the object UDFE_THR_1 as ++ captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ ++ - opl_par_name: DC_OFFSET ++ opl_par_mnemonic: dc_offset ++ opl_offset: 21 ++ opl_length: 2 ++ opl_description: > ++ contain the current value of the object DC_OFFSET as ++ captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ ++ - opl_par_name: MEDIAN_AMP ++ opl_par_mnemonic: median_amp ++ opl_offset: 23 ++ opl_length: 2 ++ opl_description: > ++ contain the current value of the object MEDIAN_AMP as ++ captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ ++ - opl_par_name: PH_OFS_T ++ opl_par_mnemonic: ph_ofs_t ++ opl_offset: 25 ++ opl_length: 1 ++ opl_description: > ++ contain the current value of the object PH_OFS_T as ++ captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ ++ ++ - opl_par_name: CDRU lock time ++ opl_par_mnemonic: cdru_lock_time ++ opl_offset: 26 ++ opl_length: 2 ++ opl_description: > ++ EHRV_CMD_READ_CDRU_LOCK_TIME EH doorbell value ++ ++ - opl_par_name: EH Workaround Status ++ opl_par_mnemonic: eh_workaround_stat ++ opl_offset: 28 ++ opl_length: 2 ++ opl_description: > ++ EHRV_CMD_WORKAROUND_STAT EH doorbell value ++ ++ - opl_par_name: LOS toggle count ++ opl_par_mnemonic: los_toggle_cnt ++ opl_offset: 30 ++ opl_length: 2 ++ opl_description: > ++ EHRV_CMD_LOS_TOGGLE_CNT EH doorbell value ++ ++ - opl_par_name: Adaptation time ++ opl_par_mnemonic: adapt_time ++ opl_offset: 32 ++ opl_length: 2 ++ opl_description: > ++ EHRV_CMD_ADAPT_TIME EH doorbell value ++ ++ - opl_par_name: CDR lock toggle count (arg = 0) ++ opl_par_mnemonic: cdr_lock_toggle_cnt_0 ++ opl_offset: 34 ++ opl_length: 2 ++ opl_description: > ++ EHRV_CMD_CDR_LOCK_TOGGLE_CNT 0 EH doorbell value ++ ++ - opl_par_name: JAT status (arg = 0) ++ opl_par_mnemonic: jat_stat_0 ++ opl_offset: 36 ++ opl_length: 2 ++ opl_description: > ++ EHRV_CMD_JAT_STATUS 0 EH doorbell value ++ ++ - opl_par_name: doorbell error ++ opl_par_mnemonic: db_err ++ opl_offset: 38 ++ opl_length: 4 ++ opl_description: > ++ doorbell error indicators ++ ++ - opl_par_name: EH register data 0 ++ opl_par_mnemonic: reg_val0 ++ opl_offset: 42 ++ opl_length: 4 ++ opl_description: > ++ EH register 0 value capture ++ ++ bit_fields: ++ - opl_bf_name: FS from PIPE interface ++ opl_bf_mnemonic: fs_obs ++ opl_bit: "5:0" ++ ++ - opl_bf_name: LF from PIPE interface ++ opl_bf_mnemonic: lf_obs ++ opl_bit: "11:6" ++ ++ - opl_bf_name: Pre-cursor value from PIPE interface ++ opl_bf_mnemonic: pre_cursor ++ opl_bit: "17:12" ++ ++ - opl_bf_name: Cursor value from PIPE interface ++ opl_bf_mnemonic: cursor ++ opl_bit: "23:18" ++ ++ - opl_bf_name: Post-cursor value from PIPE interface ++ opl_bf_mnemonic: post_cursor ++ opl_bit: "29:24" ++ ++ - opl_bf_name: Reserved ++ opl_bf_mnemonic: RSVD ++ opl_bit: "31:30" ++ ++ - opl_par_name: EH register data 1 ++ opl_par_mnemonic: reg_val1 ++ opl_offset: 46 ++ opl_length: 1 ++ opl_description: > ++ EH register 1 value capture ++ ++ bit_fields: ++ - opl_bf_name: US_PORT_TX_PRESET for current link rate ++ opl_bf_mnemonic: usp_tx_preset ++ opl_bit: "3:0" ++ ++ - opl_bf_name: DS_PORT_TX_PRESET for current link rate ++ opl_bf_mnemonic: dsp_tx_preset ++ opl_bit: "7:4" ++ ++ - opl_par_name: EH register data 2 ++ opl_par_mnemonic: reg_val2 ++ opl_offset: 47 ++ opl_length: 4 ++ opl_description: > ++ EH register 2 value capture 
++ ++ bit_fields: ++ - opl_bf_name: TX_P1A_D1EN ++ opl_bf_mnemonic: tx_p1a_d1en ++ opl_bit: "5:0" ++ ++ - opl_bf_name: TX_P1A_D2EN ++ opl_bf_mnemonic: tx_p1a_d2en ++ opl_bit: "11:6" ++ ++ - opl_bf_name: TX_P1A_AMP_RED ++ opl_bf_mnemonic: tx_p1a_amp_red ++ opl_bit: "17:12" ++ ++ - opl_bf_name: TX_P1B_D1EN ++ opl_bf_mnemonic: tx_p1b_d1en ++ opl_bit: "23:18" ++ ++ - opl_bf_name: TX_P1B_D2EN ++ opl_bf_mnemonic: tx_p1b_d2en ++ opl_bit: "29:24" ++ ++ - opl_bf_name: Reserved ++ opl_bf_mnemonic: RSVD ++ opl_bit: "31:30" ++ ++ - opl_par_name: EH register data 3 ++ opl_par_mnemonic: reg_val3 ++ opl_offset: 51 ++ opl_length: 4 ++ opl_description: > ++ EH register 3 value capture ++ ++ bit_fields: ++ - opl_bf_name: TX_P1B_AMP_RED ++ opl_bf_mnemonic: tx_p1b_amp_red ++ opl_bit: "5:0" ++ ++ - opl_bf_name: TX_P2A_D1EN ++ opl_bf_mnemonic: tx_p2a_d1en ++ opl_bit: "11:6" ++ ++ - opl_bf_name: TX_P2A_D2EN ++ opl_bf_mnemonic: tx_p2a_d2en ++ opl_bit: "17:12" ++ ++ - opl_bf_name: TX_P2A_AMP_RED ++ opl_bf_mnemonic: tx_p2a_amp_red ++ opl_bit: "23:18" ++ ++ - opl_bf_name: Reserved ++ opl_bf_mnemonic: RSVD ++ opl_bit: "31:24" ++ ++ - opl_par_name: EH register data 4 ++ opl_par_mnemonic: reg_val4 ++ opl_offset: 55 ++ opl_length: 4 ++ opl_description: > ++ EH register 4 value capture ++ ++ bit_fields: ++ - opl_bf_name: TX_P2B_D1EN ++ opl_bf_mnemonic: tx_p2b_d1en ++ opl_bit: "5:0" ++ ++ - opl_bf_name: TX_P2B_D2EN ++ opl_bf_mnemonic: tx_p2b_d2en ++ opl_bit: "11:6" ++ ++ - opl_bf_name: TX_P2B_AMP_RED ++ opl_bf_mnemonic: tx_p2b_amp_red ++ opl_bit: "17:12" ++ ++ - opl_bf_name: TX_P3A_D1EN ++ opl_bf_mnemonic: tx_p3a_d1en ++ opl_bit: "23:18" ++ ++ - opl_bf_name: Reserved ++ opl_bf_mnemonic: RSVD ++ opl_bit: "31:24" ++ ++ - opcode_name: Reset EH Link Debug Dump ++ mnemonic: EH_LINK_DBG_RESET ++ opcode: 0xCC09 ++ opcode_description: > ++ This command resets configured (running) EH Link Debug ++ ++- command_set_name: Hidden BO Commands ++ command_set_description: Perform background operations hidden from the host driver ++ command_set_opcodes: ++ - opcode_name: Hidden BO Status ++ mnemonic: HBO_STATUS ++ opcode: 0xCD00 ++ ++ output_payload: ++ ++ - opl_name: Hidden BO status output ++ opl_mnemonic: hbo_status_out_pl ++ opl_size_bytes: 8 ++ ++ parameters: ++ ++ - opl_par_name: BO status ++ opl_par_mnemonic: BO_STATUS ++ opl_offset: 0x00 ++ opl_length: 8 ++ ++ bit_fields: ++ - opl_bf_name: Command Opcode ++ opl_bf_mnemonic: CMD_OPCODE ++ opl_bit: "15:0" ++ ++ - opl_bf_name: Percentage Complete ++ opl_bf_mnemonic: PERCENT_COMPLETE ++ opl_bit: "22:16" ++ ++ - opl_bf_name: Background Operation is running ++ opl_bf_mnemonic: BO_RUN ++ opl_bit: "23:23" ++ ++ - opl_bf_name: Reserved ++ opl_bf_mnemonic: RSVD ++ opl_bit: "31:24" ++ ++ - opl_bf_name: Return Code ++ opl_bf_mnemonic: RET_CODE ++ opl_bit: "47:32" ++ ++ - opl_bf_name: Vendor Specific Extended Status ++ opl_bf_mnemonic: VENDOR_EXT_STATUS ++ opl_bit: "63:48" ++ ++ - opcode_name: HBO Transfer FW ++ mnemonic: HBO_TRANSFER_FW ++ opcode: 0xCD01 ++ opcode_description: > ++ This command is identical to Spec defined Transfer FW command (201h), ++ but triggers a Hidden Background Operation which is queried using Hidden BO Status ++ ++ - opcode_name: HBO Activate FW ++ mnemonic: HBO_ACTIVATE_FW ++ opcode: 0xCD02 ++ opcode_description: > ++ This command is identical to Spec defined Activate FW command (202h), ++ but triggers a Hidden Background Operation which is queried using Hidden BO Status ++ ++- command_set_name: Health counter ++ command_set_description: Health counter command set 
++ command_set_opcodes: ++ ++ - opcode_name: Clear Health Counters ++ mnemonic: HEALTH_COUNTERS_CLEAR ++ opcode: 0xCE00 ++ ++ input_payload: ++ ++ - ipl_name: Clear Health Counters Input Payload ++ ipl_mnemonic: clear_health_counters_in_pl ++ ipl_size_bytes: 4 ++ ++ parameters: ++ - ipl_par_name: health counters bitmask ++ ipl_par_mnemonic: bitmask ++ ipl_offset: 0x00 ++ ipl_length: 4 ++ ++ - opcode_name: Get Health Counters ++ mnemonic: HEALTH_COUNTERS_GET ++ opcode: 0xCE01 ++ ++ output_payload: ++ ++ - opl_name: Get Health Counters Output Payload ++ opl_mnemonic: get_health_counters_out_pl ++ opl_size_bytes: 40 ++ ++ parameters: ++ ++ - opl_par_name: Number of times temperature has exceeded threshold ++ opl_par_mnemonic: temperature_threshold_exceeded ++ opl_offset: 0x00 ++ opl_length: 4 ++ ++ - opl_par_name: Number of Power On events ++ opl_par_mnemonic: power_on_events ++ opl_offset: 0x04 ++ opl_length: 4 ++ ++ - opl_par_name: Number of Power On hours ++ opl_par_mnemonic: power_on_hours ++ opl_offset: 0x08 ++ opl_length: 4 ++ ++ - opl_par_name: Number of CXL.mem Link CRC errors ++ opl_par_mnemonic: cxl_mem_link_crc_errors ++ opl_offset: 0x0C ++ opl_length: 4 ++ ++ - opl_par_name: Number of CXL.io Link LCRC errors ++ opl_par_mnemonic: cxl_io_link_lcrc_errors ++ opl_offset: 0x10 ++ opl_length: 4 ++ ++ - opl_par_name: Number of CXL.io Link ECRC errors ++ opl_par_mnemonic: cxl_io_link_ecrc_errors ++ opl_offset: 0x14 ++ opl_length: 4 ++ ++ - opl_par_name: Number of DDR single ECC errors ++ opl_par_mnemonic: num_ddr_single_ecc_errors ++ opl_offset: 0x18 ++ opl_length: 4 ++ ++ - opl_par_name: Number of DDR double ECC errors ++ opl_par_mnemonic: num_ddr_double_ecc_errors ++ opl_offset: 0x1C ++ opl_length: 4 ++ ++ - opl_par_name: Number of Link recovery events ++ opl_par_mnemonic: link_recovery_events ++ opl_offset: 0x20 ++ opl_length: 4 ++ ++ - opl_par_name: Amount of time spent in throttled state (in seconds) ++ opl_par_mnemonic: time_in_throttled ++ opl_offset: 0x24 ++ opl_length: 4 ++ ++- command_set_name: Configuration File Commands ++ command_set_description: Configuration File Commands ++ command_set_opcodes: ++ ++ - opcode_name: Read Configuration File ++ mnemonic: CONF_READ ++ opcode: 0xCF00 ++ opcode_description: > ++ This command allows to read bytes of the configuration file starting from the offset . ++ The command will return the containing the requested portion of the configuration file. ++ If the request goes beyond the end of the config file, the portion within the config file will be returned. ++ The returned is empty if the caller specified an beyond the end of the config file. ++ This is not considered an error. 
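Because a read that runs past the end of the file simply comes back short (or empty) rather than failing, the entire configuration file can be pulled by looping until a chunk returns fewer bytes than requested. A minimal sketch of that loop follows, built on the 8-byte offset/length input payload defined next; the send_conf_read() transport helper and the 512-byte chunk size are assumptions for illustration.

#include <stdint.h>
#include <sys/types.h>

/* Hypothetical transport helper for CONF_READ (0xCF00): issues the command
 * with the given offset/length and returns how many payload bytes the
 * device sent back (0 once the offset is at or past end of file). */
ssize_t send_conf_read(uint32_t offset, uint32_t length, uint8_t *buf);

#define CONF_READ_CHUNK 512u	/* illustrative per-command chunk size */

/* Read the whole configuration file into dst, up to dst_size bytes. */
static ssize_t read_conf_file(uint8_t *dst, size_t dst_size)
{
	size_t total = 0;

	while (total < dst_size) {
		uint32_t want = CONF_READ_CHUNK;
		ssize_t got;

		if (dst_size - total < want)
			want = dst_size - total;
		got = send_conf_read((uint32_t)total, want, dst + total);
		if (got < 0)
			return got;	/* mailbox/transport failure */
		total += got;
		if ((uint32_t)got < want)
			break;		/* short or empty read: end of file */
	}
	return total;
}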
++ ++ input_payload: ++ ++ - ipl_name: Read Configuration File Input Payload ++ ipl_mnemonic: read_conf_in_pl ++ ipl_size_bytes: 8 ++ ++ parameters: ++ ++ - ipl_par_name: Starting Offset ++ ipl_par_mnemonic: offset ++ ipl_offset: 0 ++ ipl_length: 4 ++ ++ - ipl_par_name: Requested Length ++ ipl_par_mnemonic: length ++ ipl_offset: 4 ++ ipl_length: 4 ++ ++ output_payload: ++ ++ - opl_name: Read Configuration File Output Payload ++ opl_mnemonic: read_conf_out_pl ++ opl_size_bytes: 0+ ++ ++ parameters: ++ ++ - opl_par_name: Payload ++ opl_par_mnemonic: payload ++ opl_offset: 0 ++ opl_length: VARIES +diff --git a/configure.ac b/configure.ac +index 5ec8d2f..00497ae 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -2,7 +2,7 @@ AC_PREREQ(2.60) + m4_include([version.m4]) + AC_INIT([ndctl], + GIT_VERSION, +- [linux-nvdimm@lists.01.org], ++ [nvdimm@lists.linux.dev], + [ndctl], + [https://github.com/pmem/ndctl]) + AC_CONFIG_SRCDIR([ndctl/lib/libndctl.c]) +@@ -222,12 +222,16 @@ AC_CONFIG_HEADERS(config.h) + AC_CONFIG_FILES([ + Makefile + daxctl/lib/Makefile ++ cxl/lib/Makefile + ndctl/lib/Makefile + ndctl/Makefile + daxctl/Makefile ++ cxl/Makefile + test/Makefile + Documentation/ndctl/Makefile + Documentation/daxctl/Makefile ++ Documentation/cxl/Makefile ++ Documentation/cxl/lib/Makefile + ]) + + AC_OUTPUT +diff --git a/contrib/ndctl b/contrib/ndctl +index 680fe6a..cae4b1b 100755 +--- a/contrib/ndctl ++++ b/contrib/ndctl +@@ -647,5 +647,114 @@ _daxctl() + __daxctl_main + } + ++### cxl-cli ### ++ ++__cxl_get_devs() ++{ ++ local opts=("--memdevs" "$*") ++ cxl list "${opts[@]}" | grep -E "^\s*\"memdev\":" | cut -d'"' -f4 ++} ++ ++__cxlcomp() ++{ ++ local i=0 ++ ++ COMPREPLY=( $( compgen -W "$1" -- "$2" ) ) ++ for cword in "${COMPREPLY[@]}"; do ++ if [[ "$cword" == @(--memdev|--offset|--size|--input|--output) ]]; then ++ COMPREPLY[$i]="${cword}=" ++ else ++ COMPREPLY[$i]="${cword} " ++ fi ++ ((i++)) ++ done ++} ++ ++__cxl_comp_options() ++{ ++ ++ local cur=$1 ++ local opts ++ ++ if [[ "$cur" == *=* ]]; then ++ local cur_subopt=${cur%%=*} ++ local cur_arg=${cur##*=} ++ case $cur_subopt in ++ --memdev) ++ opts="$(__cxl_get_devs -i)" ++ ;; ++ *) ++ return ++ ;; ++ esac ++ __cxlcomp "$opts" "$cur_arg" ++ fi ++} ++ ++__cxl_comp_non_option_args() ++{ ++ local subcmd=$1 ++ local cur=$2 ++ local opts ++ ++ case $subcmd in ++ read-labels) ++ ;& ++ write-labels) ++ ;& ++ zero-labels) ++ opts="$(__cxl_get_devs -i) all" ++ ;; ++ *) ++ return ++ ;; ++ esac ++ __cxlcomp "$opts" "$cur" ++} ++ ++__cxl_main() ++{ ++ local cmd subcmd ++ ++ cmd=${words[0]} ++ COMPREPLY=() ++ ++ # Skip options backward and find the last cxl command ++ __nd_common_prev_skip_opts ++ subcmd=$prev_skip_opts ++ # List cxl subcommands or long options ++ if [ -z $subcmd ]; then ++ if [[ $cur == --* ]]; then ++ cmds="--version --help --list-cmds" ++ else ++ cmds=$($cmd --list-cmds) ++ fi ++ __cxlcomp "$cmds" "$cur" ++ else ++ # List long option names ++ if [[ $cur == --* ]]; then ++ opts=$($cmd $subcmd --list-opts) ++ __cxlcomp "$opts" "$cur" ++ __cxl_comp_options "$cur" ++ else ++ [ -z "$subcmd" ] && return ++ __cxl_comp_non_option_args "$subcmd" "$cur" ++ fi ++ fi ++} ++ ++type cxl &>/dev/null && ++_cxl() ++{ ++ local cur words cword prev ++ if [ $preload_get_comp_words_by_ref = "true" ]; then ++ _get_comp_words_by_ref -n =: cur words cword prev ++ else ++ __nd_common_get_comp_words_by_ref -n =: cur words cword prev ++ fi ++ __cxl_main ++} ++ + complete -o nospace -F _ndctl ndctl + complete -o nospace -F _daxctl daxctl ++complete -o nospace -F 
_cxl cxl +diff --git a/cxl/Makefile.am b/cxl/Makefile.am +new file mode 100644 +index 0000000..5c83c6c +--- /dev/null ++++ b/cxl/Makefile.am +@@ -0,0 +1,23 @@ ++include $(top_srcdir)/Makefile.am.in ++ ++bin_PROGRAMS = cxl ++ ++DISTCLEANFILES = config.h ++BUILT_SOURCES = config.h ++config.h: $(srcdir)/Makefile.am ++ $(AM_V_GEN) echo "/* Autogenerated by cxl/Makefile.am */" >$@ ++ ++cxl_SOURCES =\ ++ cxl.c \ ++ list.c \ ++ memdev.c \ ++ ../util/json.c \ ++ ../util/log.c \ ++ builtin.h ++ ++cxl_LDADD =\ ++ lib/libcxl.la \ ++ ../libutil.a \ ++ $(UUID_LIBS) \ ++ $(KMOD_LIBS) \ ++ $(JSON_LIBS) +diff --git a/cxl/builtin.h b/cxl/builtin.h +new file mode 100644 +index 0000000..ca8be68 +--- /dev/null ++++ b/cxl/builtin.h +@@ -0,0 +1,118 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2020-2021 Intel Corporation. All rights reserved. */ ++#ifndef _CXL_BUILTIN_H_ ++#define _CXL_BUILTIN_H_ ++ ++struct cxl_ctx; ++int cmd_update_fw(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_fw_info(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_transfer_fw(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_activate_fw(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_device_info_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_list(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_write_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_read_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_zero_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_init_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_check_labels(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_identify(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_supported_logs(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_cel_log(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_event_interrupt_policy(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_set_event_interrupt_policy(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_timestamp(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_set_timestamp(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_alert_config(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_set_alert_config(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_health_info(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_event_records(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_get_ld_info(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_clear_event_records(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ddr_info(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hct_start_stop_trigger(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hct_get_buffer_status(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hct_enable(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_capture_clear(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_capture(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_capture_freeze_and_restore(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_l2r_count_dump(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_l2r_count_clear(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_basic_cfg(int argc, const char **argv, struct cxl_ctx *ctx); ++int 
cmd_ltmon_watch(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_capture_stat(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_capture_log_dmp(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_capture_trigger(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ltmon_enable(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_osa_os_type_trig_cfg(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_osa_cap_ctrl(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_osa_cfg_dump(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_osa_ana_op(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_osa_status_query(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_osa_access_rel(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_ltif_set(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_latch_val_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_counter_clear(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_cnt_val_latch(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_hif_set(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_hif_cfg_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_hif_latch_val_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_hif_counter_clear(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_mta_hif_cnt_val_latch(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_ddr_generic_select(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_ddr_generic_capture(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_perfcnt_ddr_dfi_capture(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_err_inj_drs_poison(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_err_inj_drs_ecc(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_err_inj_rxflit_crc(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_err_inj_txflit_crc(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_err_inj_viral(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_err_inj_hif_poison(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_err_inj_hif_ecc(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_eye_cap_run(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_eye_cap_read(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_eye_cap_timeout_enable(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_eye_cap_status(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_adapt_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_adapt_oneoff(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_adapt_force(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hbo_status(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hbo_transfer_fw(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hbo_activate_fw(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_health_counters_clear(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_health_counters_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hct_get_plat_param(int argc, const char **argv, struct cxl_ctx *ctx); ++int 
cmd_eh_link_dbg_cfg(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_link_dbg_entry_dump(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_link_dbg_lane_dump(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_eh_link_dbg_reset(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_stopconfig_set(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_cyclecount_set(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_reset_set(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_run_set(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_run_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_xfer_rem_cnt_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_last_exp_read_data_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_curr_cycle_cnt_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_thread_status_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_thread_trans_cnt_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_thread_bandwidth_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_thread_latency_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_thread_perf_mon_set(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_top_read_status0_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_top_err_cnt_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_last_read_addr_get(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_test_simpledata(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_test_addresstest(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_test_movinginversion(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_fbist_test_randomsequence(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_conf_read(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hct_get_config(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hct_read_buffer(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_hct_set_config(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_osa_os_patt_trig_cfg(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_osa_misc_trig_cfg(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_osa_data_read(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_dimm_spd_read(int argc, const char **argv, struct cxl_ctx *ctx); ++int cmd_ddr_training_status(int argc, const char **argv, struct cxl_ctx *ctx); ++#endif /* _CXL_BUILTIN_H_ */ +diff --git a/cxl/cxl.c b/cxl/cxl.c +new file mode 100644 +index 0000000..e5b5732 +--- /dev/null ++++ b/cxl/cxl.c +@@ -0,0 +1,206 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (C) 2020-2021 Intel Corporation. All rights reserved. */ ++/* Copyright (C) 2005 Andreas Ericsson. All rights reserved. 
*/ ++ ++/* originally copied from perf and git */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++const char cxl_usage_string[] = "cxl [--version] [--help] COMMAND [ARGS]"; ++const char cxl_more_info_string[] = ++ "See 'cxl help COMMAND' for more information on a specific command.\n" ++ " cxl --list-cmds to see all available commands"; ++ ++static int cmd_version(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ printf("%s\n", VERSION); ++ return 0; ++} ++ ++static int cmd_help(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ const char * const builtin_help_subcommands[] = { ++ "list", ++ NULL, ++ }; ++ struct option builtin_help_options[] = { ++ OPT_END(), ++ }; ++ const char *builtin_help_usage[] = { ++ "cxl help [command]", ++ NULL ++ }; ++ ++ argc = parse_options_subcommand(argc, argv, builtin_help_options, ++ builtin_help_subcommands, builtin_help_usage, 0); ++ ++ if (!argv[0]) { ++ printf("\n usage: %s\n\n", cxl_usage_string); ++ printf("\n %s\n\n", cxl_more_info_string); ++ return 0; ++ } ++ ++ return help_show_man_page(argv[0], "cxl", "CXL_MAN_VIEWER"); ++} ++ ++static struct cmd_struct commands[] = { ++ { "update-fw", .c_fn = cmd_update_fw }, ++ { "get-fw-info", .c_fn = cmd_get_fw_info }, ++ { "activate-fw", .c_fn = cmd_activate_fw }, ++ { "device-info-get", .c_fn = cmd_device_info_get }, ++ { "version", .c_fn = cmd_version }, ++ { "list", .c_fn = cmd_list }, ++ { "help", .c_fn = cmd_help }, ++ { "zero-labels", .c_fn = cmd_zero_labels }, ++ { "read-labels", .c_fn = cmd_read_labels }, ++ { "write-labels", .c_fn = cmd_write_labels }, ++ { "id-cmd", .c_fn = cmd_identify }, ++ { "get-supported-logs", .c_fn = cmd_get_supported_logs }, ++ { "get-cel-log", .c_fn = cmd_get_cel_log }, ++ { "get-event-interrupt-policy", .c_fn = cmd_get_event_interrupt_policy }, ++ { "set-event-interrupt-policy", .c_fn = cmd_set_event_interrupt_policy }, ++ { "get-timestamp", .c_fn = cmd_get_timestamp }, ++ { "set-timestamp", .c_fn = cmd_set_timestamp }, ++ { "get-alert-config", .c_fn = cmd_get_alert_config }, ++ { "set-alert-config", .c_fn = cmd_set_alert_config }, ++ { "get-health-info", .c_fn = cmd_get_health_info }, ++ { "get-event-records", .c_fn = cmd_get_event_records }, ++ { "get-ld-info", .c_fn = cmd_get_ld_info }, ++ { "clear-event-records", .c_fn = cmd_clear_event_records }, ++ { "ddr-info", .c_fn = cmd_ddr_info }, ++ { "hct-start-stop-trigger", .c_fn = cmd_hct_start_stop_trigger }, ++ { "hct-get-buffer-status", .c_fn = cmd_hct_get_buffer_status }, ++ { "hct-enable", .c_fn = cmd_hct_enable }, ++ { "ltmon-capture-clear", .c_fn = cmd_ltmon_capture_clear }, ++ { "ltmon-capture", .c_fn = cmd_ltmon_capture }, ++ { "ltmon-capture-freeze-and-restore", .c_fn = cmd_ltmon_capture_freeze_and_restore }, ++ { "ltmon-l2r-count-dump", .c_fn = cmd_ltmon_l2r_count_dump }, ++ { "ltmon-l2r-count-clear", .c_fn = cmd_ltmon_l2r_count_clear }, ++ { "ltmon-basic-cfg", .c_fn = cmd_ltmon_basic_cfg }, ++ { "ltmon-watch", .c_fn = cmd_ltmon_watch }, ++ { "ltmon-capture-stat", .c_fn = cmd_ltmon_capture_stat }, ++ { "ltmon-capture-log-dmp", .c_fn = cmd_ltmon_capture_log_dmp }, ++ { "ltmon-capture-trigger", .c_fn = cmd_ltmon_capture_trigger }, ++ { "ltmon-enable", .c_fn = cmd_ltmon_enable }, ++ { "osa-os-type-trig-cfg", .c_fn = cmd_osa_os_type_trig_cfg }, ++ { "osa-cap-ctrl", .c_fn = cmd_osa_cap_ctrl }, ++ { "osa-cfg-dump", .c_fn = cmd_osa_cfg_dump }, ++ { 
"osa-ana-op", .c_fn = cmd_osa_ana_op }, ++ { "osa-status-query", .c_fn = cmd_osa_status_query }, ++ { "osa-access-rel", .c_fn = cmd_osa_access_rel }, ++ { "perfcnt-mta-ltif-set", .c_fn = cmd_perfcnt_mta_ltif_set }, ++ { "perfcnt-mta-get", .c_fn = cmd_perfcnt_mta_get }, ++ { "perfcnt-mta-latch-val-get", .c_fn = cmd_perfcnt_mta_latch_val_get }, ++ { "perfcnt-mta-counter-clear", .c_fn = cmd_perfcnt_mta_counter_clear }, ++ { "perfcnt-mta-cnt-val-latch", .c_fn = cmd_perfcnt_mta_cnt_val_latch }, ++ { "perfcnt-mta-hif-set", .c_fn = cmd_perfcnt_mta_hif_set }, ++ { "perfcnt-mta-hif-cfg-get", .c_fn = cmd_perfcnt_mta_hif_cfg_get }, ++ { "perfcnt-mta-hif-latch-val-get", .c_fn = cmd_perfcnt_mta_hif_latch_val_get }, ++ { "perfcnt-mta-hif-counter-clear", .c_fn = cmd_perfcnt_mta_hif_counter_clear }, ++ { "perfcnt-mta-hif-cnt-val-latch", .c_fn = cmd_perfcnt_mta_hif_cnt_val_latch }, ++ { "perfcnt-ddr-generic-select", .c_fn = cmd_perfcnt_ddr_generic_select }, ++ { "perfcnt-ddr-generic-capture", .c_fn = cmd_perfcnt_ddr_generic_capture}, ++ { "perfcnt-ddr-dfi-capture", .c_fn = cmd_perfcnt_ddr_dfi_capture}, ++ { "err-inj-drs-poison", .c_fn = cmd_err_inj_drs_poison }, ++ { "err-inj-drs-ecc", .c_fn = cmd_err_inj_drs_ecc }, ++ { "err-inj-rxflit-crc", .c_fn = cmd_err_inj_rxflit_crc }, ++ { "err-inj-txflit-crc", .c_fn = cmd_err_inj_txflit_crc }, ++ { "err-inj-viral", .c_fn = cmd_err_inj_viral }, ++ { "err-inj-hif-poison", .c_fn = cmd_err_inj_hif_poison }, ++ { "err-inj-hif-ecc", .c_fn = cmd_err_inj_hif_ecc }, ++ { "eh-eye-cap-run", .c_fn = cmd_eh_eye_cap_run }, ++ { "eh-eye-cap-read", .c_fn = cmd_eh_eye_cap_read }, ++ { "eh-eye-cap-timeout-enable", .c_fn = cmd_eh_eye_cap_timeout_enable }, ++ { "eh-eye-cap-status", .c_fn = cmd_eh_eye_cap_status }, ++ { "eh-adapt-get", .c_fn = cmd_eh_adapt_get }, ++ { "eh-adapt-oneoff", .c_fn = cmd_eh_adapt_oneoff }, ++ { "eh-adapt-force", .c_fn = cmd_eh_adapt_force }, ++ { "hbo-status", .c_fn = cmd_hbo_status }, ++ { "hbo-transfer-fw", .c_fn = cmd_hbo_transfer_fw }, ++ { "hbo-activate-fw", .c_fn = cmd_hbo_activate_fw }, ++ { "health-counters-clear", .c_fn = cmd_health_counters_clear }, ++ { "health-counters-get", .c_fn = cmd_health_counters_get }, ++ { "hct-get-plat-params", .c_fn = cmd_hct_get_plat_param }, ++ { "eh-link-dbg-cfg", .c_fn = cmd_eh_link_dbg_cfg }, ++ { "eh-link-dbg-entry-dump", .c_fn = cmd_eh_link_dbg_entry_dump }, ++ { "eh-link-dbg-lane-dump", .c_fn = cmd_eh_link_dbg_lane_dump }, ++ { "eh-link-dbg-reset", .c_fn = cmd_eh_link_dbg_reset }, ++ { "fbist-stopconfig-set", .c_fn = cmd_fbist_stopconfig_set }, ++ { "fbist-cyclecount-set", .c_fn = cmd_fbist_cyclecount_set }, ++ { "fbist-reset-set", .c_fn = cmd_fbist_reset_set }, ++ { "fbist-run-set", .c_fn = cmd_fbist_run_set }, ++ { "fbist-run-get", .c_fn = cmd_fbist_run_get }, ++ { "fbist-xfer-rem-cnt-get", .c_fn = cmd_fbist_xfer_rem_cnt_get }, ++ { "fbist-last-exp-read-data-get", .c_fn = cmd_fbist_last_exp_read_data_get }, ++ { "fbist-curr-cycle-cnt-get", .c_fn = cmd_fbist_curr_cycle_cnt_get }, ++ { "fbist-thread-status-get", .c_fn = cmd_fbist_thread_status_get }, ++ { "fbist-thread-trans-cnt-get", .c_fn = cmd_fbist_thread_trans_cnt_get }, ++ { "fbist-thread-bandwidth-get", .c_fn = cmd_fbist_thread_bandwidth_get }, ++ { "fbist-thread-latency-get", .c_fn = cmd_fbist_thread_latency_get }, ++ { "fbist-thread-perf-mon-set", .c_fn = cmd_fbist_thread_perf_mon_set }, ++ { "fbist-top-read-status0-get", .c_fn = cmd_fbist_top_read_status0_get }, ++ { "fbist-top-err-cnt-get", .c_fn = cmd_fbist_top_err_cnt_get }, ++ { 
"fbist-last-read-addr-get", .c_fn = cmd_fbist_last_read_addr_get }, ++ { "fbist-test-simpledata", .c_fn = cmd_fbist_test_simpledata }, ++ { "fbist-test-addresstest", .c_fn = cmd_fbist_test_addresstest }, ++ { "fbist-test-movinginversion", .c_fn = cmd_fbist_test_movinginversion }, ++ { "fbist-test-randomsequence", .c_fn = cmd_fbist_test_randomsequence }, ++ { "conf-read", .c_fn = cmd_conf_read }, ++ { "hct-get-config", .c_fn = cmd_hct_get_config }, ++ { "hct-read-buffer", .c_fn = cmd_hct_read_buffer }, ++ { "hct-set-config", .c_fn = cmd_hct_set_config }, ++ { "osa-os-patt-trig-cfg", .c_fn = cmd_osa_os_patt_trig_cfg }, ++ { "osa-misc-trig-cfg", .c_fn = cmd_osa_misc_trig_cfg }, ++ { "osa-data-read", .c_fn = cmd_osa_data_read }, ++ { "dimm-spd-read", .c_fn = cmd_dimm_spd_read }, ++ { "ddr-training-status", .c_fn = cmd_ddr_training_status }, ++}; ++ ++int main(int argc, const char **argv) ++{ ++ struct cxl_ctx *ctx; ++ int rc; ++ ++ /* Look for flags.. */ ++ argv++; ++ argc--; ++ main_handle_options(&argv, &argc, cxl_usage_string, commands, ++ ARRAY_SIZE(commands)); ++ ++ if (argc > 0) { ++ if (!prefixcmp(argv[0], "--")) ++ argv[0] += 2; ++ } else { ++ /* The user didn't specify a command; give them help */ ++ printf("\n usage: %s\n\n", cxl_usage_string); ++ printf("\n %s\n\n", cxl_more_info_string); ++ goto out; ++ } ++ ++ rc = cxl_new(&ctx); ++ if (rc) ++ goto out; ++ main_handle_internal_command(argc, argv, ctx, commands, ++ ARRAY_SIZE(commands), PROG_CXL); ++ cxl_unref(ctx); ++ fprintf(stderr, "Unknown command: '%s'\n", argv[0]); ++out: ++ return 1; ++} +diff --git a/cxl/cxl_mem.h b/cxl/cxl_mem.h +new file mode 100644 +index 0000000..d38cc9c +--- /dev/null ++++ b/cxl/cxl_mem.h +@@ -0,0 +1,189 @@ ++/* SPDX-License-Identifier: LGPL-2.1 */ ++/* Copyright (C) 2020-2021, Intel Corporation. All rights reserved. */ ++/* ++ * CXL IOCTLs for Memory Devices ++ */ ++ ++#ifndef _UAPI_CXL_MEM_H_ ++#define _UAPI_CXL_MEM_H_ ++ ++#include ++#include ++#include ++ ++#define __user ++ ++/** ++ * DOC: UAPI ++ * ++ * Not all of all commands that the driver supports are always available for use ++ * by userspace. Userspace must check the results from the QUERY command in ++ * order to determine the live set of commands. 
++ */ ++ ++#define CXL_MEM_QUERY_COMMANDS _IOR(0xCE, 1, struct cxl_mem_query_commands) ++#define CXL_MEM_SEND_COMMAND _IOWR(0xCE, 2, struct cxl_send_command) ++ ++#define CXL_CMDS \ ++ ___C(INVALID, "Invalid Command"), \ ++ ___C(IDENTIFY, "Identify Command"), \ ++ ___C(RAW, "Raw device command"), \ ++ ___C(GET_SUPPORTED_LOGS, "Get Supported Logs"), \ ++ ___C(GET_FW_INFO, "Get FW Info"), \ ++ ___C(GET_PARTITION_INFO, "Get Partition Information"), \ ++ ___C(GET_LSA, "Get Label Storage Area"), \ ++ ___C(GET_HEALTH_INFO, "Get Health Info"), \ ++ ___C(GET_LOG, "Get Log"), \ ++ ___C(SET_PARTITION_INFO, "Set Partition Information"), \ ++ ___C(SET_LSA, "Set Label Storage Area"), \ ++ ___C(GET_ALERT_CONFIG, "Get Alert Configuration"), \ ++ ___C(SET_ALERT_CONFIG, "Set Alert Configuration"), \ ++ ___C(GET_SHUTDOWN_STATE, "Get Shutdown State"), \ ++ ___C(SET_SHUTDOWN_STATE, "Set Shutdown State"), \ ++ ___C(GET_POISON, "Get Poison List"), \ ++ ___C(INJECT_POISON, "Inject Poison"), \ ++ ___C(CLEAR_POISON, "Clear Poison"), \ ++ ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \ ++ ___C(SCAN_MEDIA, "Scan Media"), \ ++ ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \ ++ ___C(MAX, "invalid / last command") ++ ++#define ___C(a, b) CXL_MEM_COMMAND_ID_##a ++enum { CXL_CMDS }; ++ ++#undef ___C ++#define ___C(a, b) { b } ++static const struct { ++ const char *name; ++} cxl_command_names[] = { CXL_CMDS }; ++ ++/* ++ * Here's how this actually breaks out: ++ * cxl_command_names[] = { ++ * [CXL_MEM_COMMAND_ID_INVALID] = { "Invalid Command" }, ++ * [CXL_MEM_COMMAND_ID_IDENTIFY] = { "Identify Command" }, ++ * ... ++ * [CXL_MEM_COMMAND_ID_MAX] = { "invalid / last command" }, ++ * }; ++ */ ++ ++#undef ___C ++ ++/** ++ * struct cxl_command_info - Command information returned from a query. ++ * @id: ID number for the command. ++ * @flags: Flags that specify command behavior. ++ * @size_in: Expected input size, or -1 if variable length. ++ * @size_out: Expected output size, or -1 if variable length. ++ * ++ * Represents a single command that is supported by both the driver and the ++ * hardware. This is returned as part of an array from the query ioctl. The ++ * following would be a command that takes a variable length input and returns 0 ++ * bytes of output. ++ * ++ * - @id = 10 ++ * - @flags = 0 ++ * - @size_in = -1 ++ * - @size_out = 0 ++ * ++ * See struct cxl_mem_query_commands. ++ */ ++struct cxl_command_info { ++ __u32 id; ++ ++ __u32 flags; ++#define CXL_MEM_COMMAND_FLAG_MASK GENMASK(0, 0) ++ ++ __s32 size_in; ++ __s32 size_out; ++}; ++ ++/** ++ * struct cxl_mem_query_commands - Query supported commands. ++ * @n_commands: In/out parameter. When @n_commands is > 0, the driver will ++ * return min(num_support_commands, n_commands). When @n_commands ++ * is 0, driver will return the number of total supported commands. ++ * @rsvd: Reserved for future use. ++ * @commands: Output array of supported commands. This array must be allocated ++ * by userspace to be at least min(num_support_commands, @n_commands) ++ * ++ * Allow userspace to query the available commands supported by both the driver, ++ * and the hardware. Commands that aren't supported by either the driver, or the ++ * hardware are not returned in the query. ++ * ++ * Examples: ++ * ++ * - { .n_commands = 0 } // Get number of supported commands ++ * - { .n_commands = 15, .commands = buf } // Return first 15 (or less) ++ * supported commands ++ * ++ * See struct cxl_command_info. 
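As a rough illustration of the two-pass sizing pattern described in the kernel-doc above, a userspace caller might drive CXL_MEM_QUERY_COMMANDS along the following lines. This is only a sketch, not part of the patch: it assumes an fd already opened on a /dev/cxl/memN character node and uses nothing beyond the ioctl and structures defined in this header.

#include <stdlib.h>
#include <sys/ioctl.h>

/* Sketch only: two-pass query sizing; 'fd' is an already-open /dev/cxl/memN. */
static struct cxl_mem_query_commands *query_all_commands(int fd)
{
	struct cxl_mem_query_commands probe = { .n_commands = 0 };
	struct cxl_mem_query_commands *q;

	/* Pass 1: n_commands == 0 only reports the supported count. */
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe) < 0)
		return NULL;

	/* Pass 2: allocate room for the command array and fetch it. */
	q = calloc(1, sizeof(*q) +
		      probe.n_commands * sizeof(struct cxl_command_info));
	if (!q)
		return NULL;
	q->n_commands = probe.n_commands;
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, q) < 0) {
		free(q);
		return NULL;
	}
	return q;
}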
++ */ ++struct cxl_mem_query_commands { ++ /* ++ * Input: Number of commands to return (space allocated by user) ++ * Output: Number of commands supported by the driver/hardware ++ * ++ * If n_commands is 0, kernel will only return number of commands and ++ * not try to populate commands[], thus allowing userspace to know how ++ * much space to allocate ++ */ ++ __u32 n_commands; ++ __u32 rsvd; ++ ++ struct cxl_command_info __user commands[]; /* out: supported commands */ ++}; ++ ++/** ++ * struct cxl_send_command - Send a command to a memory device. ++ * @id: The command to send to the memory device. This must be one of the ++ * commands returned by the query command. ++ * @flags: Flags for the command (input). ++ * @raw: Special fields for raw commands ++ * @raw.opcode: Opcode passed to hardware when using the RAW command. ++ * @raw.rsvd: Must be zero. ++ * @rsvd: Must be zero. ++ * @retval: Return value from the memory device (output). ++ * @in: Parameters associated with input payload. ++ * @in.size: Size of the payload to provide to the device (input). ++ * @in.rsvd: Must be zero. ++ * @in.payload: Pointer to memory for payload input, payload is little endian. ++ * @out: Parameters associated with output payload. ++ * @out.size: Size of the payload received from the device (input/output). This ++ * field is filled in by userspace to let the driver know how much ++ * space was allocated for output. It is populated by the driver to ++ * let userspace know how large the output payload actually was. ++ * @out.rsvd: Must be zero. ++ * @out.payload: Pointer to memory for payload output, payload is little endian. ++ * ++ * Mechanism for userspace to send a command to the hardware for processing. The ++ * driver will do basic validation on the command sizes. In some cases even the ++ * payload may be introspected. Userspace is required to allocate large enough ++ * buffers for size_out which can be variable length in certain situations. ++ */ ++struct cxl_send_command { ++ __u32 id; ++ __u32 flags; ++ union { ++ struct { ++ __u16 opcode; ++ __u16 rsvd; ++ } raw; ++ __u32 rsvd; ++ }; ++ __u32 retval; ++ ++ struct { ++ __s32 size; ++ __u32 rsvd; ++ __u64 payload; ++ } in; ++ ++ struct { ++ __s32 size; ++ __u32 rsvd; ++ __u64 payload; ++ } out; ++}; ++ ++#endif +diff --git a/cxl/lib/Makefile.am b/cxl/lib/Makefile.am +new file mode 100644 +index 0000000..72c9ccd +--- /dev/null ++++ b/cxl/lib/Makefile.am +@@ -0,0 +1,32 @@ ++include $(top_srcdir)/Makefile.am.in ++ ++%.pc: %.pc.in Makefile ++ $(SED_PROCESS) ++ ++pkginclude_HEADERS = ../libcxl.h ../cxl_mem.h ++lib_LTLIBRARIES = libcxl.la ++ ++libcxl_la_SOURCES =\ ++ ../libcxl.h \ ++ private.h \ ++ ../../util/sysfs.c \ ++ ../../util/sysfs.h \ ++ ../../util/log.c \ ++ ../../util/log.h \ ++ libcxl.c ++ ++libcxl_la_LIBADD =\ ++ $(UUID_LIBS) \ ++ $(KMOD_LIBS) ++ ++EXTRA_DIST += libcxl.sym ++ ++libcxl_la_LDFLAGS = $(AM_LDFLAGS) \ ++ -version-info $(LIBCXL_CURRENT):$(LIBCXL_REVISION):$(LIBCXL_AGE) \ ++ -Wl,--version-script=$(top_srcdir)/cxl/lib/libcxl.sym ++libcxl_la_DEPENDENCIES = libcxl.sym ++ ++pkgconfigdir = $(libdir)/pkgconfig ++pkgconfig_DATA = libcxl.pc ++EXTRA_DIST += libcxl.pc.in ++CLEANFILES += libcxl.pc +diff --git a/cxl/lib/libcxl.c b/cxl/lib/libcxl.c +new file mode 100644 +index 0000000..cd9fcb5 +--- /dev/null ++++ b/cxl/lib/libcxl.c +@@ -0,0 +1,9948 @@ ++// SPDX-License-Identifier: LGPL-2.1 ++// Copyright (C) 2020-2021, Intel Corporation. All rights reserved. 
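Before the library internals that follow, it may help to see the bare ioctl sequence that libcxl wraps. The snippet below is a sketch built only on the cxl_mem.h definitions above; the file descriptor, command id, and output buffer are placeholders that would come from elsewhere (for example from the query pass shown earlier).

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include "cxl_mem.h"

/* Sketch only: issue one mailbox command via the raw CXL_MEM_SEND_COMMAND path. */
static int send_one_command(int fd, __u32 id, void *out, __s32 out_size)
{
	struct cxl_send_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id = id;			/* e.g. CXL_MEM_COMMAND_ID_IDENTIFY */
	cmd.out.payload = (__u64)(unsigned long)out;
	cmd.out.size = out_size;	/* driver rewrites the actual size */

	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) < 0)
		return -errno;

	/* cmd.retval carries the device's mailbox status; 0 means success. */
	return cmd.retval;
}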
++/* ++ * SPD decoding portion of this code is copied from spd-decode.c ++ * spd-vendor.c, source of this code can be located at: ++ * https://github.com/lpereira/hardinfo/blob/master/modules/devices/spd-decode.c ++ * https://github.com/lpereira/hardinfo/blame/master/modules/devices/spd-vendors.c ++*/ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include "private.h" ++ ++const char *DEVICE_ERRORS[23] = { ++ "Success: The command completed successfully.", ++ "Background Command Started: The background command started successfully. Refer to the Background Command Status register to retrieve the command result.", ++ "Invalid Input: A command input was invalid.", ++ "Unsupported: The command is not supported.", ++ "Internal Error: The command was not completed due to an internal device error.", ++ "Retry Required: The command was not completed due to a temporary error. An optional single retry may resolve the issue.", ++ "Busy: The device is currently busy processing a background operation. Wait until background command completes and then retry the command.", ++ "Media Disabled: The command could not be completed because it requires media access and media is disabled.", ++ "FW Transfer in Progress: Only one FW package can be transferred at a time. Complete the current FW package transfer before starting a new one.", ++ "FW Transfer Out of Order: The FW package transfer was aborted because the FW package content was transferred out of order.", ++ "FW Authentication Failed: The FW package was not saved to the device because the FW package authentication failed.", ++ "Invalid Slot: The FW slot specified is not supported or not valid for the requested operation.", ++ "Activation Failed, FW Rolled Back: The new FW failed to activate and rolled back to the previous active FW.", ++ "Activation Failed, Cold Reset Required: The new FW failed to activate. A cold reset is required.", ++ "Invalid Handle: One or more Event Record Handles were invalid.", ++ "Invalid Physical Address: The physical address specified is invalid.", ++ "Inject Poison Limit Reached: The devices limit on allowed poison injection has been reached. Clear injected poison requests before attempting to inject more.", ++ "Permanent Media Failure: The device could not clear poison due to a permanent issue with the media.", ++ "Aborted: The background command was aborted by the device.", ++ "Invalid Security State: The command is not valid in the current security state.", ++ "Incorrect Passphrase: The passphrase does not match the currently set passphrase.", ++ "Unsupported Mailbox: The command is not supported on the mailbox it was issued on. Used to indicate an unsupported command issued on the secondary mailbox.", ++ "Invalid Payload Length: The payload length specified in the Command Register is not valid. 
The device is required to perform this check prior to processing any command defined in this specification.", ++}; ++ ++#define VENDORS_BANKS 8 ++#define VENDORS_ITEMS 128 ++const char *vendors[VENDORS_BANKS][VENDORS_ITEMS] = ++{ ++{"AMD", "AMI", "Fairchild", "Fujitsu", ++ "GTE", "Harris", "Hitachi", "Inmos", ++ "Intel", "I.T.T.", "Intersil", "Monolithic Memories", ++ "Mostek", "Freescale (former Motorola)", "National", "NEC", ++ "RCA", "Raytheon", "Conexant (Rockwell)", "Seeq", ++ "NXP (former Signetics, Philips Semi.)", "Synertek", "Texas Instruments", "Toshiba", ++ "Xicor", "Zilog", "Eurotechnique", "Mitsubishi", ++ "Lucent (AT&T)", "Exel", "Atmel", "SGS/Thomson", ++ "Lattice Semi.", "NCR", "Wafer Scale Integration", "IBM", ++ "Tristar", "Visic", "Intl. CMOS Technology", "SSSI", ++ "MicrochipTechnology", "Ricoh Ltd.", "VLSI", "Micron Technology", ++ "SK Hynix (former Hyundai Electronics)", "OKI Semiconductor", "ACTEL", "Sharp", ++ "Catalyst", "Panasonic", "IDT", "Cypress", ++ "DEC", "LSI Logic", "Zarlink (former Plessey)", "UTMC", ++ "Thinking Machine", "Thomson CSF", "Integrated CMOS (Vertex)", "Honeywell", ++ "Tektronix", "Oracle Corporation (former Sun Microsystems)", "Silicon Storage Technology", "ProMos/Mosel Vitelic", ++ "Infineon (former Siemens)", "Macronix", "Xerox", "Plus Logic", ++ "SunDisk", "Elan Circuit Tech.", "European Silicon Str.", "Apple Computer", ++ "Xilinx", "Compaq", "Protocol Engines", "SCI", ++ "Seiko Instruments", "Samsung", "I3 Design System", "Klic", ++ "Crosspoint Solutions", "Alliance Semiconductor", "Tandem", "Hewlett-Packard", ++ "Integrated Silicon Solutions", "Brooktree", "New Media", "MHS Electronic", ++ "Performance Semi.", "Winbond Electronic", "Kawasaki Steel", "Bright Micro", ++ "TECMAR", "Exar", "PCMCIA", "LG Semi (former Goldstar)", ++ "Northern Telecom", "Sanyo", "Array Microsystems", "Crystal Semiconductor", ++ "Analog Devices", "PMC-Sierra", "Asparix", "Convex Computer", ++ "Quality Semiconductor", "Nimbus Technology", "Transwitch", "Micronas (ITT Intermetall)", ++ "Cannon", "Altera", "NEXCOM", "QUALCOMM", ++ "Sony", "Cray Research", "AMS(Austria Micro)", "Vitesse", ++ "Aster Electronics", "Bay Networks (Synoptic)", "Zentrum or ZMD", "TRW", ++ "Thesys", "Solbourne Computer", "Allied-Signal", "Dialog", ++ "Media Vision", "Numonyx Corporation (former Level One Communication)"}, ++{"Cirrus Logic", "National Instruments", "ILC Data Device", "Alcatel Mietec", ++ "Micro Linear", "Univ. of NC", "JTAG Technologies", "BAE Systems", ++ "Nchip", "Galileo Tech", "Bestlink Systems", "Graychip", ++ "GENNUM", "VideoLogic", "Robert Bosch", "Chip Express", ++ "DATARAM", "United Microelec Corp.", "TCSI", "Smart Modular", ++ "Hughes Aircraft", "Lanstar Semiconductor", "Qlogic", "Kingston", ++ "Music Semi", "Ericsson Components", "SpaSE", "Eon Silicon Devices", ++ "Programmable Micro Corp", "DoD", "Integ. 
Memories Tech.", "Corollary Inc.", ++ "Dallas Semiconductor", "Omnivision", "EIV(Switzerland)", "Novatel Wireless", ++ "Zarlink (former Mitel)", "Clearpoint", "Cabletron", "STEC (former Silicon Technology)", ++ "Vanguard", "Hagiwara Sys-Com", "Vantis", "Celestica", ++ "Century", "Hal Computers", "Rohm Company Ltd.", "Juniper Networks", ++ "Libit Signal Processing", "Mushkin Enhanced Memory", "Tundra Semiconductor", "Adaptec Inc.", ++ "LightSpeed Semi.", "ZSP Corp.", "AMIC Technology", "Adobe Systems", ++ "Dynachip", "PNY Electronics", "Newport Digital", "MMC Networks", ++ "T Square", "Seiko Epson", "Broadcom", "Viking Components", ++ "V3 Semiconductor", "Flextronics (former Orbit)", "Suwa Electronics", "Transmeta", ++ "Micron CMS", "American Computer & Digital Components Inc", "Enhance 3000 Inc", "Tower Semiconductor", ++ "CPU Design", "Price Point", "Maxim Integrated Product", "Tellabs", ++ "Centaur Technology", "Unigen Corporation", "Transcend Information", "Memory Card Technology", ++ "CKD Corporation Ltd.", "Capital Instruments, Inc.", "Aica Kogyo, Ltd.", "Linvex Technology", ++ "MSC Vertriebs GmbH", "AKM Company, Ltd.", "Dynamem, Inc.", "NERA ASA", ++ "GSI Technology", "Dane-Elec (C Memory)", "Acorn Computers", "Lara Technology", ++ "Oak Technology, Inc.", "Itec Memory", "Tanisys Technology", "Truevision", ++ "Wintec Industries", "Super PC Memory", "MGV Memory", "Galvantech", ++ "Gadzoox Nteworks", "Multi Dimensional Cons.", "GateField", "Integrated Memory System", ++ "Triscend", "XaQti", "Goldenram", "Clear Logic", ++ "Cimaron Communications", "Nippon Steel Semi. Corp.", "Advantage Memory", "AMCC", ++ "LeCroy", "Yamaha Corporation", "Digital Microwave", "NetLogic Microsystems", ++ "MIMOS Semiconductor", "Advanced Fibre", "BF Goodrich Data.", "Epigram", ++ "Acbel Polytech Inc.", "Apacer Technology", "Admor Memory", "FOXCONN", ++ "Quadratics Superconductor", "3COM"}, ++{"Camintonn Corporation", "ISOA Incorporated", "Agate Semiconductor", "ADMtek Incorporated", ++ "HYPERTEC", "Adhoc Technologies", "MOSAID Technologies", "Ardent Technologies", ++ "Switchcore", "Cisco Systems, Inc.", "Allayer Technologies", "WorkX AG (Wichman)", ++ "Oasis Semiconductor", "Novanet Semiconductor", "E-M Solutions", "Power General", ++ "Advanced Hardware Arch.", "Inova Semiconductors GmbH", "Telocity", "Delkin Devices", ++ "Symagery Microsystems", "C-Port Corporation", "SiberCore Technologies", "Southland Microsystems", ++ "Malleable Technologies", "Kendin Communications", "Great Technology Microcomputer", "Sanmina Corporation", ++ "HADCO Corporation", "Corsair", "Actrans System Inc.", "ALPHA Technologies", ++ "Silicon Laboratories, Inc. (Cygnal)", "Artesyn Technologies", "Align Manufacturing", "Peregrine Semiconductor", ++ "Chameleon Systems", "Aplus Flash Technology", "MIPS Technologies", "Chrysalis ITS", ++ "ADTEC Corporation", "Kentron Technologies", "Win Technologies", "Tachyon Semiconductor (former ASIC Designs Inc.)", ++ "Extreme Packet Devices", "RF Micro Devices", "Siemens AG", "Sarnoff Corporation", ++ "Itautec SA (former Itautec Philco SA)", "Radiata Inc.", "Benchmark Elect. 
(AVEX)", "Legend", ++ "SpecTek Incorporated", "Hi/fn", "Enikia Incorporated", "SwitchOn Networks", ++ "AANetcom Incorporated", "Micro Memory Bank", "ESS Technology", "Virata Corporation", ++ "Excess Bandwidth", "West Bay Semiconductor", "DSP Group", "Newport Communications", ++ "Chip2Chip Incorporated", "Phobos Corporation", "Intellitech Corporation", "Nordic VLSI ASA", ++ "Ishoni Networks", "Silicon Spice", "Alchemy Semiconductor", "Agilent Technologies", ++ "Centillium Communications", "W.L. Gore", "HanBit Electronics", "GlobeSpan", ++ "Element 14", "Pycon", "Saifun Semiconductors", "Sibyte, Incorporated", ++ "MetaLink Technologies", "Feiya Technology", "I & C Technology", "Shikatronics", ++ "Elektrobit", "Megic", "Com-Tier", "Malaysia Micro Solutions", ++ "Hyperchip", "Gemstone Communications", "Anadigm (former Anadyne)", "3ParData", ++ "Mellanox Technologies", "Tenx Technologies", "Helix AG", "Domosys", ++ "Skyup Technology", "HiNT Corporation", "Chiaro", "MDT Technologies GmbH (former MCI Computer GMBH)", ++ "Exbit Technology A/S", "Integrated Technology Express", "AVED Memory", "Legerity", ++ "Jasmine Networks", "Caspian Networks", "nCUBE", "Silicon Access Networks", ++ "FDK Corporation", "High Bandwidth Access", "MultiLink Technology", "BRECIS", ++ "World Wide Packets", "APW", "Chicory Systems", "Xstream Logic", ++ "Fast-Chip", "Zucotto Wireless", "Realchip", "Galaxy Power", ++ "eSilicon", "Morphics Technology", "Accelerant Networks", "Silicon Wave", ++ "SandCraft", "Elpida"}, ++{"Solectron", "Optosys Technologies", "Buffalo (former Melco)", "TriMedia Technologies", ++ "Cyan Technologies", "Global Locate", "Optillion", "Terago Communications", ++ "Ikanos Communications", "Princeton Technology", "Nanya Technology", "Elite Flash Storage", ++ "Mysticom", "LightSand Communications", "ATI Technologies", "Agere Systems", ++ "NeoMagic", "AuroraNetics", "Golden Empire", "Mushkin", ++ "Tioga Technologies", "Netlist", "TeraLogic", "Cicada Semiconductor", ++ "Centon Electronics", "Tyco Electronics", "Magis Works", "Zettacom", ++ "Cogency Semiconductor", "Chipcon AS", "Aspex Technology", "F5 Networks", ++ "Programmable Silicon Solutions", "ChipWrights", "Acorn Networks", "Quicklogic", ++ "Kingmax Semiconductor", "BOPS", "Flasys", "BitBlitz Communications", ++ "eMemory Technology", "Procket Networks", "Purple Ray", "Trebia Networks", ++ "Delta Electronics", "Onex Communications", "Ample Communications", "Memory Experts Intl", ++ "Astute Networks", "Azanda Network Devices", "Dibcom", "Tekmos", ++ "API NetWorks", "Bay Microsystems", "Firecron Ltd", "Resonext Communications", ++ "Tachys Technologies", "Equator Technology", "Concept Computer", "SILCOM", ++ "3Dlabs", "c't Magazine", "Sanera Systems", "Silicon Packets", ++ "Viasystems Group", "Simtek", "Semicon Devices Singapore", "Satron Handelsges", ++ "Improv Systems", "INDUSYS GmbH", "Corrent", "Infrant Technologies", ++ "Ritek Corp", "empowerTel Networks", "Hypertec", "Cavium Networks", ++ "PLX Technology", "Massana Design", "Intrinsity", "Valence Semiconductor", ++ "Terawave Communications", "IceFyre Semiconductor", "Primarion", "Picochip Designs Ltd", ++ "Silverback Systems", "Jade Star Technologies", "Pijnenburg Securealink", ++ "takeMS - Ultron AG (former Memorysolution GmbH)", "Cambridge Silicon Radio", ++ "Swissbit", "Nazomi Communications", "eWave System", ++ "Rockwell Collins", "Picocel Co., Ltd.", "Alphamosaic Ltd", "Sandburst", ++ "SiCon Video", "NanoAmp Solutions", "Ericsson Technology", "PrairieComm", ++ "Mitac International", "Layer 
N Networks", "MtekVision", "Allegro Networks", ++ "Marvell Semiconductors", "Netergy Microelectronic", "NVIDIA", "Internet Machines", ++ "Peak Electronics", "Litchfield Communication", "Accton Technology", "Teradiant Networks", ++ "Scaleo Chip (former Europe Technologies)", "Cortina Systems", "RAM Components", "Raqia Networks", ++ "ClearSpeed", "Matsushita Battery", "Xelerated", "SimpleTech", ++ "Utron Technology", "Astec International", "AVM gmbH", "Redux Communications", ++ "Dot Hill Systems", "TeraChip"}, ++{"T-RAM Incorporated", "Innovics Wireless", "Teknovus", "KeyEye Communications", ++ "Runcom Technologies", "RedSwitch", "Dotcast", "Silicon Mountain Memory", ++ "Signia Technologies", "Pixim", "Galazar Networks", "White Electronic Designs", ++ "Patriot Scientific", "Neoaxiom Corporation", "3Y Power Technology", "Scaleo Chip (former Europe Technologies)", ++ "Potentia Power Systems", "C-guys Incorporated", "Digital Communications Technology Incorporated", "Silicon-Based Technology", ++ "Fulcrum Microsystems", "Positivo Informatica Ltd", "XIOtech Corporation", "PortalPlayer", ++ "Zhiying Software", "Parker Vision, Inc. (former Direct2Data)", "Phonex Broadband", "Skyworks Solutions", ++ "Entropic Communications", "Pacific Force Technology", "Zensys A/S", "Legend Silicon Corp.", ++ "sci-worx GmbH", "SMSC (former Oasis Silicon Systems)", "Renesas Electronics (former Renesas Technology)", "Raza Microelectronics", ++ "Phyworks", "MediaTek", "Non-cents Productions", "US Modular", ++ "Wintegra Ltd", "Mathstar", "StarCore", "Oplus Technologies", ++ "Mindspeed", "Just Young Computer", "Radia Communications", "OCZ", ++ "Emuzed", "LOGIC Devices", "Inphi Corporation", "Quake Technologies", ++ "Vixel", "SolusTek", "Kongsberg Maritime", "Faraday Technology", ++ "Altium Ltd.", "Insyte", "ARM Ltd.", "DigiVision", ++ "Vativ Technologies", "Endicott Interconnect Technologies", "Pericom", "Bandspeed", ++ "LeWiz Communications", "CPU Technology", "Ramaxel Technology", "DSP Group", ++ "Axis Communications", "Legacy Electronics", "Chrontel", "Powerchip Semiconductor", ++ "MobilEye Technologies", "Excel Semiconductor", "A-DATA Technology", "VirtualDigm", ++ "G.Skill Intl", "Quanta Computer", "Yield Microelectronics", "Afa Technologies", ++ "KINGBOX Technology Co. Ltd.", "Ceva", "iStor Networks", "Advance Modules", ++ "Microsoft", "Open-Silicon", "Goal Semiconductor", "ARC International", ++ "Simmtec", "Metanoia", "Key Stream", "Lowrance Electronics", ++ "Adimos", "SiGe Semiconductor", "Fodus Communications", "Credence Systems Corp.", ++ "Genesis Microchip Inc.", "Vihana, Inc.", "WIS Technologies", "GateChange Technologies", ++ "High Density Devices AS", "Synopsys", "Gigaram", "Enigma Semiconductor Inc.", ++ "Century Micro Inc.", "Icera Semiconductor", "Mediaworks Integrated Systems", "O'Neil Product Development", ++ "Supreme Top Technology Ltd.", "MicroDisplay Corporation", "Team Group Inc.", "Sinett Corporation", ++ "Toshiba Corporation", "Tensilica", "SiRF Technology", "Bacoc Inc.", ++ "SMaL Camera Technologies", "Thomson SC", "Airgo Networks", "Wisair Ltd.", ++ "SigmaTel", "Arkados", "Compete IT gmbH Co. 
KG", "Eudar Technology Inc.", ++ "Focus Enhancements", "Xyratex"}, ++{"Specular Networks", "Patriot Memory", "U-Chip Technology Corp.", "Silicon Optix", ++ "Greenfield Networks", "CompuRAM GmbH", "Stargen, Inc.", "NetCell Corporation", ++ "Excalibrus Technologies Ltd", "SCM Microsystems", "Xsigo Systems, Inc.", "CHIPS & Systems Inc", ++ "Tier 1 Multichip Solutions", "CWRL Labs", "Teradici", "Gigaram, Inc.", ++ "g2 Microsystems", "PowerFlash Semiconductor", "P.A. Semi, Inc.", "NovaTech Solutions, S.A.", ++ "c2 Microsystems, Inc.", "Level5 Networks", "COS Memory AG", "Innovasic Semiconductor", ++ "02IC Co. Ltd", "Tabula, Inc.", "Crucial Technology", "Chelsio Communications", ++ "Solarflare Communications", "Xambala Inc.", "EADS Astrium", "Terra Semiconductor Inc. (former ATO Semicon Co. Ltd.)", ++ "Imaging Works, Inc.", "Astute Networks, Inc.", "Tzero", "Emulex", ++ "Power-One", "Pulse~LINK Inc.", "Hon Hai Precision Industry", "White Rock Networks Inc.", ++ "Telegent Systems USA, Inc.", "Atrua Technologies, Inc.", "Acbel Polytech Inc.", ++ "eRide Inc.","ULi Electronics Inc.", "Magnum Semiconductor Inc.", "neoOne Technology, Inc.", ++ "Connex Technology, Inc.", "Stream Processors, Inc.", "Focus Enhancements", "Telecis Wireless, Inc.", ++ "uNav Microelectronics", "Tarari, Inc.", "Ambric, Inc.", "Newport Media, Inc.", "VMTS", ++ "Enuclia Semiconductor, Inc.", "Virtium Technology Inc.", "Solid State System Co., Ltd.", "Kian Tech LLC", ++ "Artimi", "Power Quotient International", "Avago Technologies", "ADTechnology", "Sigma Designs", ++ "SiCortex, Inc.", "Ventura Technology Group", "eASIC", "M.H.S. SAS", "Micro Star International", ++ "Rapport Inc.", "Makway International", "Broad Reach Engineering Co.", ++ "Semiconductor Mfg Intl Corp", "SiConnect", "FCI USA Inc.", "Validity Sensors", ++ "Coney Technology Co. Ltd.", "Spans Logic", "Neterion Inc.", "Qimonda", ++ "New Japan Radio Co. Ltd.", "Velogix", "Montalvo Systems", "iVivity Inc.", "Walton Chaintech", ++ "AENEON", "Lorom Industrial Co. Ltd.", "Radiospire Networks", "Sensio Technologies, Inc.", ++ "Nethra Imaging", "Hexon Technology Pte Ltd", "CompuStocx (CSX)", "Methode Electronics, Inc.", ++ "Connect One Ltd.", "Opulan Technologies", "Septentrio NV", "Goldenmars Technology Inc.", ++ "Kreton Corporation", "Cochlear Ltd.", "Altair Semiconductor", "NetEffect, Inc.", ++ "Spansion, Inc.", "Taiwan Semiconductor Mfg", "Emphany Systems Inc.", ++ "ApaceWave Technologies", "Mobilygen Corporation", "Tego", "Cswitch Corporation", ++ "Haier (Beijing) IC Design Co.", "MetaRAM", "Axel Electronics Co. 
Ltd.", "Tilera Corporation", ++ "Aquantia", "Vivace Semiconductor", "Redpine Signals", "Octalica", "InterDigital Communications", ++ "Avant Technology", "Asrock, Inc.", "Availink", "Quartics, Inc.", "Element CXI", ++ "Innovaciones Microelectronicas", "VeriSilicon Microelectronics", "W5 Networks"}, ++{"MOVEKING", "Mavrix Technology, Inc.", "CellGuide Ltd.", "Faraday Technology", ++ "Diablo Technologies, Inc.", "Jennic", "Octasic", "Molex Incorporated", "3Leaf Networks", ++ "Bright Micron Technology", "Netxen", "NextWave Broadband Inc.", "DisplayLink", "ZMOS Technology", ++ "Tec-Hill", "Multigig, Inc.", "Amimon", "Euphonic Technologies, Inc.", "BRN Phoenix", ++ "InSilica", "Ember Corporation", "Avexir Technologies Corporation", "Echelon Corporation", ++ "Edgewater Computer Systems", "XMOS Semiconductor Ltd.", "GENUSION, Inc.", "Memory Corp NV", ++ "SiliconBlue Technologies", "Rambus Inc.", "Andes Technology Corporation", "Coronis Systems", ++ "Achronix Semiconductor", "Siano Mobile Silicon Ltd.", "Semtech Corporation", "Pixelworks Inc.", ++ "Gaisler Research AB", "Teranetics", "Toppan Printing Co. Ltd.", "Kingxcon", ++ "Silicon Integrated Systems", "I-O Data Device, Inc.", "NDS Americas Inc.", "Solomon Systech Limited", ++ "On Demand Microelectronics", "Amicus Wireless Inc.", "SMARDTV SNC", "Comsys Communication Ltd.", ++ "Movidia Ltd.", "Javad GNSS, Inc.", "Montage Technology Group", "Trident Microsystems", "Super Talent", ++ "Optichron, Inc.", "Future Waves UK Ltd.", "SiBEAM, Inc.", "Inicore, Inc.", "Virident Systems", ++ "M2000, Inc.", "ZeroG Wireless, Inc.", "Gingle Technology Co. Ltd.", "Space Micro Inc.", "Wilocity", ++ "Novafora, Inc.", "iKoa Corporation", "ASint Technology", "Ramtron", "Plato Networks Inc.", ++ "IPtronics AS", "Infinite-Memories", "Parade Technologies Inc.", "Dune Networks", ++ "GigaDevice Semiconductor", "Modu Ltd.", "CEITEC", "Northrop Grumman", "XRONET Corporation", ++ "Sicon Semiconductor AB", "Atla Electronics Co. Ltd.", "TOPRAM Technology", "Silego Technology Inc.", ++ "Kinglife", "Ability Industries Ltd.", "Silicon Power Computer & Communications", ++ "Augusta Technology, Inc.", "Nantronics Semiconductors", "Hilscher Gesellschaft", "Quixant Ltd.", ++ "Percello Ltd.", "NextIO Inc.", "Scanimetrics Inc.", "FS-Semi Company Ltd.", "Infinera Corporation", ++ "SandForce Inc.", "Lexar Media", "Teradyne Inc.", "Memory Exchange Corp.", "Suzhou Smartek Electronics", ++ "Avantium Corporation", "ATP Electronics Inc.", "Valens Semiconductor Ltd", "Agate Logic, Inc.", ++ "Netronome", "Zenverge, Inc.", "N-trig Ltd", "SanMax Technologies Inc.", "Contour Semiconductor Inc.", ++ "TwinMOS", "Silicon Systems, Inc.", "V-Color Technology Inc.", "Certicom Corporation", "JSC ICC Milandr", ++ "PhotoFast Global Inc.", "InnoDisk Corporation", "Muscle Power", "Energy Micro", "Innofidei", ++ "CopperGate Communications", "Holtek Semiconductor Inc.", "Myson Century, Inc.", "FIDELIX", ++ "Red Digital Cinema", "Densbits Technology", "Zempro", "MoSys", "Provigent", "Triad Semiconductor, Inc."}, ++{"Siklu Communication Ltd.", "A Force Manufacturing Ltd.", "Strontium", "Abilis Systems", "Siglead, Inc.", ++ "Ubicom, Inc.", "Unifosa Corporation", "Stretch, Inc.", "Lantiq Deutschland GmbH", "Visipro", ++ "EKMemory", "Microelectronics Institute ZTE", "Cognovo Ltd.", "Carry Technology Co. Ltd.", "Nokia", ++ "King Tiger Technology", "Sierra Wireless", "HT Micron", "Albatron Technology Co. 
Ltd.", ++ "Leica Geosystems AG", "BroadLight", "AEXEA", "ClariPhy Communications, Inc.", "Green Plug", ++ "Design Art Networks", "Mach Xtreme Technology Ltd.", "ATO Solutions Co. Ltd.", "Ramsta", ++ "Greenliant Systems, Ltd.", "Teikon", "Antec Hadron", "NavCom Technology, Inc.", ++ "Shanghai Fudan Microelectronics", "Calxeda, Inc.", "JSC EDC Electronics", "Kandit Technology Co. Ltd.", ++ "Ramos Technology", "Goldenmars Technology", "XeL Technology Inc.", "Newzone Corporation", ++ "ShenZhen MercyPower Tech", "Nanjing Yihuo Technology", "Nethra Imaging Inc.", "SiTel Semiconductor BV", ++ "SolidGear Corporation", "Topower Computer Ind Co Ltd.", "Wilocity", "Profichip GmbH", ++ "Gerad Technologies", "Ritek Corporation", "Gomos Technology Limited", "Memoright Corporation", ++ "D-Broad, Inc.", "HiSilicon Technologies", "Syndiant Inc.", "Enverv Inc.", "Cognex", ++ "Xinnova Technology Inc.", "Ultron AG", "Concord Idea Corporation", "AIM Corporation", ++ "Lifetime Memory Products", "Ramsway", "Recore Systems BV", "Haotian Jinshibo Science Tech", ++ "Being Advanced Memory", "Adesto Technologies", "Giantec Semiconductor, Inc.", "HMD Electronics AG", ++ "Gloway International (HK)", "Kingcore", "Anucell Technology Holding", ++ "Accord Software & Systems Pvt. Ltd.", "Active-Semi Inc.", "Denso Corporation", "TLSI Inc.", ++ "Shenzhen Daling Electronic Co. Ltd.", "Mustang", "Orca Systems", "Passif Semiconductor", ++ "GigaDevice Semiconductor (Beijing) Inc.", "Memphis Electronic", "Beckhoff Automation GmbH", ++ "Harmony Semiconductor Corp (former ProPlus Design Solutions)", "Air Computers SRL", "TMT Memory", ++ "Eorex Corporation", "Xingtera", "Netsol", "Bestdon Technology Co. Ltd.", "Baysand Inc.", ++ "Uroad Technology Co. Ltd. (former Triple Grow Industrial Ltd.)", "Wilk Elektronik S.A.", ++ "AAI", "Harman", "Berg Microelectronics Inc.", "ASSIA, Inc.", "Visiontek Products LLC", ++ "OCMEMORY", "Welink Solution Inc.", "Shark Gaming", "Avalanche Technology", ++ "R&D Center ELVEES OJSC", "KingboMars Technology Co. Ltd.", ++ "High Bridge Solutions Industria Eletronica", "Transcend Technology Co. Ltd.", ++ "Everspin Technologies", "Hon-Hai Precision", "Smart Storage Systems", "Toumaz Group", ++ "Zentel Electronics Corporation", "Panram International Corporation", ++ "Silicon Space Technology"} ++}; ++ ++/** ++ * struct cxl_ctx - library user context to find "nd" instances ++ * ++ * Instantiate with cxl_new(), which takes an initial reference. 
Free ++ * the context by dropping the reference count to zero with ++ * cxl_unref(), or take additional references with cxl_ref() ++ * @timeout: default library timeout in milliseconds ++ */ ++struct cxl_ctx { ++ /* log_ctx must be first member for cxl_set_log_fn compat */ ++ struct log_ctx ctx; ++ int refcount; ++ void *userdata; ++ int memdevs_init; ++ struct list_head memdevs; ++ struct kmod_ctx *kmod_ctx; ++ void *private_data; ++}; ++ ++static void free_memdev(struct cxl_memdev *memdev, struct list_head *head) ++{ ++ if (head) ++ list_del_from(head, &memdev->list); ++ kmod_module_unref(memdev->module); ++ free(memdev->firmware_version); ++ free(memdev->dev_buf); ++ free(memdev->dev_path); ++ free(memdev); ++} ++ ++static void hexdump_mbox(struct cxl_cmd *cmd, struct cxl_ctx *ctx) ++{ ++ u8 *buf; ++ buf = (u8*) cmd->send_cmd->in.payload; ++ dbg(ctx, "\n============== SEND_CMD HEXDUMP =============\n \ ++ id (u32):\nHex: %x\tDec: %d\n \ ++ flags (u32):\nHex: %x\tDec: %d\n \ ++ raw.opcode (u16):\nHex: %x\tDec: %d\n \ ++ in.size (s32):\nHex: %x\tDec: %d\n \ ++ in.payload (u64, pointer to buffer):\nHex: %llx\tDec: %lld\n", cmd->send_cmd->id, cmd->send_cmd->id, cmd->send_cmd->flags, cmd->send_cmd->flags, cmd->send_cmd->raw.opcode, cmd->send_cmd->raw.opcode, cmd->send_cmd->in.size, cmd->send_cmd->in.size, cmd->send_cmd->in.payload, cmd->send_cmd->in.payload); ++ dbg_s(ctx, "Input payload:"); ++ for (int i = 0; i < cmd->send_cmd->in.size; i++) { ++ if (i % 16 == 0) ++ { ++ dbg_s(ctx, "\n%08x %02x ", i, buf[i]); ++ } ++ else ++ { ++ dbg_s(ctx, "%02x ", buf[i]); ++ } ++ } ++ dbg_s(ctx, "\n============== END SEND_CMD HEXDUMP =============\n"); ++ ++} ++ ++/** ++ * cxl_get_userdata - retrieve stored data pointer from library context ++ * @ctx: cxl library context ++ * ++ * This might be useful to access from callbacks like a custom logging ++ * function. ++ */ ++CXL_EXPORT void *cxl_get_userdata(struct cxl_ctx *ctx) ++{ ++ if (ctx == NULL) ++ return NULL; ++ return ctx->userdata; ++} ++ ++/** ++ * cxl_set_userdata - store custom @userdata in the library context ++ * @ctx: cxl library context ++ * @userdata: data pointer ++ */ ++CXL_EXPORT void cxl_set_userdata(struct cxl_ctx *ctx, void *userdata) ++{ ++ if (ctx == NULL) ++ return; ++ ctx->userdata = userdata; ++} ++ ++CXL_EXPORT void cxl_set_private_data(struct cxl_ctx *ctx, void *data) ++{ ++ ctx->private_data = data; ++} ++ ++CXL_EXPORT void *cxl_get_private_data(struct cxl_ctx *ctx) ++{ ++ return ctx->private_data; ++} ++ ++/** ++ * cxl_new - instantiate a new library context ++ * @ctx: context to establish ++ * ++ * Returns zero on success and stores an opaque pointer in ctx. The ++ * context is freed by cxl_unref(), i.e. cxl_new() implies an ++ * internal cxl_ref(). 
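Taken together with cxl_ref()/cxl_unref(), the intended lifecycle is short: create the context, iterate devices, drop the reference. A minimal sketch, relying only on the exported calls in libcxl.h (including the cxl_memdev_foreach() iterator) and with an illustrative include path:

#include <stdio.h>
#include "libcxl.h"	/* include path is illustrative */

static int list_memdevs(void)
{
	struct cxl_ctx *ctx;
	struct cxl_memdev *memdev;
	int rc;

	rc = cxl_new(&ctx);		/* takes the initial reference */
	if (rc)
		return rc;

	cxl_memdev_foreach(ctx, memdev)
		printf("%s: fw %s\n", cxl_memdev_get_devname(memdev),
		       cxl_memdev_get_firmware_verison(memdev));

	cxl_unref(ctx);			/* drops the last reference, frees ctx */
	return 0;
}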
++ */ ++CXL_EXPORT int cxl_new(struct cxl_ctx **ctx) ++{ ++ struct kmod_ctx *kmod_ctx; ++ struct cxl_ctx *c; ++ int rc = 0; ++ ++ c = calloc(1, sizeof(struct cxl_ctx)); ++ if (!c) ++ return -ENOMEM; ++ ++ kmod_ctx = kmod_new(NULL, NULL); ++ if (check_kmod(kmod_ctx) != 0) { ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ c->refcount = 1; ++ log_init(&c->ctx, "libcxl", "CXL_LOG"); ++ info(c, "ctx %p created\n", c); ++ dbg(c, "log_priority=%d\n", c->ctx.log_priority); ++ *ctx = c; ++ list_head_init(&c->memdevs); ++ c->kmod_ctx = kmod_ctx; ++ ++ return 0; ++out: ++ free(c); ++ return rc; ++} ++ ++/** ++ * cxl_ref - take an additional reference on the context ++ * @ctx: context established by cxl_new() ++ */ ++CXL_EXPORT struct cxl_ctx *cxl_ref(struct cxl_ctx *ctx) ++{ ++ if (ctx == NULL) ++ return NULL; ++ ctx->refcount++; ++ return ctx; ++} ++ ++/** ++ * cxl_unref - drop a context reference count ++ * @ctx: context established by cxl_new() ++ * ++ * Drop a reference and if the resulting reference count is 0 destroy ++ * the context. ++ */ ++CXL_EXPORT void cxl_unref(struct cxl_ctx *ctx) ++{ ++ struct cxl_memdev *memdev, *_d; ++ ++ if (ctx == NULL) ++ return; ++ ctx->refcount--; ++ if (ctx->refcount > 0) ++ return; ++ ++ list_for_each_safe(&ctx->memdevs, memdev, _d, list) ++ free_memdev(memdev, &ctx->memdevs); ++ ++ kmod_unref(ctx->kmod_ctx); ++ info(ctx, "context %p released\n", ctx); ++ free(ctx); ++} ++ ++/** ++ * cxl_set_log_fn - override default log routine ++ * @ctx: cxl library context ++ * @log_fn: function to be called for logging messages ++ * ++ * The built-in logging writes to stderr. It can be overridden by a ++ * custom function, to plug log messages into the user's logging ++ * functionality. ++ */ ++CXL_EXPORT void cxl_set_log_fn(struct cxl_ctx *ctx, ++ void (*cxl_log_fn)(struct cxl_ctx *ctx, int priority, ++ const char *file, int line, const char *fn, ++ const char *format, va_list args)) ++{ ++ ctx->ctx.log_fn = (log_fn) cxl_log_fn; ++ info(ctx, "custom logging function %p registered\n", cxl_log_fn); ++} ++ ++/** ++ * cxl_get_log_priority - retrieve current library loglevel (syslog) ++ * @ctx: cxl library context ++ */ ++CXL_EXPORT int cxl_get_log_priority(struct cxl_ctx *ctx) ++{ ++ return ctx->ctx.log_priority; ++} ++ ++/** ++ * cxl_set_log_priority - set log verbosity ++ * @priority: from syslog.h, LOG_ERR, LOG_INFO, LOG_DEBUG ++ * ++ * Note: LOG_DEBUG requires library be built with "configure --enable-debug" ++ */ ++CXL_EXPORT void cxl_set_log_priority(struct cxl_ctx *ctx, int priority) ++{ ++ ctx->ctx.log_priority = priority; ++} ++ ++static void *add_cxl_memdev(void *parent, int id, const char *cxlmem_base) ++{ ++ const char *devname = devpath_to_devname(cxlmem_base); ++ char *path = calloc(1, strlen(cxlmem_base) + 100); ++ struct cxl_ctx *ctx = parent; ++ struct cxl_memdev *memdev, *memdev_dup; ++ char buf[SYSFS_ATTR_SIZE]; ++ struct stat st; ++ ++ if (!path) ++ return NULL; ++ dbg(ctx, "%s: base: \'%s\'\n", __func__, cxlmem_base); ++ ++ memdev = calloc(1, sizeof(*memdev)); ++ if (!memdev) ++ goto err_dev; ++ memdev->id = id; ++ memdev->ctx = ctx; ++ ++ sprintf(path, "/dev/cxl/%s", devname); ++ if (stat(path, &st) < 0) ++ goto err_read; ++ memdev->major = major(st.st_rdev); ++ memdev->minor = minor(st.st_rdev); ++ ++ sprintf(path, "%s/pmem/size", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; ++ memdev->pmem_size = strtoull(buf, NULL, 0); ++ ++ sprintf(path, "%s/ram/size", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; 
++ memdev->ram_size = strtoull(buf, NULL, 0); ++ ++ sprintf(path, "%s/payload_max", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; ++ memdev->payload_max = strtoull(buf, NULL, 0); ++ if (memdev->payload_max < 0) ++ goto err_read; ++ ++ sprintf(path, "%s/label_storage_size", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; ++ memdev->lsa_size = strtoull(buf, NULL, 0); ++ if (memdev->lsa_size == ULLONG_MAX) ++ goto err_read; ++ ++ memdev->dev_path = strdup(cxlmem_base); ++ if (!memdev->dev_path) ++ goto err_read; ++ ++ sprintf(path, "%s/firmware_version", cxlmem_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) ++ goto err_read; ++ ++ memdev->firmware_version = strdup(buf); ++ if (!memdev->firmware_version) ++ goto err_read; ++ ++ memdev->dev_buf = calloc(1, strlen(cxlmem_base) + 50); ++ if (!memdev->dev_buf) ++ goto err_read; ++ memdev->buf_len = strlen(cxlmem_base) + 50; ++ ++ cxl_memdev_foreach(ctx, memdev_dup) ++ if (memdev_dup->id == memdev->id) { ++ free_memdev(memdev, NULL); ++ free(path); ++ return memdev_dup; ++ } ++ ++ list_add(&ctx->memdevs, &memdev->list); ++ free(path); ++ return memdev; ++ ++ err_read: ++ free(memdev->firmware_version); ++ free(memdev->dev_buf); ++ free(memdev->dev_path); ++ free(memdev); ++ err_dev: ++ free(path); ++ return NULL; ++} ++ ++static void cxl_memdevs_init(struct cxl_ctx *ctx) ++{ ++ if (ctx->memdevs_init) ++ return; ++ ++ ctx->memdevs_init = 1; ++ ++ sysfs_device_parse(ctx, "/sys/bus/cxl/devices", "mem", ctx, ++ add_cxl_memdev); ++} ++ ++CXL_EXPORT struct cxl_ctx *cxl_memdev_get_ctx(struct cxl_memdev *memdev) ++{ ++ return memdev->ctx; ++} ++ ++CXL_EXPORT struct cxl_memdev *cxl_memdev_get_first(struct cxl_ctx *ctx) ++{ ++ cxl_memdevs_init(ctx); ++ ++ return list_top(&ctx->memdevs, struct cxl_memdev, list); ++} ++ ++CXL_EXPORT struct cxl_memdev *cxl_memdev_get_next(struct cxl_memdev *memdev) ++{ ++ struct cxl_ctx *ctx = memdev->ctx; ++ ++ return list_next(&ctx->memdevs, memdev, list); ++} ++ ++CXL_EXPORT int cxl_memdev_get_id(struct cxl_memdev *memdev) ++{ ++ return memdev->id; ++} ++ ++CXL_EXPORT const char *cxl_memdev_get_devname(struct cxl_memdev *memdev) ++{ ++ return devpath_to_devname(memdev->dev_path); ++} ++ ++CXL_EXPORT int cxl_memdev_get_major(struct cxl_memdev *memdev) ++{ ++ return memdev->major; ++} ++ ++CXL_EXPORT int cxl_memdev_get_minor(struct cxl_memdev *memdev) ++{ ++ return memdev->minor; ++} ++ ++CXL_EXPORT unsigned long long cxl_memdev_get_pmem_size(struct cxl_memdev *memdev) ++{ ++ return memdev->pmem_size; ++} ++ ++CXL_EXPORT unsigned long long cxl_memdev_get_ram_size(struct cxl_memdev *memdev) ++{ ++ return memdev->ram_size; ++} ++ ++CXL_EXPORT const char *cxl_memdev_get_firmware_verison(struct cxl_memdev *memdev) ++{ ++ return memdev->firmware_version; ++} ++ ++CXL_EXPORT size_t cxl_memdev_get_lsa_size(struct cxl_memdev *memdev) ++{ ++ return memdev->lsa_size; ++} ++ ++CXL_EXPORT int cxl_memdev_is_active(struct cxl_memdev *memdev) ++{ ++ /* ++ * TODO: Currently memdevs are always considered inactive. Once we have ++ * cxl_bus drivers that are bound/unbound to memdevs, we'd use that to ++ * determine the active/inactive state. 
++ */ ++ return 0; ++} ++ ++CXL_EXPORT void cxl_cmd_unref(struct cxl_cmd *cmd) ++{ ++ if (!cmd) ++ return; ++ if (--cmd->refcount == 0) { ++ free(cmd->query_cmd); ++ free(cmd->send_cmd); ++ free(cmd->input_payload); ++ free(cmd->output_payload); ++ free(cmd); ++ } ++} ++ ++CXL_EXPORT void cxl_cmd_ref(struct cxl_cmd *cmd) ++{ ++ cmd->refcount++; ++} ++ ++static int cxl_cmd_alloc_query(struct cxl_cmd *cmd, int num_cmds) ++{ ++ size_t size; ++ ++ if (!cmd) ++ return -EINVAL; ++ ++ if (cmd->query_cmd != NULL) ++ free(cmd->query_cmd); ++ ++ size = sizeof(struct cxl_mem_query_commands) + ++ (num_cmds * sizeof(struct cxl_command_info)); ++ cmd->query_cmd = calloc(1, size); ++ if (!cmd->query_cmd) ++ return -ENOMEM; ++ ++ cmd->query_cmd->n_commands = num_cmds; ++ ++ return 0; ++} ++ ++static struct cxl_cmd *cxl_cmd_new(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ size_t size; ++ ++ size = sizeof(*cmd); ++ cmd = calloc(1, size); ++ if (!cmd) ++ return NULL; ++ ++ cxl_cmd_ref(cmd); ++ cmd->memdev = memdev; ++ ++ return cmd; ++} ++ ++static int __do_cmd(struct cxl_cmd *cmd, int ioctl_cmd, int fd) ++{ ++ void *cmd_buf; ++ int rc; ++ ++ switch (ioctl_cmd) { ++ case CXL_MEM_QUERY_COMMANDS: ++ cmd_buf = cmd->query_cmd; ++ break; ++ case CXL_MEM_SEND_COMMAND: ++ cmd_buf = cmd->send_cmd; ++ if (cxl_get_log_priority(cmd->memdev->ctx) == LOG_DEBUG) ++ { ++ hexdump_mbox(cmd, cmd->memdev->ctx); ++ } ++ break; ++ default: ++ return -EINVAL; ++ } ++ rc = ioctl(fd, ioctl_cmd, cmd_buf); ++ if (rc < 0) ++ rc = -errno; ++ ++ return rc; ++} ++ ++static int do_cmd(struct cxl_cmd *cmd, int ioctl_cmd) ++{ ++ char *path; ++ struct stat st; ++ unsigned int major, minor; ++ int rc = 0, fd; ++ struct cxl_memdev *memdev = cmd->memdev; ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ const char *devname = cxl_memdev_get_devname(memdev); ++ ++ major = cxl_memdev_get_major(memdev); ++ minor = cxl_memdev_get_minor(memdev); ++ ++ if (asprintf(&path, "/dev/cxl/%s", devname) < 0) ++ return -ENOMEM; ++ ++ fd = open(path, O_RDWR); ++ if (fd < 0) { ++ err(ctx, "failed to open %s: %s\n", path, strerror(errno)); ++ rc = -errno; ++ goto out; ++ } ++ ++ if (fstat(fd, &st) >= 0 && S_ISCHR(st.st_mode) ++ && major(st.st_rdev) == major ++ && minor(st.st_rdev) == minor) { ++ rc = __do_cmd(cmd, ioctl_cmd, fd); ++ } else { ++ err(ctx, "failed to validate %s as a CXL memdev node\n", path); ++ rc = -ENXIO; ++ } ++ close(fd); ++out: ++ free(path); ++ return rc; ++} ++ ++static int alloc_do_query(struct cxl_cmd *cmd, int num_cmds) ++{ ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(cmd->memdev); ++ int rc; ++ ++ rc = cxl_cmd_alloc_query(cmd, num_cmds); ++ if (rc) ++ return rc; ++ ++ rc = do_cmd(cmd, CXL_MEM_QUERY_COMMANDS); ++ if (rc < 0) ++ err(ctx, "%s: query commands failed: %s\n", ++ cxl_memdev_get_devname(cmd->memdev), ++ strerror(-rc)); ++ return rc; ++} ++ ++static int cxl_cmd_do_query(struct cxl_cmd *cmd) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ const char *devname = cxl_memdev_get_devname(memdev); ++ int rc, n_commands; ++ ++ switch (cmd->query_status) { ++ case CXL_CMD_QUERY_OK: ++ return 0; ++ case CXL_CMD_QUERY_UNSUPPORTED: ++ return -EOPNOTSUPP; ++ case CXL_CMD_QUERY_NOT_RUN: ++ break; ++ default: ++ err(ctx, "%s: Unknown query_status %d\n", ++ devname, cmd->query_status); ++ return -EINVAL; ++ } ++ ++ rc = alloc_do_query(cmd, 0); ++ if (rc) ++ return rc; ++ ++ n_commands = cmd->query_cmd->n_commands; ++ dbg(ctx, "%s: supports %d commands\n", devname, 
n_commands); ++ ++ return alloc_do_query(cmd, n_commands); ++} ++ ++static int cxl_cmd_validate(struct cxl_cmd *cmd, u32 cmd_id) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ struct cxl_mem_query_commands *query = cmd->query_cmd; ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ u32 i; ++ ++ for (i = 0; i < query->n_commands; i++) { ++ struct cxl_command_info *cinfo = &query->commands[i]; ++ const char *cmd_name = cxl_command_names[cinfo->id].name; ++ ++ if (cinfo->id != cmd_id) ++ continue; ++ ++ dbg(ctx, "%s: %s: in: %d, out %d, flags: %#08x\n", ++ devname, cmd_name, cinfo->size_in, ++ cinfo->size_out, cinfo->flags); ++ ++ cmd->query_idx = i; ++ cmd->query_status = CXL_CMD_QUERY_OK; ++ return 0; ++ } ++ cmd->query_status = CXL_CMD_QUERY_UNSUPPORTED; ++ return -EOPNOTSUPP; ++} ++ ++CXL_EXPORT int cxl_cmd_set_input_payload(struct cxl_cmd *cmd, void *buf, ++ int size) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ ++ if (size > memdev->payload_max || size < 0) ++ return -EINVAL; ++ ++ if (!buf) { ++ ++ /* If the user didn't supply a buffer, allocate it */ ++ cmd->input_payload = calloc(1, size); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ } else { ++ /* ++ * Use user-buffer as is. If an automatic allocation was ++ * previously made (based on a fixed size from query), ++ * it will get freed during unref. ++ */ ++ cmd->send_cmd->in.payload = (u64)buf; ++ } ++ cmd->send_cmd->in.size = size; ++ ++ return 0; ++} ++ ++CXL_EXPORT int cxl_cmd_set_output_payload(struct cxl_cmd *cmd, void *buf, ++ int size) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ ++ if (size > memdev->payload_max || size < 0) ++ return -EINVAL; ++ ++ if (!buf) { ++ ++ /* If the user didn't supply a buffer, allocate it */ ++ cmd->output_payload = calloc(1, size); ++ if (!cmd->output_payload) ++ return -ENOMEM; ++ cmd->send_cmd->out.payload = (u64)cmd->output_payload; ++ } else { ++ /* ++ * Use user-buffer as is. If an automatic allocation was ++ * previously made (based on a fixed size from query), ++ * it will get freed during unref. 
++ */ ++ cmd->send_cmd->out.payload = (u64)buf; ++ } ++ cmd->send_cmd->out.size = size; ++ ++ return 0; ++} ++ ++static int cxl_cmd_alloc_send(struct cxl_cmd *cmd, u32 cmd_id) ++{ ++ struct cxl_mem_query_commands *query = cmd->query_cmd; ++ struct cxl_command_info *cinfo = &query->commands[cmd->query_idx]; ++ size_t size; ++ ++ if (!query) ++ return -EINVAL; ++ ++ size = sizeof(struct cxl_send_command); ++ cmd->send_cmd = calloc(1, size); ++ if (!cmd->send_cmd) ++ return -ENOMEM; ++ ++ if (cinfo->id != cmd_id) ++ return -EINVAL; ++ ++ cmd->send_cmd->id = cmd_id; ++ ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ if (cinfo->size_out < 0) ++ cinfo->size_out = cmd->memdev->payload_max; // -1 will require update ++ ++ if (cinfo->size_out > 0) { ++ cmd->output_payload = calloc(1, cinfo->size_out); ++ if (!cmd->output_payload) ++ return -ENOMEM; ++ cmd->send_cmd->out.payload = (u64)cmd->output_payload; ++ cmd->send_cmd->out.size = cinfo->size_out; ++ } ++ ++ return 0; ++} ++ ++static struct cxl_cmd *cxl_cmd_new_generic(struct cxl_memdev *memdev, ++ u32 cmd_id) ++{ ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ struct cxl_cmd *cmd; ++ int rc; ++ ++ cmd = cxl_cmd_new(memdev); ++ if (!cmd) ++ return NULL; ++ ++ rc = cxl_cmd_do_query(cmd); ++ if (rc) { ++ err(ctx, "%s: query returned: %s\n", devname, strerror(-rc)); ++ goto fail; ++ } ++ ++ rc = cxl_cmd_validate(cmd, cmd_id); ++ if (rc) { ++ errno = -rc; ++ goto fail; ++ } ++ ++ rc = cxl_cmd_alloc_send(cmd, cmd_id); ++ if (rc) { ++ errno = -rc; ++ goto fail; ++ } ++ ++ return cmd; ++ ++fail: ++ cxl_cmd_unref(cmd); ++ return NULL; ++} ++ ++CXL_EXPORT const char *cxl_cmd_get_devname(struct cxl_cmd *cmd) ++{ ++ return cxl_memdev_get_devname(cmd->memdev); ++} ++ ++#define cmd_get_int(cmd, n, N, field) \ ++do { \ ++ struct cxl_cmd_##n *c = (void *)cmd->send_cmd->out.payload; \ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_##N) \ ++ return EINVAL; \ ++ if (cmd->status < 0) \ ++ return cmd->status; \ ++ return le32_to_cpu(c->field); \ ++} while(0); ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_get_health_info( ++ struct cxl_memdev *memdev) ++{ ++ return cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_HEALTH_INFO); ++} ++ ++#define cmd_health_get_int(c, f) \ ++do { \ ++ cmd_get_int(c, get_health_info, GET_HEALTH_INFO, f); \ ++} while (0); ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_health_status(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, health_status); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_media_status(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, media_status); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_ext_status(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, ext_status); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_life_used(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, life_used); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_temperature(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, temperature); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_dirty_shutdowns(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, dirty_shutdowns); ++} ++ ++CXL_EXPORT int cxl_cmd_get_health_info_get_volatile_errors(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, volatile_errors); ++} ++ ++CXL_EXPORT int 
cxl_cmd_get_health_info_get_pmem_errors(struct cxl_cmd *cmd) ++{ ++ cmd_health_get_int(cmd, pmem_errors); ++} ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_identify(struct cxl_memdev *memdev) ++{ ++ return cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_IDENTIFY); ++} ++ ++CXL_EXPORT int cxl_cmd_identify_get_fw_rev(struct cxl_cmd *cmd, char *fw_rev, ++ int fw_len) ++{ ++ struct cxl_cmd_identify *id = (void *)cmd->send_cmd->out.payload; ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_IDENTIFY) ++ return -EINVAL; ++ if (cmd->status < 0) ++ return cmd->status; ++ ++ if (fw_len > 0) ++ memcpy(fw_rev, id->fw_revision, ++ min(fw_len, CXL_CMD_IDENTIFY_FW_REV_LENGTH)); ++ return 0; ++} ++ ++CXL_EXPORT unsigned long long cxl_cmd_identify_get_partition_align( ++ struct cxl_cmd *cmd) ++{ ++ struct cxl_cmd_identify *id = (void *)cmd->send_cmd->out.payload; ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_IDENTIFY) ++ return -EINVAL; ++ if (cmd->status < 0) ++ return cmd->status; ++ ++ return le64_to_cpu(id->partition_align); ++} ++ ++CXL_EXPORT unsigned int cxl_cmd_identify_get_lsa_size(struct cxl_cmd *cmd) ++{ ++ struct cxl_cmd_identify *id = (void *)cmd->send_cmd->out.payload; ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_IDENTIFY) ++ return -EINVAL; ++ if (cmd->status < 0) ++ return cmd->status; ++ ++ return le32_to_cpu(id->lsa_size); ++} ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_raw(struct cxl_memdev *memdev, ++ int opcode) ++{ ++ struct cxl_cmd *cmd; ++ ++ /* opcode '0' is reserved */ ++ if (opcode <= 0) { ++ errno = EINVAL; ++ return NULL; ++ } ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_RAW); ++ if (!cmd) ++ return NULL; ++ ++ cmd->send_cmd->raw.opcode = opcode; ++ return cmd; ++} ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_get_lsa(struct cxl_memdev *memdev, ++ unsigned int offset, unsigned int length) ++{ ++ struct cxl_cmd_get_lsa_in *get_lsa; ++ struct cxl_cmd *cmd; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_LSA); ++ if (!cmd) ++ return NULL; ++ ++ get_lsa = (void *)cmd->send_cmd->in.payload; ++ get_lsa->offset = cpu_to_le32(offset); ++ get_lsa->length = cpu_to_le32(length); ++ return cmd; ++} ++ ++#define cmd_get_void(cmd, N) \ ++do { \ ++ void *p = (void *)cmd->send_cmd->out.payload; \ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_##N) \ ++ return NULL; \ ++ if (cmd->status < 0) \ ++ return NULL; \ ++ return p; \ ++} while(0); ++ ++CXL_EXPORT void *cxl_cmd_get_lsa_get_payload(struct cxl_cmd *cmd) ++{ ++ cmd_get_void(cmd, GET_LSA); ++} ++ ++CXL_EXPORT int cxl_cmd_submit(struct cxl_cmd *cmd) ++{ ++ struct cxl_memdev *memdev = cmd->memdev; ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ int rc; ++ ++ switch (cmd->query_status) { ++ case CXL_CMD_QUERY_OK: ++ break; ++ case CXL_CMD_QUERY_UNSUPPORTED: ++ return -EOPNOTSUPP; ++ case CXL_CMD_QUERY_NOT_RUN: ++ return -EINVAL; ++ default: ++ err(ctx, "%s: Unknown query_status %d\n", ++ devname, cmd->query_status); ++ return -EINVAL; ++ } ++ ++ dbg(ctx, "%s: submitting SEND cmd: in: %d, out: %d\n", devname, ++ cmd->send_cmd->in.size, cmd->send_cmd->out.size); ++ rc = do_cmd(cmd, CXL_MEM_SEND_COMMAND); ++ if (rc < 0) ++ err(ctx, "%s: send command failed: %s\n", ++ devname, strerror(-rc)); ++ cmd->status = cmd->send_cmd->retval; ++ dbg(ctx, "%s: got SEND cmd: in: %d, out: %d, retval: %d\n", devname, ++ cmd->send_cmd->in.size, cmd->send_cmd->out.size, cmd->status); ++ ++ return rc; ++} ++ ++CXL_EXPORT int cxl_cmd_get_mbox_status(struct cxl_cmd *cmd) 
++{ ++ return cmd->status; ++} ++ ++CXL_EXPORT int cxl_cmd_get_out_size(struct cxl_cmd *cmd) ++{ ++ return cmd->send_cmd->out.size; ++} ++ ++CXL_EXPORT struct cxl_cmd *cxl_cmd_new_set_lsa(struct cxl_memdev *memdev, ++ void *lsa_buf, unsigned int offset, unsigned int length) ++{ ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ struct cxl_cmd_set_lsa *set_lsa; ++ struct cxl_cmd *cmd; ++ int rc; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_SET_LSA); ++ if (!cmd) ++ return NULL; ++ ++ /* this will allocate 'in.payload' */ ++ rc = cxl_cmd_set_input_payload(cmd, NULL, sizeof(*set_lsa) + length); ++ if (rc) { ++ err(ctx, "%s: cmd setup failed: %s\n", ++ cxl_memdev_get_devname(memdev), strerror(-rc)); ++ goto out_fail; ++ } ++ set_lsa = (void *)cmd->send_cmd->in.payload; ++ set_lsa->offset = cpu_to_le32(offset); ++ memcpy(set_lsa->lsa_data, lsa_buf, length); ++ ++ return cmd; ++ ++out_fail: ++ cxl_cmd_unref(cmd); ++ return NULL; ++} ++ ++enum lsa_op { ++ LSA_OP_GET, ++ LSA_OP_SET, ++ LSA_OP_ZERO, ++}; ++ ++static int lsa_op(struct cxl_memdev *memdev, int op, void **buf, ++ size_t length, size_t offset) ++{ ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ struct cxl_cmd *cmd; ++ void *zero_buf = NULL; ++ int rc = 0; ++ ++ if (op != LSA_OP_ZERO && (buf == NULL || *buf == NULL)) { ++ err(ctx, "%s: LSA buffer cannot be NULL\n", devname); ++ return -EINVAL; ++ } ++ ++ /* TODO: handle the case for offset + len > mailbox payload size */ ++ switch (op) { ++ case LSA_OP_GET: ++ if (length == 0) ++ length = memdev->lsa_size; ++ cmd = cxl_cmd_new_get_lsa(memdev, offset, length); ++ if (!cmd) ++ return -ENOMEM; ++ rc = cxl_cmd_set_output_payload(cmd, *buf, length); ++ if (rc) { ++ err(ctx, "%s: cmd setup failed: %s\n", ++ cxl_memdev_get_devname(memdev), strerror(-rc)); ++ goto out; ++ } ++ break; ++ case LSA_OP_ZERO: ++ if (length == 0) ++ length = memdev->lsa_size; ++ zero_buf = calloc(1, length); ++ if (!zero_buf) ++ return -ENOMEM; ++ buf = &zero_buf; ++ /* fall through */ ++ case LSA_OP_SET: ++ cmd = cxl_cmd_new_set_lsa(memdev, *buf, offset, length); ++ if (!cmd) { ++ rc = -ENOMEM; ++ goto out_free; ++ } ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ err(ctx, "%s: cmd submission failed: %s\n", ++ devname, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ err(ctx, "%s: firmware status: %d:\n%s\n", ++ devname, rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (op == LSA_OP_GET) ++ memcpy(*buf, cxl_cmd_get_lsa_get_payload(cmd), length); ++ /* ++ * TODO: If writing, the memdev may need to be disabled/re-enabled to ++ * refresh any cached LSA data in the kernel. 
++ */ ++ ++out: ++ cxl_cmd_unref(cmd); ++out_free: ++ free(zero_buf); ++ return rc; ++} ++ ++CXL_EXPORT int cxl_memdev_zero_lsa(struct cxl_memdev *memdev) ++{ ++ return lsa_op(memdev, LSA_OP_ZERO, NULL, 0, 0); ++} ++ ++CXL_EXPORT int cxl_memdev_set_lsa(struct cxl_memdev *memdev, void *buf, ++ size_t length, size_t offset) ++{ ++ return lsa_op(memdev, LSA_OP_SET, &buf, length, offset); ++} ++ ++CXL_EXPORT int cxl_memdev_get_lsa(struct cxl_memdev *memdev, void *buf, ++ size_t length, size_t offset) ++{ ++ return lsa_op(memdev, LSA_OP_GET, &buf, length, offset); ++} ++ ++CXL_EXPORT int cxl_memdev_cmd_identify(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_cmd_identify *id; ++ int rc = 0; ++ ++ printf("id: 0x%x\n", CXL_MEM_COMMAND_ID_IDENTIFY); ++ cmd = cxl_cmd_new_identify(memdev); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_identify returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ id = (void *)cmd->send_cmd->out.payload; ++ fprintf(stderr, "size of payload: %ld\n", sizeof(*id)); ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_IDENTIFY) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_IDENTIFY); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "%s info\n", cxl_memdev_get_devname(memdev)); ++ fprintf(stdout, " fw revision: "); ++ for (int i=0; i < CXL_CMD_IDENTIFY_FW_REV_LENGTH; ++i) ++ fprintf(stdout, "%02x ", id->fw_revision[i]); ++ fprintf(stdout, "\n"); ++ fprintf(stdout, " total_capacity: %lu MB (%lu GB)\n", ++ le64_to_cpu(id->total_capacity), (le64_to_cpu(id->total_capacity))/4); ++ fprintf(stdout, " volatile_capacity: %lu MB (%lu GB)\n", ++ le64_to_cpu(id->volatile_capacity), (le64_to_cpu(id->volatile_capacity))/4); ++ fprintf(stdout, " persistent_capacity: %lu MB (%lu GB)\n", ++ le64_to_cpu(id->persistent_capacity), (le64_to_cpu(id->persistent_capacity))/4); ++ fprintf(stdout, " partition_align: %lu MB (%lu GB)\n", ++ le64_to_cpu(id->partition_align), (le64_to_cpu(id->partition_align))/4); ++ fprintf(stdout, " info_event_log_size: %d\n", le16_to_cpu(id->info_event_log_size)); ++ fprintf(stdout, " warning_event_log_size: %d\n", le16_to_cpu(id->warning_event_log_size)); ++ fprintf(stdout, " failure_event_log_size: %d\n", le16_to_cpu(id->failure_event_log_size)); ++ fprintf(stdout, " fatal_event_log_size: %d\n", le16_to_cpu(id->fatal_event_log_size)); ++ fprintf(stdout, " lsa_size: %d\n", le32_to_cpu(id->lsa_size)); ++ for (int i=0; i < 3; ++i) ++ fprintf(stdout, " poison_list_max_mer[%d]: %d\n", i, id->poison_list_max_mer[i]); ++ fprintf(stdout, " inject_poison_limit: %d\n", le16_to_cpu(id->inject_poison_limit)); ++ fprintf(stdout, " poison_caps: %d\n", id->poison_caps); ++ fprintf(stdout, " qos_telemetry_caps: %d\n", id->qos_telemetry_caps); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++struct cxl_mbox_get_supported_logs { ++ __le16 entries; ++ u8 rsvd[6]; ++ struct gsl_entry { ++ uuid_t uuid; ++ __le32 size; ++ } __attribute__((packed)) entry[]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_supported_logs(struct 
cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_get_supported_logs *gsl; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_SUPPORTED_LOGS); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_identify returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_SUPPORTED_LOGS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), ++ cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_GET_SUPPORTED_LOGS); ++ return -EINVAL; ++ } ++ ++ gsl = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "payload info\n"); ++ fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ fprintf(stdout, " entries: %d\n", gsl->entries); ++ for (int e=0; e < gsl->entries; ++e) { ++ char uuid[40]; ++ uuid_unparse(gsl->entry[e].uuid, uuid); ++ fprintf(stdout, " entries[%d] uuid: %s, size: %d\n", e, uuid, gsl->entry[e].size); ++ } ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CEL_UUID "0da9c0b5-bf41-4b78-8f79-96b1623b3f17" ++ ++struct cxl_mbox_get_log { ++ uuid_t uuid; ++ __le32 offset; ++ __le32 length; ++} __attribute__((packed)); ++ ++struct cel_entry { ++ __le16 opcode; ++ __le16 effect; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_cel_log(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_get_log *get_log_input; ++ struct cel_entry *cel_entries; ++ int no_cel_entries; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_LOG); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_memdev_get_cel_log returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ get_log_input = (void *) cmd->send_cmd->in.payload; ++ uuid_parse(CEL_UUID, get_log_input->uuid); ++ get_log_input->offset = 0; ++ get_log_input->length = cmd->memdev->payload_max; ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_LOG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_LOG); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "payload info\n"); ++ fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ cel_entries = (void *)cmd->send_cmd->out.payload; ++ no_cel_entries = (cmd->send_cmd->out.size)/sizeof(struct cel_entry); ++ fprintf(stdout, " no_cel_entries size: %d\n", no_cel_entries); ++ for (int e = 0; e < no_cel_entries; ++e) { ++ fprintf(stdout, " cel_entry[%d] opcode: 0x%x, effect: 0x%x\n", e, ++ le16_to_cpu(cel_entries[e].opcode), ++ le16_to_cpu(cel_entries[e].effect)); ++ } ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY 
CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY_OPCODE 0x102 ++ ++struct cxl_mbox_get_event_interrupt_policy { ++ u8 info_event_log_int_settings; ++ u8 warning_event_log_int_settings; ++ u8 failure_event_log_int_settings; ++ u8 fatal_event_log_int_settings; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_event_interrupt_policy(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_get_event_interrupt_policy *event_interrupt_policy_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_EVENT_INTERRUPT_POLICY); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "payload info\n"); ++ fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ event_interrupt_policy_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, " info_event_log_int_settings: 0x%x\n", event_interrupt_policy_out->info_event_log_int_settings); ++ fprintf(stdout, " warning_event_log_int_settings: 0x%x\n", event_interrupt_policy_out->warning_event_log_int_settings); ++ fprintf(stdout, " failure_event_log_int_settings: 0x%x\n", event_interrupt_policy_out->failure_event_log_int_settings); ++ fprintf(stdout, " fatal_event_log_int_settings: 0x%x\n", event_interrupt_policy_out->fatal_event_log_int_settings); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY_OPCODE 0x103 ++#define CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY_PAYLOAD_IN_SIZE 0x4 ++ ++CXL_EXPORT int cxl_memdev_set_event_interrupt_policy(struct cxl_memdev *memdev, u32 int_policy) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_get_event_interrupt_policy *interrupt_policy_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ interrupt_policy_in = (void *) cmd->send_cmd->in.payload; ++ ++ /* below is meant for readability, you don't really need this */ ++ int_policy = cpu_to_be32(int_policy); ++ interrupt_policy_in->info_event_log_int_settings = (int_policy & 
0xff); ++ interrupt_policy_in->warning_event_log_int_settings = ((int_policy >> 8) & 0xff); ++ interrupt_policy_in->failure_event_log_int_settings = ((int_policy >> 16) & 0xff); ++ interrupt_policy_in->fatal_event_log_int_settings = ((int_policy >> 24) & 0xff); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_SET_EVENT_INTERRUPT_POLICY); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "command completed successfully\n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_GET_TIMESTAMP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_GET_TIMESTAMP_OPCODE 0x0300 ++ ++CXL_EXPORT int cxl_memdev_get_timestamp(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ __le64 *timestamp_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_GET_TIMESTAMP_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_TIMESTAMP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_TIMESTAMP); ++ return -EINVAL; ++ } ++ ++ timestamp_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "timestamp: 0x%lx\n", le64_to_cpu(*timestamp_out)); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CXL_MEM_COMMAND_ID_SET_TIMESTAMP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_SET_TIMESTAMP_OPCODE 0x0301 ++#define CXL_MEM_COMMAND_ID_SET_TIMESTAMP_PAYLOAD_IN_SIZE 8 ++ ++CXL_EXPORT int cxl_memdev_set_timestamp(struct cxl_memdev *memdev, u64 timestamp) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ __le64 *timestamp_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_SET_TIMESTAMP_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_SET_TIMESTAMP_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ timestamp_in = (void *) cmd->send_cmd->in.payload; ++ *timestamp_in = cpu_to_le64(timestamp); ++ fprintf(stdout, "setting timestamp to: 
0x%lx\n", le64_to_cpu(*timestamp_in)); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_SET_TIMESTAMP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_SET_TIMESTAMP); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "command completed successfully\n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++struct cxl_mbox_get_alert_config_out { ++ u8 valid_alerts; ++ u8 programmable_alerts; ++ u8 life_used_critical_alert_threshold; ++ u8 life_used_prog_warn_threshold; ++ __le16 dev_over_temp_crit_alert_threshold; ++ __le16 dev_under_temp_crit_alert_threshold; ++ __le16 dev_over_temp_prog_warn_threshold; ++ __le16 dev_under_temp_prog_warn_threshold; ++ __le16 corr_vol_mem_err_prog_warn_thresold; ++ __le16 corr_pers_mem_err_prog_warn_threshold; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_alert_config(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_get_alert_config_out *alert_config_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_ALERT_CONFIG); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_ALERT_CONFIG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_ALERT_CONFIG); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "alert_config summary\n"); ++ //fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ ++ alert_config_out = (void *)cmd->send_cmd->out.payload; ++ ++ fprintf(stdout, " valid_alerts: 0x%x\n", alert_config_out->valid_alerts); ++ fprintf(stdout, " programmable_alerts: 0x%x\n", alert_config_out->programmable_alerts); ++ fprintf(stdout, " life_used_critical_alert_threshold: 0x%x\n", ++ alert_config_out->life_used_critical_alert_threshold); ++ fprintf(stdout, " life_used_prog_warn_threshold: 0x%x\n", ++ alert_config_out->life_used_prog_warn_threshold); ++ ++ fprintf(stdout, " dev_over_temp_crit_alert_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->dev_over_temp_crit_alert_threshold)); ++ fprintf(stdout, " dev_under_temp_crit_alert_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->dev_under_temp_crit_alert_threshold)); ++ fprintf(stdout, " dev_over_temp_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->dev_over_temp_prog_warn_threshold)); ++ fprintf(stdout, " dev_under_temp_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->dev_under_temp_prog_warn_threshold)); ++ fprintf(stdout, " corr_vol_mem_err_prog_warn_thresold: 0x%x\n", ++ 
le16_to_cpu(alert_config_out->corr_vol_mem_err_prog_warn_thresold)); ++ fprintf(stdout, " corr_pers_mem_err_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_out->corr_pers_mem_err_prog_warn_threshold)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++struct cxl_mbox_set_alert_config_in { ++ u8 valid_alert_actions; ++ u8 enable_alert_actions; ++ u8 life_used_prog_warn_threshold; ++ u8 reserved; ++ __le16 dev_over_temp_prog_warn_threshold; ++ __le16 dev_under_temp_prog_warn_threshold; ++ __le16 corr_vol_mem_err_prog_warn_thresold; ++ __le16 corr_pers_mem_err_prog_warn_threshold; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_set_alert_config(struct cxl_memdev *memdev, u32 alert_prog_threshold, ++ u32 device_temp_threshold, u32 mem_error_threshold) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_set_alert_config_in *alert_config_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_SET_ALERT_CONFIG); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ alert_config_in = (void *) cmd->send_cmd->in.payload; ++ ++ alert_prog_threshold = cpu_to_be32(alert_prog_threshold); ++ device_temp_threshold = cpu_to_be32(device_temp_threshold); ++ mem_error_threshold = cpu_to_be32(mem_error_threshold); ++ ++ alert_config_in->valid_alert_actions = ((alert_prog_threshold >> 8) & 0xff); ++ alert_config_in->enable_alert_actions = ((alert_prog_threshold >> 16) & 0xff); ++ alert_config_in->life_used_prog_warn_threshold = ((alert_prog_threshold >> 24) & 0xff); ++ alert_config_in->reserved = 0; ++ ++ alert_config_in->dev_over_temp_prog_warn_threshold = cpu_to_le16(be16_to_cpu(((device_temp_threshold) & 0xffff))); ++ alert_config_in->dev_under_temp_prog_warn_threshold = cpu_to_le16(be16_to_cpu((((device_temp_threshold) >> 16) & 0xffff))); ++ ++ alert_config_in->corr_vol_mem_err_prog_warn_thresold = cpu_to_le16(be16_to_cpu((mem_error_threshold & 0xffff))); ++ alert_config_in->corr_pers_mem_err_prog_warn_threshold = cpu_to_le16(be16_to_cpu(((mem_error_threshold >> 16) & 0xffff))); ++ ++ fprintf(stdout, "alert_config settings\n"); ++ fprintf(stdout, " valid_alert_actions: 0x%x\n", alert_config_in->valid_alert_actions); ++ fprintf(stdout, " enable_alert_actions: 0x%x\n", alert_config_in->enable_alert_actions); ++ fprintf(stdout, " life_used_prog_warn_threshold: 0x%x\n", alert_config_in->life_used_prog_warn_threshold); ++ fprintf(stdout, " dev_over_temp_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_in->dev_over_temp_prog_warn_threshold)); ++ fprintf(stdout, " dev_under_temp_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_in->dev_under_temp_prog_warn_threshold)); ++ fprintf(stdout, " corr_vol_mem_err_prog_warn_thresold: 0x%x\n", ++ le16_to_cpu(alert_config_in->corr_vol_mem_err_prog_warn_thresold)); ++ fprintf(stdout, " corr_pers_mem_err_prog_warn_threshold: 0x%x\n", ++ le16_to_cpu(alert_config_in->corr_pers_mem_err_prog_warn_threshold)); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_SET_ALERT_CONFIG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", 
++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_SET_ALERT_CONFIG); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++struct cxl_health_info { ++ u8 health_state; ++ u8 media_status; ++ u8 additional_status; ++ u8 life_used; ++ __le16 device_temp; ++ __le32 dirty_shutdown_count; ++ __le32 corr_vol_mem_err_count; ++ __le32 corr_pers_mem_err_count; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_health_info(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_health_info *health_info; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_generic(memdev, CXL_MEM_COMMAND_ID_GET_HEALTH_INFO); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_memdev_get_health_info returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_HEALTH_INFO) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_HEALTH_INFO); ++ return -EINVAL; ++ } ++ ++ if (cmd->send_cmd->out.size != sizeof(*health_info)) { ++ fprintf(stderr, "%s: invalid payload output size (got: %d, required: %ld)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->out.size, sizeof(*health_info)); ++ return -EINVAL; ++ } ++ ++ health_info = (void *)cmd->send_cmd->out.payload; ++ ++ fprintf(stdout, "Device Health Info\n"); ++ fprintf(stdout, " out size: 0x%x\n", cmd->send_cmd->out.size); ++ fprintf(stdout, " health_state: 0x%x\n", health_info->health_state); ++ fprintf(stdout, " media_status: 0x%x\n", health_info->media_status); ++ fprintf(stdout, " additional_status: 0x%x\n", health_info->additional_status); ++ fprintf(stdout, " life_used: 0x%x\n", health_info->life_used); ++ fprintf(stdout, " device_temp: 0x%x\n", le16_to_cpu(health_info->device_temp)); ++ fprintf(stdout, " dirty_shutdown_count: 0x%x\n", le32_to_cpu(health_info->dirty_shutdown_count)); ++ fprintf(stdout, " corr_vol_mem_err_count: 0x%x\n", le32_to_cpu(health_info->corr_vol_mem_err_count)); ++ fprintf(stdout, " corr_pers_mem_err_count: 0x%x\n", le32_to_cpu(health_info->corr_pers_mem_err_count)); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS_OPCODE 0x100 ++#define CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS_PAYLOAD_IN_SIZE 0x1 ++#define CXL_MAX_RECORDS_TO_DUMP 20 ++ ++#define CXL_DRAM_EVENT_GUID "601dcbb3-9c06-4eab-b8af-4e9bfb5c9624" ++#define CXL_MEM_MODULE_EVENT_GUID "fe927475-dd59-4339-a586-79bab113b774" ++ ++struct cxl_dram_event_record { ++ __le64 physical_addr; ++ u8 memory_event_descriptor; ++ u8 memory_event_type; ++ u8 transaction_type; ++ __le16 validity_flags; ++ u8 channel; ++ u8 rank; ++ u8 nibble_mask[3]; ++ u8 bank_group; ++ u8 bank; ++ u8 row[3]; ++ __le16 column; ++ u8 correction_mask[0x20]; ++ u8 reserved[0x17]; ++} __attribute__((packed)); ++ ++struct cxl_memory_module_record { ++ u8 dev_event_type; ++ u8 dev_health_info[0x12]; ++ u8 reserved[0x3d]; ++}__attribute__((packed)); ++ 
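++/*
++ * cxl_event_record below follows the Common Event Record layout from the
++ * CXL 2.0 specification: a UUID identifying the record type (matched at
++ * dump time against CXL_DRAM_EVENT_GUID / CXL_MEM_MODULE_EVENT_GUID above),
++ * followed by length, flags, handles, a timestamp, and a type-specific
++ * payload carried as a union of the two record formats defined above.
++ */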
++struct cxl_event_record { ++ uuid_t uuid; ++ u8 event_record_length; ++ u8 event_record_flags[3]; ++ __le16 event_record_handle; ++ __le16 related_event_record_handle; ++ __le64 event_record_ts; ++ u8 reserved[0x10]; ++ union { ++ struct cxl_dram_event_record dram_event_record; ++ struct cxl_memory_module_record memory_module_record; ++ } event_record; ++} __attribute__((packed)); ++ ++struct cxl_get_event_record_info { ++ u8 flags; ++ u8 reserved1; ++ __le16 overflow_err_cnt; ++ __le64 first_overflow_evt_ts; ++ __le64 last_overflow_evt_ts; ++ __le16 event_record_count; ++ u8 reserved2[0xa]; ++ struct cxl_event_record event_records[]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_event_records(struct cxl_memdev *memdev, u8 event_log_type) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_get_event_record_info *event_info; ++ int rc = 0; ++ int rec; ++ int indent = 2; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ * ((u8 *) cmd->send_cmd->in.payload) = event_log_type; ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_EVENT_RECORDS); ++ return -EINVAL; ++ } ++ ++ event_info = (void *)cmd->send_cmd->out.payload; ++ ++ fprintf(stdout, "cxl_dram_event_record size: 0x%lx\n", sizeof(struct cxl_dram_event_record)); ++ fprintf(stdout, "cxl_memory_module_record size: 0x%lx\n", sizeof(struct cxl_memory_module_record)); ++ fprintf(stdout, "cxl_event_record size: 0x%lx\n", sizeof(struct cxl_event_record)); ++ fprintf(stdout, "cxl_get_event_record_info size: 0x%lx\n", sizeof(struct cxl_get_event_record_info)); ++ fprintf(stdout, "========= Get Event Records Info =========\n"); ++ fprintf(stdout, "%*sout size: 0x%x\n", indent, "", cmd->send_cmd->out.size); ++ fprintf(stdout, "%*sflags: 0x%x\n", indent, "", event_info->flags); ++ fprintf(stdout, "%*soverflow_err_cnt: 0x%x\n", indent, "", le16_to_cpu(event_info->overflow_err_cnt)); ++ fprintf(stdout, "%*sfirst_overflow_evt_ts: 0x%lx\n", indent, "", le64_to_cpu(event_info->first_overflow_evt_ts)); ++ fprintf(stdout, "%*slast_overflow_evt_ts: 0x%lx\n", indent, "", le64_to_cpu(event_info->last_overflow_evt_ts)); ++ fprintf(stdout, "%*sevent_record_count: 0x%x\n", indent, "", le16_to_cpu(event_info->event_record_count)); ++ ++ for (rec = 0; rec < min(CXL_MAX_RECORDS_TO_DUMP, le16_to_cpu(event_info->event_record_count)); ++rec) 
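++ /*
++ * Print the common event record header for every record, but decode
++ * the type-specific payload only for DRAM event records; at most
++ * CXL_MAX_RECORDS_TO_DUMP records are dumped per call.
++ */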
{ ++ char uuid[40]; ++ struct cxl_event_record *event_record = &event_info->event_records[rec]; ++ ++ uuid_unparse(event_info->event_records[rec].uuid, uuid); ++ ++ if (strcmp(uuid, CXL_DRAM_EVENT_GUID) == 0) ++ fprintf(stdout, "%*sEvent Record: %d (DRAM guid: %s)\n", indent, "", rec, uuid); ++ else if (strcmp(uuid, CXL_MEM_MODULE_EVENT_GUID) == 0) ++ fprintf(stdout, "%*sEvent Record: %d (Memory Module Event guid: %s)\n", indent, "", rec, uuid); ++ else ++ fprintf(stdout, "%*sEvent Record: %d (uuid: %s)\n", indent, "", rec, uuid); ++ ++ fprintf(stdout, "%*sevent_record_length: 0x%x\n", indent+2, "", event_record->event_record_length); ++ fprintf(stdout, "%*sevent_record_flags: 0x%02x%02x%02x\n", indent+2, "", event_record->event_record_flags[0], ++ event_record->event_record_flags[1], event_record->event_record_flags[2]); ++ fprintf(stdout, "%*sevent_record_handle: 0x%x\n", indent+2, "", le16_to_cpu(event_record->event_record_handle)); ++ fprintf(stdout, "%*srelated_event_record_handle: 0x%x\n", indent+2, "", ++ le16_to_cpu(event_record->related_event_record_handle)); ++ fprintf(stdout, "%*sevent_record_ts: 0x%lx\n", indent+2, "", le64_to_cpu(event_record->event_record_ts)); ++ ++ if (strcmp(uuid, CXL_DRAM_EVENT_GUID) == 0){ ++ struct cxl_dram_event_record *dram_event = &event_record->event_record.dram_event_record; ++ fprintf(stdout, "%*sphysical_addr: 0x%lx\n", indent+2, "", le64_to_cpu(dram_event->physical_addr)); ++ fprintf(stdout, "%*smemory_event_descriptor: 0x%x\n", indent+2, "", dram_event->memory_event_descriptor); ++ fprintf(stdout, "%*smemory_event_type: 0x%x\n", indent+2, "", dram_event->memory_event_type); ++ fprintf(stdout, "%*stransaction_type: 0x%x\n", indent+2, "", dram_event->transaction_type); ++ fprintf(stdout, "%*svalidity_flags: 0x%x\n", indent+2, "", le16_to_cpu(dram_event->validity_flags)); ++ fprintf(stdout, "%*schannel: 0x%x\n", indent+2, "", dram_event->channel); ++ fprintf(stdout, "%*srank: 0x%x\n", indent+2, "", dram_event->rank); ++ fprintf(stdout, "%*snibble_mask: 0x%02x%02x%02x\n", indent+2, "", ++ dram_event->nibble_mask[0], dram_event->nibble_mask[1], ++ dram_event->nibble_mask[2]); ++ fprintf(stdout, "%*sbank_group: 0x%x\n", indent+2, "", dram_event->bank_group); ++ fprintf(stdout, "%*sbank: 0x%x\n", indent+2, "", dram_event->bank); ++ fprintf(stdout, "%*srow: 0x%02x%02x%02x\n", indent+2, "", dram_event->row[0], ++ dram_event->row[1], dram_event->row[2]); ++ fprintf(stdout, "%*scolumn: 0x%x\n", indent+2, "", le16_to_cpu(dram_event->column)); ++ } ++ } ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++// GET_LD_INFO START ++#define CXL_MEM_COMMAND_ID_GET_LD_INFO CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_GET_LD_INFO_OPCODE 0x5400 ++#define CXL_MEM_COMMAND_ID_GET_LD_INFO_PAYLOAD_OUT_SIZE 0xb ++ ++struct cxl_get_ld_info { ++ __le64 mem_size; ++ __le16 ld_cnt; ++ u8 qos_telemetry_capa; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_ld_info(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_get_ld_info *ld_info; ++ int rc = 0; ++ int indent = 2; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_GET_LD_INFO_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ cmd->send_cmd->in.size = 0; ++ ++ fprintf(stdout, "Getting LD info for memdev %s\n", cxl_memdev_get_devname(memdev)); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ 
cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_LD_INFO) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_LD_INFO); ++ return -EINVAL; ++ } ++ ++ ld_info = (void *)cmd->send_cmd->out.payload; ++ ++ fprintf(stdout, "========= Get LD Info =========\n"); ++ fprintf(stdout, "%*sout size: 0x%x\n", indent, "", cmd->send_cmd->out.size); ++ fprintf(stdout, "%*smemory size: 0x%lx\n", indent, "", le64_to_cpu(ld_info->mem_size)); ++ fprintf(stdout, "%*sld count: 0x%x\n", indent, "", le16_to_cpu(ld_info->ld_cnt)); ++ fprintf(stdout, "%*sqos telemetry capability: 0x%x\n", indent, "", ld_info->qos_telemetry_capa); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++// GET_LD_INFO END ++ ++#define CXL_MEM_COMMAND_ID_DEVICE_INFO_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_DEVICE_INFO_GET_OPCODE 49152 ++#define CXL_MEM_COMMAND_ID_DEVICE_INFO_GET_PAYLOAD_OUT_SIZE 8 ++#define CXL_MEM_COMMAND_ID_DEVICE_INFO_GET_PAYLOAD_IN_SIZE 0 ++ ++ ++struct cxl_mbox_device_info_get_out { ++ __le16 device_id; ++ u8 chipinfo_rel_major; ++ u8 chipinfo_rel_minor; ++ u8 device_rev; ++ u8 configfile_ver_major; ++ __le16 configfile_ver_minor; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_device_info_get(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_device_info_get_out *device_info_get_out; ++ int rc = 0; ++ char release_major; ++ release_major = 'A'; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_DEVICE_INFO_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_DEVICE_INFO_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_DEVICE_INFO_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_DEVICE_INFO_GET); ++ return -EINVAL; ++ } ++ ++ device_info_get_out = (void *)cmd->send_cmd->out.payload; ++ release_major = release_major + device_info_get_out->chipinfo_rel_major; ++ fprintf(stdout, "=========================== read device information ============================\n"); ++ ++ fprintf(stdout, "Release & Revision for Device ID %x: %c.%x Rev %x\n", ++ device_info_get_out->device_id, ++ release_major, ++ device_info_get_out->chipinfo_rel_minor, ++ 
device_info_get_out->device_rev); ++ fprintf(stdout, "Device ID: %x\n", device_info_get_out->device_id); ++ fprintf(stdout, "Chip Info Release Major: %x\n", device_info_get_out->chipinfo_rel_major); ++ fprintf(stdout, "Chip Info Release Minor: %x\n", device_info_get_out->chipinfo_rel_minor); ++ fprintf(stdout, "Device Revision: %x\n", device_info_get_out->device_rev); ++ fprintf(stdout, "ConfigFile version Major: %x\n", device_info_get_out->configfile_ver_major); ++ fprintf(stdout, "ConfigFile version Minor: %x\n", device_info_get_out->configfile_ver_minor); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_GET_FW_INFO CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_GET_FW_INFO_OPCODE 512 ++#define CXL_MEM_COMMAND_ID_GET_FW_INFO_PAYLOAD_OUT_SIZE 80 ++ ++ ++struct cxl_mbox_get_fw_info_out { ++ u8 fw_slots_supp; ++ u8 fw_slot_info; ++ u8 fw_activation_capas; ++ u8 rsvd[13]; ++ char slot_1_fw_rev[16]; ++ char slot_2_fw_rev[16]; ++ char slot_3_fw_rev[16]; ++ char slot_4_fw_rev[16]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_get_fw_info(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_get_fw_info_out *get_fw_info_out; ++ int rc = 0; ++ u8 active_slot_mask; ++ u8 active_slot; ++ u8 staged_slot_mask; ++ u8 staged_slot; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_GET_FW_INFO_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_GET_FW_INFO) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_GET_FW_INFO); ++ return -EINVAL; ++ } ++ ++ get_fw_info_out = (void *)cmd->send_cmd->out.payload; ++ active_slot_mask = 0b00000111; ++ active_slot = get_fw_info_out->fw_slot_info & active_slot_mask; ++ staged_slot_mask = 0b00111000; ++ staged_slot = get_fw_info_out->fw_slot_info & staged_slot_mask; ++ staged_slot = staged_slot>>3; ++ fprintf(stdout, "================================= get fw info ==================================\n"); ++ fprintf(stdout, "FW Slots Supported: %x\n", get_fw_info_out->fw_slots_supp); ++ fprintf(stdout, "Active FW Slot: %x\n", active_slot); ++ if (staged_slot) ++ { ++ fprintf(stdout, "Staged FW Slot: %x\n", staged_slot); ++ } ++ fprintf(stdout, "FW Activation Capabilities: %x\n", get_fw_info_out->fw_activation_capas); ++ fprintf(stdout, "Slot 1 FW Revision: %s\n", get_fw_info_out->slot_1_fw_rev); ++ fprintf(stdout, "Slot 2 FW Revision: %s\n", get_fw_info_out->slot_2_fw_rev); ++ fprintf(stdout, "Slot 3 FW Revision: %s\n", get_fw_info_out->slot_3_fw_rev); ++ fprintf(stdout, "Slot 4 FW Revision: %s\n", get_fw_info_out->slot_4_fw_rev); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_TRANSFER_FW CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_TRANSFER_FW_PAYLOAD_IN_SIZE 128 + FW_BLOCK_SIZE ++ ++struct cxl_mbox_transfer_fw_in { ++ u8 action; ++ u8 slot; ++ __le16 rsvd; ++ __le32 offset; ++ __le64 rsvd8[15]; ++ fwblock data; ++} 
__attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_transfer_fw(struct cxl_memdev *memdev, ++ u8 action, u8 slot, u32 offset, int size, ++ unsigned char *data, u32 transfer_fw_opcode) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_transfer_fw_in *transfer_fw_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, transfer_fw_opcode); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* used to force correct payload size */ ++ cinfo->size_in = 128 + size; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ transfer_fw_in = (struct cxl_mbox_transfer_fw_in *) cmd->send_cmd->in.payload; ++ transfer_fw_in->action = action; ++ transfer_fw_in->slot = slot; ++ transfer_fw_in->offset = cpu_to_le32(offset); ++ memcpy(transfer_fw_in->data, data, size); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_TRANSFER_FW) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_TRANSFER_FW); ++ rc = -EINVAL; ++ goto out; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_ACTIVATE_FW CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_ACTIVATE_FW_OPCODE 52482 ++#define CXL_MEM_COMMAND_ID_ACTIVATE_FW_PAYLOAD_IN_SIZE 2 ++ ++struct cxl_mbox_activate_fw_in { ++ u8 action; ++ u8 slot; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_activate_fw(struct cxl_memdev *memdev, ++ u8 action, u8 slot) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_activate_fw_in *activate_fw_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_ACTIVATE_FW_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_ACTIVATE_FW_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ activate_fw_in = (void *) cmd->send_cmd->in.payload; ++ ++ activate_fw_in->action = action; ++ activate_fw_in->slot = slot; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if 
(cmd->send_cmd->id != CXL_MEM_COMMAND_ID_ACTIVATE_FW) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_ACTIVATE_FW); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_DDR_INFO CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_DDR_INFO_OPCODE 50432 ++#define CXL_MEM_COMMAND_ID_DDR_INFO_PAYLOAD_IN_SIZE 1 ++#define CXL_MEM_COMMAND_ID_DDR_INFO_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_ddr_info_in { ++ u8 ddr_id; ++} __attribute__((packed)); ++ ++struct cxl_mbox_ddr_info_out { ++ __le32 mstr_reg; ++ __le32 dram_width; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_ddr_info(struct cxl_memdev *memdev, u8 ddr_id) ++{ ++ /*const char *dram_width_descriptions[4] = { ++ "DRAM Width x4 device", ++ "DRAM Width x8 device", ++ "DRAM Width x16 device", ++ "DRAM Width x32 device" ++ };*/ ++ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ddr_info_in *ddr_info_in; ++ struct cxl_mbox_ddr_info_out *ddr_info_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_DDR_INFO_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_DDR_INFO_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ddr_info_in = (void *) cmd->send_cmd->in.payload; ++ ++ ddr_info_in->ddr_id = ddr_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_DDR_INFO) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_DDR_INFO); ++ return -EINVAL; ++ } ++ ++ ddr_info_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "=================================== ddr info ===================================\n"); ++ fprintf(stdout, "DDR controller MSTR register: %x\n", le32_to_cpu(ddr_info_out->mstr_reg)); ++ fprintf(stdout, "DRAM width derived from DEVICE_CONFIG: %d\n", le32_to_cpu(ddr_info_out->dram_width)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS_OPCODE 0x101 ++ ++struct cxl_clear_event_record_info { ++ u8 event_log_type; ++ u8 clear_event_flags; ++ u8 no_event_record_handles; ++ u8 reserved[3]; ++ __le16 event_record_handles[]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_clear_event_records(struct cxl_memdev *memdev, u8 event_log_type, ++ u8 clear_event_flags, u8 no_event_record_handles, u16 *event_record_handles) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct 
cxl_clear_event_record_info *event_info; ++ int rc = 0; ++ int rec; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* this is hack to create right payload size */ ++ cinfo->size_in = sizeof(*event_info) + (no_event_record_handles * sizeof(__le16)); ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ if (clear_event_flags) { ++ dbg(memdev->ctx, "Clearing 'All Event' Records for type %d\n", event_log_type); ++ } ++ ++ event_info = (struct cxl_clear_event_record_info *) cmd->send_cmd->in.payload; ++ event_info->event_log_type = event_log_type; ++ event_info->clear_event_flags = clear_event_flags; ++ event_info->no_event_record_handles = no_event_record_handles; ++ for (rec = 0; rec < event_info->no_event_record_handles; ++rec) { ++ dbg(memdev->ctx, "Clearing Event Record 0x%x for %d type\n", event_record_handles[rec], event_log_type); ++ event_info->event_record_handles[rec] = cpu_to_le16(event_record_handles[rec]); ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_CLEAR_EVENT_RECORDS); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "Clear Event Records command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_HCT_START_STOP_TRIGGER CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HCT_START_STOP_TRIGGER_OPCODE 50691 ++#define CXL_MEM_COMMAND_ID_HCT_START_STOP_TRIGGER_PAYLOAD_IN_SIZE 2 ++ ++struct cxl_mbox_hct_start_stop_trigger_in { ++ u8 hct_inst; ++ u8 buf_control; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_hct_start_stop_trigger(struct cxl_memdev *memdev, ++ u8 hct_inst, u8 buf_control) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_hct_start_stop_trigger_in *hct_start_stop_trigger_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HCT_START_STOP_TRIGGER_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_HCT_START_STOP_TRIGGER_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ hct_start_stop_trigger_in = (void *) cmd->send_cmd->in.payload; ++ ++ 
hct_start_stop_trigger_in->hct_inst = hct_inst; ++ hct_start_stop_trigger_in->buf_control = buf_control; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HCT_START_STOP_TRIGGER) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HCT_START_STOP_TRIGGER); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_HCT_GET_BUFFER_STATUS CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HCT_GET_BUFFER_STATUS_OPCODE 50692 ++#define CXL_MEM_COMMAND_ID_HCT_GET_BUFFER_STATUS_PAYLOAD_IN_SIZE 1 ++#define CXL_MEM_COMMAND_ID_HCT_GET_BUFFER_STATUS_PAYLOAD_OUT_SIZE 2 ++ ++struct cxl_mbox_hct_get_buffer_status_in { ++ u8 hct_inst; ++} __attribute__((packed)); ++ ++struct cxl_mbox_hct_get_buffer_status_out { ++ u8 buf_status; ++ u8 fill_level; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_hct_get_buffer_status(struct cxl_memdev *memdev, ++ u8 hct_inst) ++{ ++ const char *buf_status_descriptions[] = { ++ "Stop", ++ "Pre-Trigger", ++ "Post-Trigger" ++ }; ++ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_hct_get_buffer_status_in *hct_get_buffer_status_in; ++ struct cxl_mbox_hct_get_buffer_status_out *hct_get_buffer_status_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HCT_GET_BUFFER_STATUS_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_HCT_GET_BUFFER_STATUS_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ hct_get_buffer_status_in = (void *) cmd->send_cmd->in.payload; ++ ++ hct_get_buffer_status_in->hct_inst = hct_inst; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HCT_GET_BUFFER_STATUS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HCT_GET_BUFFER_STATUS); ++ return -EINVAL; ++ } ++ ++ hct_get_buffer_status_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "======================= get hif/cxl trace buffer status ========================\n"); ++ fprintf(stdout, "Buffer Status: %s\n", buf_status_descriptions[hct_get_buffer_status_out->buf_status]); ++ fprintf(stdout, "Fill Level: %x\n", hct_get_buffer_status_out->fill_level); ++ 
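++ /*
++ * Note: buf_status is used directly as an index into
++ * buf_status_descriptions[], which only has entries for values 0-2
++ * (Stop / Pre-Trigger / Post-Trigger); any other firmware value
++ * would read past the end of the array.
++ */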
++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_HCT_ENABLE CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HCT_ENABLE_OPCODE 50694 ++#define CXL_MEM_COMMAND_ID_HCT_ENABLE_PAYLOAD_IN_SIZE 1 ++ ++struct cxl_mbox_hct_enable_in { ++ u8 hct_inst; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_hct_enable(struct cxl_memdev *memdev, ++ u8 hct_inst) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_hct_enable_in *hct_enable_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HCT_ENABLE_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_HCT_ENABLE_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ hct_enable_in = (void *) cmd->send_cmd->in.payload; ++ ++ hct_enable_in->hct_inst = hct_inst; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HCT_ENABLE) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HCT_ENABLE); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_CLEAR CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_CLEAR_OPCODE 50954 ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_CLEAR_PAYLOAD_IN_SIZE 2 ++ ++struct cxl_mbox_ltmon_capture_clear_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_ltmon_capture_clear(struct cxl_memdev *memdev, ++ u8 cxl_mem_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_capture_clear_in *ltmon_capture_clear_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_CLEAR_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_CAPTURE_CLEAR_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_capture_clear_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_capture_clear_in->cxl_mem_id = cxl_mem_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = 
cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_CAPTURE_CLEAR) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_CLEAR); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_OPCODE 50956 ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_PAYLOAD_IN_SIZE 8 ++ ++struct cxl_mbox_ltmon_capture_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ u8 capt_mode; ++ __le16 ignore_sub_chg; ++ u8 ignore_rxl0_chg; ++ u8 trig_src_sel; ++ u8 rsvd7; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_ltmon_capture(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 capt_mode, u16 ignore_sub_chg, u8 ignore_rxl0_chg, ++ u8 trig_src_sel) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_capture_in *ltmon_capture_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_CAPTURE_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_capture_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_capture_in->cxl_mem_id = cxl_mem_id; ++ ltmon_capture_in->capt_mode = capt_mode; ++ ltmon_capture_in->ignore_sub_chg = cpu_to_le16(ignore_sub_chg); ++ ltmon_capture_in->ignore_rxl0_chg = ignore_rxl0_chg; ++ ltmon_capture_in->trig_src_sel = trig_src_sel; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_CAPTURE) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_CAPTURE); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_FREEZE_AND_RESTORE CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_FREEZE_AND_RESTORE_OPCODE 50958 ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_FREEZE_AND_RESTORE_PAYLOAD_IN_SIZE 4 ++ ++struct cxl_mbox_ltmon_capture_freeze_and_restore_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ u8 freeze_restore; ++ u8 rsvd3; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_ltmon_capture_freeze_and_restore(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 freeze_restore) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct 
cxl_mbox_ltmon_capture_freeze_and_restore_in *ltmon_capture_freeze_and_restore_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_FREEZE_AND_RESTORE_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_CAPTURE_FREEZE_AND_RESTORE_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_capture_freeze_and_restore_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_capture_freeze_and_restore_in->cxl_mem_id = cxl_mem_id; ++ ltmon_capture_freeze_and_restore_in->freeze_restore = freeze_restore; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_CAPTURE_FREEZE_AND_RESTORE) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_FREEZE_AND_RESTORE); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_DUMP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_DUMP_OPCODE 50960 ++#define CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_DUMP_PAYLOAD_IN_SIZE 2 ++#define CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_DUMP_PAYLOAD_OUT_SIZE 4 ++ ++struct cxl_mbox_ltmon_l2r_count_dump_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++} __attribute__((packed)); ++ ++struct cxl_mbox_ltmon_l2r_count_dump_out { ++ __le32 dump_cnt; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_ltmon_l2r_count_dump(struct cxl_memdev *memdev, ++ u8 cxl_mem_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_l2r_count_dump_in *ltmon_l2r_count_dump_in; ++ struct cxl_mbox_ltmon_l2r_count_dump_out *ltmon_l2r_count_dump_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_DUMP_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_DUMP_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_l2r_count_dump_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_l2r_count_dump_in->cxl_mem_id = cxl_mem_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc 
!= 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_DUMP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_DUMP); ++ return -EINVAL; ++ } ++ ++ ltmon_l2r_count_dump_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "============================= ltmon l2r count dump =============================\n"); ++ fprintf(stdout, "Dump Count: %x\n", le32_to_cpu(ltmon_l2r_count_dump_out->dump_cnt)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_CLEAR CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_CLEAR_OPCODE 50961 ++#define CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_CLEAR_PAYLOAD_IN_SIZE 2 ++ ++struct cxl_mbox_ltmon_l2r_count_clear_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_ltmon_l2r_count_clear(struct cxl_memdev *memdev, ++ u8 cxl_mem_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_l2r_count_clear_in *ltmon_l2r_count_clear_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_CLEAR_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_CLEAR_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_l2r_count_clear_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_l2r_count_clear_in->cxl_mem_id = cxl_mem_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_CLEAR) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_L2R_COUNT_CLEAR); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_BASIC_CFG CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_BASIC_CFG_OPCODE 50962 ++#define CXL_MEM_COMMAND_ID_LTMON_BASIC_CFG_PAYLOAD_IN_SIZE 4 ++ ++struct cxl_mbox_ltmon_basic_cfg_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ u8 tick_cnt; ++ u8 global_ts; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_ltmon_basic_cfg(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 tick_cnt, u8 global_ts) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_basic_cfg_in *ltmon_basic_cfg_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_BASIC_CFG_OPCODE); ++ if 
(!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_BASIC_CFG_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_basic_cfg_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_basic_cfg_in->cxl_mem_id = cxl_mem_id; ++ ltmon_basic_cfg_in->tick_cnt = tick_cnt; ++ ltmon_basic_cfg_in->global_ts = global_ts; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_BASIC_CFG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_BASIC_CFG); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_WATCH CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_WATCH_OPCODE 50963 ++#define CXL_MEM_COMMAND_ID_LTMON_WATCH_PAYLOAD_IN_SIZE 12 ++ ++struct cxl_mbox_ltmon_watch_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ u8 watch_id; ++ u8 watch_mode; ++ u8 src_maj_st; ++ u8 src_min_st; ++ u8 src_l0_st; ++ u8 dst_maj_st; ++ u8 dst_min_st; ++ u8 dst_l0_st; ++ __le16 rsvd10; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_ltmon_watch(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 watch_id, u8 watch_mode, u8 src_maj_st, u8 src_min_st, ++ u8 src_l0_st, u8 dst_maj_st, u8 dst_min_st, u8 dst_l0_st) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_watch_in *ltmon_watch_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_WATCH_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_WATCH_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_watch_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_watch_in->cxl_mem_id = cxl_mem_id; ++ ltmon_watch_in->watch_id = watch_id; ++ ltmon_watch_in->watch_mode = watch_mode; ++ ltmon_watch_in->src_maj_st = src_maj_st; ++ ltmon_watch_in->src_min_st = src_min_st; ++ ltmon_watch_in->src_l0_st = src_l0_st; ++ ltmon_watch_in->dst_maj_st = dst_maj_st; ++ ltmon_watch_in->dst_min_st = dst_min_st; ++ ltmon_watch_in->dst_l0_st = dst_l0_st; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = 
cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_WATCH) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_WATCH); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_STAT CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_STAT_OPCODE 50964 ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_STAT_PAYLOAD_IN_SIZE 2 ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_STAT_PAYLOAD_OUT_SIZE 12 ++ ++struct cxl_mbox_ltmon_capture_stat_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++} __attribute__((packed)); ++ ++struct cxl_mbox_ltmon_capture_stat_out { ++ __le16 trig_cnt; ++ __le16 watch0_trig_cnt; ++ __le16 watch1_trig_cnt; ++ __le16 time_stamp; ++ u8 trig_src_stat; ++ u8 rsvd[3]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_ltmon_capture_stat(struct cxl_memdev *memdev, ++ u8 cxl_mem_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_capture_stat_in *ltmon_capture_stat_in; ++ struct cxl_mbox_ltmon_capture_stat_out *ltmon_capture_stat_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_STAT_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_CAPTURE_STAT_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_capture_stat_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_capture_stat_in->cxl_mem_id = cxl_mem_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_CAPTURE_STAT) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_STAT); ++ return -EINVAL; ++ } ++ ++ ltmon_capture_stat_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "============================= ltmon capture status =============================\n"); ++ fprintf(stdout, "Trigger Count: %x\n", le16_to_cpu(ltmon_capture_stat_out->trig_cnt)); ++ fprintf(stdout, "Watch 0 Trigger Count: %x\n", le16_to_cpu(ltmon_capture_stat_out->watch0_trig_cnt)); ++ fprintf(stdout, "Watch 1 Trigger Count: %x\n", le16_to_cpu(ltmon_capture_stat_out->watch1_trig_cnt)); ++ fprintf(stdout, "Time Stamp: %x\n", le16_to_cpu(ltmon_capture_stat_out->time_stamp)); ++ fprintf(stdout, "Trigger Source Status: %x\n", ltmon_capture_stat_out->trig_src_stat); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ 
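++/*
++ * LTMON capture log dump (raw opcode 50965): the 8-byte input selects a
++ * target cxl_mem_id plus a dump index and count, and the 16-byte output
++ * returns two 64-bit words of captured log data, printed one element per
++ * line below.  A caller might invoke it as, for example,
++ * cxl_memdev_ltmon_capture_log_dmp(memdev, 0, 0, 2) (illustrative values).
++ */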
++ ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_LOG_DMP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_LOG_DMP_OPCODE 50965 ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_LOG_DMP_PAYLOAD_IN_SIZE 8 ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_LOG_DMP_PAYLOAD_OUT_SIZE 16 ++ ++struct cxl_mbox_ltmon_capture_log_dmp_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ __le16 dump_idx; ++ __le16 dump_cnt; ++ __le16 rsvd6; ++} __attribute__((packed)); ++ ++struct cxl_mbox_ltmon_capture_log_dmp_out { ++ __le64 data[2]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_ltmon_capture_log_dmp(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u16 dump_idx, u16 dump_cnt) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_capture_log_dmp_in *ltmon_capture_log_dmp_in; ++ struct cxl_mbox_ltmon_capture_log_dmp_out *ltmon_capture_log_dmp_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_LOG_DMP_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_CAPTURE_LOG_DMP_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_capture_log_dmp_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_capture_log_dmp_in->cxl_mem_id = cxl_mem_id; ++ ltmon_capture_log_dmp_in->dump_idx = cpu_to_le16(dump_idx); ++ ltmon_capture_log_dmp_in->dump_cnt = cpu_to_le16(dump_cnt); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_CAPTURE_LOG_DMP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_LOG_DMP); ++ return -EINVAL; ++ } ++ ++ ltmon_capture_log_dmp_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "============================ ltmon capture log dump ============================\n"); ++ fprintf(stdout, "LTMON Data: "); ++ /* Procedurally generated print statement. 
To print this array contiguously, ++ add "contiguous: True" to the YAML param and rerun cligen.py */ ++ for (int i = 0; i < 2; i++) { ++ fprintf(stdout, "data[%d]: %lx\n", i, le64_to_cpu(ltmon_capture_log_dmp_out->data[i])); ++ } ++ fprintf(stdout, "\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_TRIGGER CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_TRIGGER_OPCODE 50966 ++#define CXL_MEM_COMMAND_ID_LTMON_CAPTURE_TRIGGER_PAYLOAD_IN_SIZE 4 ++ ++struct cxl_mbox_ltmon_capture_trigger_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ u8 trig_src; ++ u8 rsvd3; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_ltmon_capture_trigger(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 trig_src) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_capture_trigger_in *ltmon_capture_trigger_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_TRIGGER_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_CAPTURE_TRIGGER_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_capture_trigger_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_capture_trigger_in->cxl_mem_id = cxl_mem_id; ++ ltmon_capture_trigger_in->trig_src = trig_src; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_CAPTURE_TRIGGER) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_CAPTURE_TRIGGER); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_LTMON_ENABLE CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LTMON_ENABLE_OPCODE 51072 ++#define CXL_MEM_COMMAND_ID_LTMON_ENABLE_PAYLOAD_IN_SIZE 4 ++ ++struct cxl_mbox_ltmon_enable_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ u8 enable; ++ u8 rsvd3; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_ltmon_enable(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 enable) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_ltmon_enable_in *ltmon_enable_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LTMON_ENABLE_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LTMON_ENABLE_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, 
cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ ltmon_enable_in = (void *) cmd->send_cmd->in.payload; ++ ++ ltmon_enable_in->cxl_mem_id = cxl_mem_id; ++ ltmon_enable_in->enable = enable; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LTMON_ENABLE) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LTMON_ENABLE); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_OSA_OS_TYPE_TRIG_CFG CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_OSA_OS_TYPE_TRIG_CFG_OPCODE 51200 ++#define CXL_MEM_COMMAND_ID_OSA_OS_TYPE_TRIG_CFG_PAYLOAD_IN_SIZE 12 ++ ++struct cxl_mbox_osa_os_type_trig_cfg_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ __le16 rsvd2; ++ __le16 lane_mask; ++ u8 lane_dir_mask; ++ u8 rate_mask; ++ __le16 os_type_mask; ++ __le16 rsvd10; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_osa_os_type_trig_cfg(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u16 lane_mask, u8 lane_dir_mask, u8 rate_mask, u16 os_type_mask) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_osa_os_type_trig_cfg_in *osa_os_type_trig_cfg_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_OSA_OS_TYPE_TRIG_CFG_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_OSA_OS_TYPE_TRIG_CFG_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ osa_os_type_trig_cfg_in = (void *) cmd->send_cmd->in.payload; ++ ++ osa_os_type_trig_cfg_in->cxl_mem_id = cxl_mem_id; ++ osa_os_type_trig_cfg_in->lane_mask = cpu_to_le16(lane_mask); ++ osa_os_type_trig_cfg_in->lane_dir_mask = lane_dir_mask; ++ osa_os_type_trig_cfg_in->rate_mask = rate_mask; ++ osa_os_type_trig_cfg_in->os_type_mask = cpu_to_le16(os_type_mask); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_OSA_OS_TYPE_TRIG_CFG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_OSA_OS_TYPE_TRIG_CFG); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define 
CXL_MEM_COMMAND_ID_OSA_CAP_CTRL CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_OSA_CAP_CTRL_OPCODE 51203 ++#define CXL_MEM_COMMAND_ID_OSA_CAP_CTRL_PAYLOAD_IN_SIZE 16 ++ ++struct cxl_mbox_osa_cap_ctrl_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ __le16 rsvd2; ++ __le16 lane_mask; ++ u8 lane_dir_mask; ++ u8 drop_single_os; ++ u8 stop_mode; ++ u8 snapshot_mode; ++ __le16 post_trig_num; ++ __le16 os_type_mask; ++ __le16 rsvd14; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_osa_cap_ctrl(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u16 lane_mask, u8 lane_dir_mask, u8 drop_single_os, ++ u8 stop_mode, u8 snapshot_mode, u16 post_trig_num, u16 os_type_mask) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_osa_cap_ctrl_in *osa_cap_ctrl_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_OSA_CAP_CTRL_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_OSA_CAP_CTRL_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ osa_cap_ctrl_in = (void *) cmd->send_cmd->in.payload; ++ ++ osa_cap_ctrl_in->cxl_mem_id = cxl_mem_id; ++ osa_cap_ctrl_in->lane_mask = cpu_to_le16(lane_mask); ++ osa_cap_ctrl_in->lane_dir_mask = lane_dir_mask; ++ osa_cap_ctrl_in->drop_single_os = drop_single_os; ++ osa_cap_ctrl_in->stop_mode = stop_mode; ++ osa_cap_ctrl_in->snapshot_mode = snapshot_mode; ++ osa_cap_ctrl_in->post_trig_num = cpu_to_le16(post_trig_num); ++ osa_cap_ctrl_in->os_type_mask = cpu_to_le16(os_type_mask); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_OSA_CAP_CTRL) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_OSA_CAP_CTRL); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_OSA_CFG_DUMP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_OSA_CFG_DUMP_OPCODE 51204 ++#define CXL_MEM_COMMAND_ID_OSA_CFG_DUMP_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_OSA_CFG_DUMP_PAYLOAD_OUT_SIZE 60 ++ ++struct cxl_mbox_osa_cfg_dump_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ __le16 rsvd2; ++} __attribute__((packed)); ++ ++struct cxl_mbox_osa_cfg_dump_out { ++ __le16 os_type_trig_cfg_lane_mask; ++ u8 os_type_trig_cfg_lane_dir_mask; ++ u8 os_type_trig_cfg_rate_mask; ++ __le16 os_type_trig_cfg_os_type_mask; ++ __le16 rsvd; ++ __le16 os_patt_trig_cfg_lane_mask; ++ u8 os_patt_trig_cfg_lane_dir_mask; ++ u8 os_patt_trig_cfg_rate_mask; ++ __le32 os_patt_trig_cfg_val[4]; ++ __le32 os_patt_trig_cfg_mask[4]; ++ u8 misc_trig_cfg_trig_en_mask; ++ u8 rsvd45[3]; ++ __le16 cap_ctrl_lane_mask; ++ u8 cap_ctrl_lane_dir_mask; ++ u8 cap_ctrl_drop_single_os; ++ u8 
cap_ctrl_stop_mode; ++ u8 cap_ctrl_snapshot_mode; ++ __le16 cap_ctrl_post_trig_num; ++ __le16 cap_ctrl_os_type_mask; ++ __le16 rsvd58; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_osa_cfg_dump(struct cxl_memdev *memdev, ++ u8 cxl_mem_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_osa_cfg_dump_in *osa_cfg_dump_in; ++ struct cxl_mbox_osa_cfg_dump_out *osa_cfg_dump_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_OSA_CFG_DUMP_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_OSA_CFG_DUMP_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ osa_cfg_dump_in = (void *) cmd->send_cmd->in.payload; ++ ++ osa_cfg_dump_in->cxl_mem_id = cxl_mem_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_OSA_CFG_DUMP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_OSA_CFG_DUMP); ++ return -EINVAL; ++ } ++ ++ osa_cfg_dump_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "============================ osa configuration dump ============================\n"); ++ fprintf(stdout, "OS type triggering - lane mask: %x\n", le16_to_cpu(osa_cfg_dump_out->os_type_trig_cfg_lane_mask)); ++ fprintf(stdout, "OS type triggering - lane direction mask (see OSA_LANE_DIR_BITMSK_*): %x\n", osa_cfg_dump_out->os_type_trig_cfg_lane_dir_mask); ++ fprintf(stdout, "OS type triggering - link rate mask (see OSA_LINK_RATE_BITMSK_*): %x\n", osa_cfg_dump_out->os_type_trig_cfg_rate_mask); ++ fprintf(stdout, "OS type triggering - OS type mask (see OSA_OS_TYPE_TRIG_BITMSK_*): %x\n", le16_to_cpu(osa_cfg_dump_out->os_type_trig_cfg_os_type_mask)); ++ fprintf(stdout, "OS pattern triggering - lane mask: %x\n", le16_to_cpu(osa_cfg_dump_out->os_patt_trig_cfg_lane_mask)); ++ fprintf(stdout, "OS pattern triggering - lane direction mask (see OSA_LANE_DIR_BITMSK_*): %x\n", osa_cfg_dump_out->os_patt_trig_cfg_lane_dir_mask); ++ fprintf(stdout, "OS pattern triggering - link rate mask (see OSA_LINK_RATE_BITMSK_*): %x\n", osa_cfg_dump_out->os_patt_trig_cfg_rate_mask); ++ fprintf(stdout, "OS pattern triggering - pattern match value: "); ++ /* Procedurally generated print statement. To print this array contiguously, ++ add "contiguous: True" to the YAML param and rerun cligen.py */ ++ for (int i = 0; i < 4; i++) { ++ fprintf(stdout, "os_patt_trig_cfg_val[%d]: %x\n", i, le32_to_cpu(osa_cfg_dump_out->os_patt_trig_cfg_val[i])); ++ } ++ fprintf(stdout, "\n"); ++ fprintf(stdout, "OS pattern triggering - pattern match mask: "); ++ /* Procedurally generated print statement. 
To print this array contiguously, ++ add "contiguous: True" to the YAML param and rerun cligen.py */ ++ for (int i = 0; i < 4; i++) { ++ fprintf(stdout, "os_patt_trig_cfg_mask[%d]: %x\n", i, le32_to_cpu(osa_cfg_dump_out->os_patt_trig_cfg_mask[i])); ++ } ++ fprintf(stdout, "\n"); ++ fprintf(stdout, "miscellaneous triggering: %x\n", osa_cfg_dump_out->misc_trig_cfg_trig_en_mask); ++ fprintf(stdout, "capture control - lane mask: %x\n", le16_to_cpu(osa_cfg_dump_out->cap_ctrl_lane_mask)); ++ fprintf(stdout, "capture control - lane direction mask (see OSA_LANE_DIR_BITMSK_*): %x\n", osa_cfg_dump_out->cap_ctrl_lane_dir_mask); ++ fprintf(stdout, "capture control - drop single OS's (TS1/TS2/FTS/CTL_SKP): %x\n", osa_cfg_dump_out->cap_ctrl_drop_single_os); ++ fprintf(stdout, "capture control - capture stop mode: %x\n", osa_cfg_dump_out->cap_ctrl_stop_mode); ++ fprintf(stdout, "capture control - snapshot mode enable: %x\n", osa_cfg_dump_out->cap_ctrl_snapshot_mode); ++ fprintf(stdout, "capture control: %x\n", le16_to_cpu(osa_cfg_dump_out->cap_ctrl_post_trig_num)); ++ fprintf(stdout, "capture control - OS type mask (see OSA_OS_TYPE_CAP_BITMSK_*): %x\n", le16_to_cpu(osa_cfg_dump_out->cap_ctrl_os_type_mask)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_OSA_ANA_OP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_OSA_ANA_OP_OPCODE 51205 ++#define CXL_MEM_COMMAND_ID_OSA_ANA_OP_PAYLOAD_IN_SIZE 4 ++ ++struct cxl_mbox_osa_ana_op_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ u8 op; ++ u8 rsvd3; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_osa_ana_op(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 op) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_osa_ana_op_in *osa_ana_op_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_OSA_ANA_OP_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_OSA_ANA_OP_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ osa_ana_op_in = (void *) cmd->send_cmd->in.payload; ++ ++ osa_ana_op_in->cxl_mem_id = cxl_mem_id; ++ osa_ana_op_in->op = op; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_OSA_ANA_OP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_OSA_ANA_OP); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_OSA_STATUS_QUERY CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_OSA_STATUS_QUERY_OPCODE 51206 ++#define CXL_MEM_COMMAND_ID_OSA_STATUS_QUERY_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_OSA_STATUS_QUERY_PAYLOAD_OUT_SIZE 8 ++ 
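++/*
++ * OSA status query (raw opcode 51206): the 4-byte input carries the target
++ * cxl_mem_id, and the 8-byte output reports the analyzer state, the lane
++ * and lane direction that caused the trigger, and the trigger reason mask,
++ * which the fprintf calls below decode.
++ */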
++struct cxl_mbox_osa_status_query_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ __le16 rsvd2; ++} __attribute__((packed)); ++ ++struct cxl_mbox_osa_status_query_out { ++ u8 state; ++ u8 lane_id; ++ u8 lane_dir; ++ u8 rsvd; ++ __le16 trig_reason_mask; ++ __le16 rsvd6; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_osa_status_query(struct cxl_memdev *memdev, ++ u8 cxl_mem_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_osa_status_query_in *osa_status_query_in; ++ struct cxl_mbox_osa_status_query_out *osa_status_query_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_OSA_STATUS_QUERY_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_OSA_STATUS_QUERY_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ osa_status_query_in = (void *) cmd->send_cmd->in.payload; ++ ++ osa_status_query_in->cxl_mem_id = cxl_mem_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_OSA_STATUS_QUERY) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_OSA_STATUS_QUERY); ++ return -EINVAL; ++ } ++ ++ osa_status_query_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "=============================== osa status query ===============================\n"); ++ fprintf(stdout, "OSA state (see osa_state_enum): %x\n", osa_status_query_out->state); ++ fprintf(stdout, "lane that caused the trigger: %x\n", osa_status_query_out->lane_id); ++ fprintf(stdout, "direction of lane that caused the trigger (see osa_lane_dir_enum): %x\n", osa_status_query_out->lane_dir); ++ fprintf(stdout, "trigger reason mask (see OSA_TRIG_REASON_BITMSK_*): %x\n", le16_to_cpu(osa_status_query_out->trig_reason_mask)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_OSA_ACCESS_REL CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_OSA_ACCESS_REL_OPCODE 51208 ++#define CXL_MEM_COMMAND_ID_OSA_ACCESS_REL_PAYLOAD_IN_SIZE 4 ++ ++struct cxl_mbox_osa_access_rel_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ __le16 rsvd2; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_osa_access_rel(struct cxl_memdev *memdev, ++ u8 cxl_mem_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_osa_access_rel_in *osa_access_rel_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_OSA_ACCESS_REL_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = 
&query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_OSA_ACCESS_REL_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ osa_access_rel_in = (void *) cmd->send_cmd->in.payload; ++ ++ osa_access_rel_in->cxl_mem_id = cxl_mem_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_OSA_ACCESS_REL) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_OSA_ACCESS_REL); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_LTIF_SET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_LTIF_SET_OPCODE 51712 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_LTIF_SET_PAYLOAD_IN_SIZE 20 ++ ++struct cxl_mbox_perfcnt_mta_ltif_set_in { ++ __le32 counter; ++ __le32 match_value; ++ __le32 opcode; ++ __le32 meta_field; ++ __le32 meta_value; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_ltif_set(struct cxl_memdev *memdev, ++ u32 counter, u32 match_value, u32 opcode, u32 meta_field, u32 meta_value) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_ltif_set_in *perfcnt_mta_ltif_set_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_LTIF_SET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_LTIF_SET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_ltif_set_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_ltif_set_in->counter = cpu_to_le32(counter); ++ perfcnt_mta_ltif_set_in->match_value = cpu_to_le32(match_value); ++ perfcnt_mta_ltif_set_in->opcode = cpu_to_le32(opcode); ++ perfcnt_mta_ltif_set_in->meta_field = cpu_to_le32(meta_field); ++ perfcnt_mta_ltif_set_in->meta_value = cpu_to_le32(meta_value); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_LTIF_SET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, 
CXL_MEM_COMMAND_ID_PERFCNT_MTA_LTIF_SET); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_GET_OPCODE 51713 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_GET_PAYLOAD_IN_SIZE 5 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_GET_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_perfcnt_mta_get_in { ++ u8 type; ++ __le32 counter; ++} __attribute__((packed)); ++ ++struct cxl_mbox_perfcnt_mta_get_out { ++ __le64 counter; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_get(struct cxl_memdev *memdev, ++ u8 type, u32 counter) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_get_in *perfcnt_mta_get_in; ++ struct cxl_mbox_perfcnt_mta_get_out *perfcnt_mta_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_get_in->type = type; ++ perfcnt_mta_get_in->counter = cpu_to_le32(counter); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_MTA_GET); ++ return -EINVAL; ++ } ++ ++ perfcnt_mta_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "========================= mta get performance counter ==========================\n"); ++ fprintf(stdout, "Counter: %lx\n", le64_to_cpu(perfcnt_mta_get_out->counter)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_LATCH_VAL_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_LATCH_VAL_GET_OPCODE 51714 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_LATCH_VAL_GET_PAYLOAD_IN_SIZE 5 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_LATCH_VAL_GET_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_perfcnt_mta_latch_val_get_in { ++ u8 type; ++ __le32 counter; ++} __attribute__((packed)); ++ ++struct cxl_mbox_perfcnt_mta_latch_val_get_out { ++ __le64 latch_val; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_latch_val_get(struct cxl_memdev *memdev, ++ u8 type, u32 counter) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_latch_val_get_in *perfcnt_mta_latch_val_get_in; ++ struct cxl_mbox_perfcnt_mta_latch_val_get_out *perfcnt_mta_latch_val_get_out; ++ 
int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_LATCH_VAL_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_LATCH_VAL_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_latch_val_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_latch_val_get_in->type = type; ++ perfcnt_mta_latch_val_get_in->counter = cpu_to_le32(counter); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_LATCH_VAL_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_MTA_LATCH_VAL_GET); ++ return -EINVAL; ++ } ++ ++ perfcnt_mta_latch_val_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "============================= mta get latch value ==============================\n"); ++ fprintf(stdout, "Latch value: %lx\n", le64_to_cpu(perfcnt_mta_latch_val_get_out->latch_val)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_COUNTER_CLEAR CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_COUNTER_CLEAR_OPCODE 51715 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_COUNTER_CLEAR_PAYLOAD_IN_SIZE 5 ++ ++struct cxl_mbox_perfcnt_mta_counter_clear_in { ++ u8 type; ++ __le32 counter; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_counter_clear(struct cxl_memdev *memdev, ++ u8 type, u32 counter) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_counter_clear_in *perfcnt_mta_counter_clear_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_COUNTER_CLEAR_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_COUNTER_CLEAR_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_counter_clear_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_counter_clear_in->type = type; ++ perfcnt_mta_counter_clear_in->counter = cpu_to_le32(counter); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ 
if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_COUNTER_CLEAR) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_MTA_COUNTER_CLEAR); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_CNT_VAL_LATCH CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_CNT_VAL_LATCH_OPCODE 51716 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_CNT_VAL_LATCH_PAYLOAD_IN_SIZE 5 ++ ++struct cxl_mbox_perfcnt_mta_cnt_val_latch_in { ++ u8 type; ++ __le32 counter; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_cnt_val_latch(struct cxl_memdev *memdev, ++ u8 type, u32 counter) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_cnt_val_latch_in *perfcnt_mta_cnt_val_latch_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_CNT_VAL_LATCH_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_CNT_VAL_LATCH_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_cnt_val_latch_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_cnt_val_latch_in->type = type; ++ perfcnt_mta_cnt_val_latch_in->counter = cpu_to_le32(counter); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_CNT_VAL_LATCH) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_MTA_CNT_VAL_LATCH); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_SET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_SET_OPCODE 51717 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_SET_PAYLOAD_IN_SIZE 20 ++ ++struct cxl_mbox_perfcnt_mta_hif_set_in { ++ __le32 counter; ++ __le32 match_value; ++ __le32 addr; ++ __le32 req_ty; ++ __le32 sc_ty; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_hif_set(struct cxl_memdev *memdev, ++ u32 counter, u32 match_value, u32 addr, u32 req_ty, u32 sc_ty) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_hif_set_in *perfcnt_mta_hif_set_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_SET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: 
cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_SET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_hif_set_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_hif_set_in->counter = cpu_to_le32(counter); ++ perfcnt_mta_hif_set_in->match_value = cpu_to_le32(match_value); ++ perfcnt_mta_hif_set_in->addr = cpu_to_le32(addr); ++ perfcnt_mta_hif_set_in->req_ty = cpu_to_le32(req_ty); ++ perfcnt_mta_hif_set_in->sc_ty = cpu_to_le32(sc_ty); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_SET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_SET); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CFG_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CFG_GET_OPCODE 51718 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CFG_GET_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CFG_GET_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_perfcnt_mta_hif_cfg_get_in { ++ __le32 counter; ++} __attribute__((packed)); ++ ++struct cxl_mbox_perfcnt_mta_hif_cfg_get_out { ++ __le64 counter; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_hif_cfg_get(struct cxl_memdev *memdev, ++ u32 counter) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_hif_cfg_get_in *perfcnt_mta_hif_cfg_get_in; ++ struct cxl_mbox_perfcnt_mta_hif_cfg_get_out *perfcnt_mta_hif_cfg_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CFG_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CFG_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_hif_cfg_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_hif_cfg_get_in->counter = cpu_to_le32(counter); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), 
rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CFG_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CFG_GET); ++ return -EINVAL; ++ } ++ ++ perfcnt_mta_hif_cfg_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "========================== mta get hif configuration ===========================\n"); ++ fprintf(stdout, "Counter: %lx\n", le64_to_cpu(perfcnt_mta_hif_cfg_get_out->counter)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_LATCH_VAL_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_LATCH_VAL_GET_OPCODE 51719 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_LATCH_VAL_GET_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_LATCH_VAL_GET_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_perfcnt_mta_hif_latch_val_get_in { ++ __le32 counter; ++} __attribute__((packed)); ++ ++struct cxl_mbox_perfcnt_mta_hif_latch_val_get_out { ++ __le64 latch_val; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_hif_latch_val_get(struct cxl_memdev *memdev, ++ u32 counter) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_hif_latch_val_get_in *perfcnt_mta_hif_latch_val_get_in; ++ struct cxl_mbox_perfcnt_mta_hif_latch_val_get_out *perfcnt_mta_hif_latch_val_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_LATCH_VAL_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_LATCH_VAL_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_hif_latch_val_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_hif_latch_val_get_in->counter = cpu_to_le32(counter); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_LATCH_VAL_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_LATCH_VAL_GET); ++ return -EINVAL; ++ } ++ ++ perfcnt_mta_hif_latch_val_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "=========================== mta get hif latch value ============================\n"); ++ fprintf(stdout, "Latch value: %lx\n", le64_to_cpu(perfcnt_mta_hif_latch_val_get_out->latch_val)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_COUNTER_CLEAR CXL_MEM_COMMAND_ID_RAW ++#define 
CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_COUNTER_CLEAR_OPCODE 51720 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_COUNTER_CLEAR_PAYLOAD_IN_SIZE 4 ++ ++struct cxl_mbox_perfcnt_mta_hif_counter_clear_in { ++ __le32 counter; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_hif_counter_clear(struct cxl_memdev *memdev, ++ u32 counter) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_hif_counter_clear_in *perfcnt_mta_hif_counter_clear_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_COUNTER_CLEAR_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_COUNTER_CLEAR_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_hif_counter_clear_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_hif_counter_clear_in->counter = cpu_to_le32(counter); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_COUNTER_CLEAR) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_COUNTER_CLEAR); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CNT_VAL_LATCH CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CNT_VAL_LATCH_OPCODE 51721 ++#define CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CNT_VAL_LATCH_PAYLOAD_IN_SIZE 4 ++ ++struct cxl_mbox_perfcnt_mta_hif_cnt_val_latch_in { ++ __le32 counter; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_perfcnt_mta_hif_cnt_val_latch(struct cxl_memdev *memdev, ++ u32 counter) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_mta_hif_cnt_val_latch_in *perfcnt_mta_hif_cnt_val_latch_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CNT_VAL_LATCH_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CNT_VAL_LATCH_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_mta_hif_cnt_val_latch_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_mta_hif_cnt_val_latch_in->counter = 
cpu_to_le32(counter); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CNT_VAL_LATCH) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_MTA_HIF_CNT_VAL_LATCH); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_SELECT CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_SELECT_OPCODE 51728 ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_SELECT_PAYLOAD_IN_SIZE 13 ++ ++struct cxl_mbox_perfcnt_ddr_generic_select_in { ++ u8 ddr_id; ++ u8 cid; ++ u8 rank; ++ u8 bank; ++ u8 bankgroup; ++ __le64 event; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_perfcnt_ddr_generic_select(struct cxl_memdev *memdev, ++ u8 ddr_id, u8 cid, u8 rank, u8 bank, u8 bankgroup, u64 event) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_ddr_generic_select_in *perfcnt_ddr_generic_select_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_SELECT_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_SELECT_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_ddr_generic_select_in = (void *) cmd->send_cmd->in.payload; ++ ++ perfcnt_ddr_generic_select_in->ddr_id = ddr_id; ++ perfcnt_ddr_generic_select_in->cid = cid; ++ perfcnt_ddr_generic_select_in->rank = rank; ++ perfcnt_ddr_generic_select_in->bank = bank; ++ perfcnt_ddr_generic_select_in->bankgroup = bankgroup; ++ perfcnt_ddr_generic_select_in->event = cpu_to_le64(event); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_SELECT) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_SELECT); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_ERR_INJ_DRS_POISON CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_ERR_INJ_DRS_POISON_OPCODE 51970 ++#define CXL_MEM_COMMAND_ID_ERR_INJ_DRS_POISON_PAYLOAD_IN_SIZE 6 ++ ++struct cxl_mbox_err_inj_drs_poison_in { ++ u8 ch_id; ++ u8 duration; ++ u8 
inj_mode; ++ u8 rsvd; ++ __le16 tag; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_err_inj_drs_poison(struct cxl_memdev *memdev, ++ u8 ch_id, u8 duration, u8 inj_mode, u16 tag) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_err_inj_drs_poison_in *err_inj_drs_poison_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_ERR_INJ_DRS_POISON_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_ERR_INJ_DRS_POISON_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ err_inj_drs_poison_in = (void *) cmd->send_cmd->in.payload; ++ ++ err_inj_drs_poison_in->ch_id = ch_id; ++ err_inj_drs_poison_in->duration = duration; ++ err_inj_drs_poison_in->inj_mode = inj_mode; ++ err_inj_drs_poison_in->tag = cpu_to_le16(tag); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_ERR_INJ_DRS_POISON) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_ERR_INJ_DRS_POISON); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_ERR_INJ_DRS_ECC CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_ERR_INJ_DRS_ECC_OPCODE 51971 ++#define CXL_MEM_COMMAND_ID_ERR_INJ_DRS_ECC_PAYLOAD_IN_SIZE 6 ++ ++struct cxl_mbox_err_inj_drs_ecc_in { ++ u8 ch_id; ++ u8 duration; ++ u8 inj_mode; ++ u8 rsvd; ++ __le16 tag; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_err_inj_drs_ecc(struct cxl_memdev *memdev, ++ u8 ch_id, u8 duration, u8 inj_mode, u16 tag) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_err_inj_drs_ecc_in *err_inj_drs_ecc_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_ERR_INJ_DRS_ECC_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_ERR_INJ_DRS_ECC_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ err_inj_drs_ecc_in = (void *) cmd->send_cmd->in.payload; ++ ++ err_inj_drs_ecc_in->ch_id = ch_id; ++ err_inj_drs_ecc_in->duration = duration; ++ err_inj_drs_ecc_in->inj_mode = inj_mode; ++ err_inj_drs_ecc_in->tag = cpu_to_le16(tag); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, 
"%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_ERR_INJ_DRS_ECC) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_ERR_INJ_DRS_ECC); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_ERR_INJ_RXFLIT_CRC CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_ERR_INJ_RXFLIT_CRC_OPCODE 51972 ++#define CXL_MEM_COMMAND_ID_ERR_INJ_RXFLIT_CRC_PAYLOAD_IN_SIZE 1 ++ ++struct cxl_mbox_err_inj_rxflit_crc_in { ++ u8 cxl_mem_id; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_err_inj_rxflit_crc(struct cxl_memdev *memdev, ++ u8 cxl_mem_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_err_inj_rxflit_crc_in *err_inj_rxflit_crc_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_ERR_INJ_RXFLIT_CRC_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_ERR_INJ_RXFLIT_CRC_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ err_inj_rxflit_crc_in = (void *) cmd->send_cmd->in.payload; ++ ++ err_inj_rxflit_crc_in->cxl_mem_id = cxl_mem_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_ERR_INJ_RXFLIT_CRC) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_ERR_INJ_RXFLIT_CRC); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_ERR_INJ_TXFLIT_CRC CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_ERR_INJ_TXFLIT_CRC_OPCODE 51973 ++#define CXL_MEM_COMMAND_ID_ERR_INJ_TXFLIT_CRC_PAYLOAD_IN_SIZE 1 ++ ++struct cxl_mbox_err_inj_txflit_crc_in { ++ u8 cxl_mem_id; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_err_inj_txflit_crc(struct cxl_memdev *memdev, ++ u8 cxl_mem_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_err_inj_txflit_crc_in *err_inj_txflit_crc_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_ERR_INJ_TXFLIT_CRC_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = 
&query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_ERR_INJ_TXFLIT_CRC_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ err_inj_txflit_crc_in = (void *) cmd->send_cmd->in.payload; ++ ++ err_inj_txflit_crc_in->cxl_mem_id = cxl_mem_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_ERR_INJ_TXFLIT_CRC) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_ERR_INJ_TXFLIT_CRC); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_ERR_INJ_VIRAL CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_ERR_INJ_VIRAL_OPCODE 51974 ++#define CXL_MEM_COMMAND_ID_ERR_INJ_VIRAL_PAYLOAD_IN_SIZE 1 ++ ++struct cxl_mbox_err_inj_viral_in { ++ u8 ld_id; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_err_inj_viral(struct cxl_memdev *memdev, ++ u8 ld_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_err_inj_viral_in *err_inj_viral_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_ERR_INJ_VIRAL_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_ERR_INJ_VIRAL_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ err_inj_viral_in = (void *) cmd->send_cmd->in.payload; ++ ++ err_inj_viral_in->ld_id = ld_id; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_ERR_INJ_VIRAL) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_ERR_INJ_VIRAL); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_RUN CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_RUN_OPCODE 52224 ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_RUN_PAYLOAD_IN_SIZE 8 ++ ++struct cxl_mbox_eh_eye_cap_run_in { ++ u8 rsvd; ++ u8 depth; ++ __le16 rsvd2; ++ __le32 lane_mask; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_eh_eye_cap_run(struct cxl_memdev 
*memdev, ++ u8 depth, u32 lane_mask) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_eh_eye_cap_run_in *eh_eye_cap_run_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_EH_EYE_CAP_RUN_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_EYE_CAP_RUN_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ eh_eye_cap_run_in = (void *) cmd->send_cmd->in.payload; ++ ++ eh_eye_cap_run_in->depth = depth; ++ eh_eye_cap_run_in->lane_mask = cpu_to_le32(lane_mask); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_EYE_CAP_RUN) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_EH_EYE_CAP_RUN); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_READ CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_READ_OPCODE 52226 ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_READ_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_READ_PAYLOAD_OUT_SIZE 248 ++ ++struct cxl_mbox_eh_eye_cap_read_in { ++ u8 rsvd; ++ u8 lane_id; ++ u8 bin_num; ++ u8 rsvd3; ++} __attribute__((packed)); ++ ++struct cxl_mbox_eh_eye_cap_read_out { ++ u8 num_phase; ++ u8 rsvd[7]; ++ __le32 ber_data[60]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_eh_eye_cap_read(struct cxl_memdev *memdev, ++ u8 lane_id, u8 bin_num) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_eh_eye_cap_read_in *eh_eye_cap_read_in; ++ struct cxl_mbox_eh_eye_cap_read_out *eh_eye_cap_read_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_EH_EYE_CAP_READ_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_EYE_CAP_READ_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ eh_eye_cap_read_in = (void *) cmd->send_cmd->in.payload; ++ ++ eh_eye_cap_read_in->lane_id = lane_id; ++ eh_eye_cap_read_in->bin_num = bin_num; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = 
cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_EYE_CAP_READ) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_EH_EYE_CAP_READ); ++ return -EINVAL; ++ } ++ ++ eh_eye_cap_read_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "============================= eh eye capture read ==============================\n"); ++ fprintf(stdout, "Total number of phases in ber_data: %x\n", eh_eye_cap_read_out->num_phase); ++ fprintf(stdout, "Per-phase bit error rates (multiplied by EYE_CAP_ERROR_CNT_MULT): "); ++ /* Procedurally generated print statement. To print this array contiguously, ++ add "contiguous: True" to the YAML param and rerun cligen.py */ ++ for (int i = 0; i < 60; i++) { ++ fprintf(stdout, "ber_data[%d]: %x\n", i, le32_to_cpu(eh_eye_cap_read_out->ber_data[i])); ++ } ++ fprintf(stdout, "\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_GET_OPCODE 52227 ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_GET_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_GET_PAYLOAD_OUT_SIZE 28 ++ ++struct cxl_mbox_eh_adapt_get_in { ++ __le32 lane_id; ++} __attribute__((packed)); ++ ++struct cxl_mbox_eh_adapt_get_out { ++ u8 pga_gain; ++ u8 pga_off2; ++ u8 pga_off1; ++ u8 cdfe_a2; ++ u8 cdfe_a3; ++ u8 cdfe_a4; ++ u8 cdfe_a5; ++ u8 cdfe_a6; ++ u8 cdfe_a7; ++ u8 cdfe_a8; ++ u8 cdfe_a9; ++ u8 cdfe_a10; ++ u8 zobel_a_gain; ++ u8 zobel_b_gain; ++ __le16 zobel_dc_offset; ++ __le16 udfe_thr_0; ++ __le16 udfe_thr_1; ++ __le16 dc_offset; ++ __le16 median_amp; ++ u8 ph_ofs_t; ++ u8 rsvd[3]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_eh_adapt_get(struct cxl_memdev *memdev, ++ u32 lane_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_eh_adapt_get_in *eh_adapt_get_in; ++ struct cxl_mbox_eh_adapt_get_out *eh_adapt_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_EH_ADAPT_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_ADAPT_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ eh_adapt_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ eh_adapt_get_in->lane_id = cpu_to_le32(lane_id); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_ADAPT_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), 
cmd->send_cmd->id, CXL_MEM_COMMAND_ID_EH_ADAPT_GET); ++ return -EINVAL; ++ } ++ ++ eh_adapt_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "============================ eh get adaptation data ============================\n"); ++ fprintf(stdout, "contain the current value of the object PGA_GAIN as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ: %x\n", eh_adapt_get_out->pga_gain); ++ fprintf(stdout, "PGA Stage2 DC offset correction: %x\n", eh_adapt_get_out->pga_off2); ++ fprintf(stdout, "PGA Stage1 DC offset correction: %x\n", eh_adapt_get_out->pga_off1); ++ fprintf(stdout, "I_TAP2<7:0> 2's compliment: %x\n", eh_adapt_get_out->cdfe_a2); ++ fprintf(stdout, "I_TAP3<6:0> 2's compliment: %x\n", eh_adapt_get_out->cdfe_a3); ++ fprintf(stdout, "I_TAP4<6:0> 2's compliment: %x\n", eh_adapt_get_out->cdfe_a4); ++ fprintf(stdout, "I_TAP5<6:0> 2's compliment: %x\n", eh_adapt_get_out->cdfe_a5); ++ fprintf(stdout, "I_TAP6<6:0> 2's compliment: %x\n", eh_adapt_get_out->cdfe_a6); ++ fprintf(stdout, "I_TAP7<6:0> 2's compliment: %x\n", eh_adapt_get_out->cdfe_a7); ++ fprintf(stdout, "I_TAP8<6:0> 2's compliment: %x\n", eh_adapt_get_out->cdfe_a8); ++ fprintf(stdout, "I_TAP9<5:0> 2's compliment: %x\n", eh_adapt_get_out->cdfe_a9); ++ fprintf(stdout, "I_TAP10<5:0> 2's compliment: %x\n", eh_adapt_get_out->cdfe_a10); ++ fprintf(stdout, "Zobel a_gain: %x\n", eh_adapt_get_out->zobel_a_gain); ++ fprintf(stdout, "zobel_b_gain: %x\n", eh_adapt_get_out->zobel_b_gain); ++ fprintf(stdout, "Zobel DC offset correction: %x\n", le16_to_cpu(eh_adapt_get_out->zobel_dc_offset)); ++ fprintf(stdout, "contain the current value of the object UDFE_THR_0 as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ.: %x\n", le16_to_cpu(eh_adapt_get_out->udfe_thr_0)); ++ fprintf(stdout, "contain the current value of the object UDFE_THR_1 as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ: %x\n", le16_to_cpu(eh_adapt_get_out->udfe_thr_1)); ++ fprintf(stdout, "contain the current value of the object DC_OFFSET as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ: %x\n", le16_to_cpu(eh_adapt_get_out->dc_offset)); ++ fprintf(stdout, "contain the current value of the object PGA_GAIN as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ: %x\n", le16_to_cpu(eh_adapt_get_out->median_amp)); ++ fprintf(stdout, "contain the current value of the object PH_OFS_T as captured through a write to register bit ADAPT_DSP_RESULTS_CAPTURE_REQ: %x\n", eh_adapt_get_out->ph_ofs_t); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_ONEOFF CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_ONEOFF_OPCODE 52228 ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_ONEOFF_PAYLOAD_IN_SIZE 16 ++ ++struct cxl_mbox_eh_adapt_oneoff_in { ++ __le32 lane_id; ++ __le32 preload; ++ __le32 loops; ++ __le32 objects; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_eh_adapt_oneoff(struct cxl_memdev *memdev, ++ u32 lane_id, u32 preload, u32 loops, u32 objects) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_eh_adapt_oneoff_in *eh_adapt_oneoff_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_EH_ADAPT_ONEOFF_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = 
&query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_ADAPT_ONEOFF_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ eh_adapt_oneoff_in = (void *) cmd->send_cmd->in.payload; ++ ++ eh_adapt_oneoff_in->lane_id = cpu_to_le32(lane_id); ++ eh_adapt_oneoff_in->preload = cpu_to_le32(preload); ++ eh_adapt_oneoff_in->loops = cpu_to_le32(loops); ++ eh_adapt_oneoff_in->objects = cpu_to_le32(objects); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_ADAPT_ONEOFF) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_EH_ADAPT_ONEOFF); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_FORCE CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_FORCE_OPCODE 52229 ++#define CXL_MEM_COMMAND_ID_EH_ADAPT_FORCE_PAYLOAD_IN_SIZE 40 ++ ++struct cxl_mbox_eh_adapt_force_in { ++ __le32 lane_id; ++ __le32 rate; ++ __le32 vdd_bias; ++ __le32 ssc; ++ u8 pga_gain; ++ u8 pga_a0; ++ u8 pga_off; ++ u8 cdfe_a2; ++ u8 cdfe_a3; ++ u8 cdfe_a4; ++ u8 cdfe_a5; ++ u8 cdfe_a6; ++ u8 cdfe_a7; ++ u8 cdfe_a8; ++ u8 cdfe_a9; ++ u8 cdfe_a10; ++ __le16 dc_offset; ++ __le16 zobel_dc_offset; ++ __le16 udfe_thr_0; ++ __le16 udfe_thr_1; ++ __le16 median_amp; ++ u8 zobel_a_gain; ++ u8 ph_ofs_t; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_eh_adapt_force(struct cxl_memdev *memdev, ++ u32 lane_id, u32 rate, u32 vdd_bias, u32 ssc, u8 pga_gain, u8 pga_a0, ++ u8 pga_off, u8 cdfe_a2, u8 cdfe_a3, u8 cdfe_a4, u8 cdfe_a5, u8 cdfe_a6, ++ u8 cdfe_a7, u8 cdfe_a8, u8 cdfe_a9, u8 cdfe_a10, u16 dc_offset, ++ u16 zobel_dc_offset, u16 udfe_thr_0, u16 udfe_thr_1, u16 median_amp, ++ u8 zobel_a_gain, u8 ph_ofs_t) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_eh_adapt_force_in *eh_adapt_force_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_EH_ADAPT_FORCE_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_ADAPT_FORCE_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ eh_adapt_force_in = (void *) cmd->send_cmd->in.payload; ++ ++ eh_adapt_force_in->lane_id = cpu_to_le32(lane_id); ++ eh_adapt_force_in->rate = cpu_to_le32(rate); ++ eh_adapt_force_in->vdd_bias = cpu_to_le32(vdd_bias); ++ eh_adapt_force_in->ssc = cpu_to_le32(ssc); ++ eh_adapt_force_in->pga_gain = pga_gain; ++ 
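++ /*
++  * The remaining adaptation objects below are copied straight from the
++  * function arguments; single-byte objects are stored as-is and the
++  * multi-byte fields (dc_offset through median_amp) are converted to
++  * little endian with cpu_to_le16() before the command is submitted.
++  */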
eh_adapt_force_in->pga_a0 = pga_a0; ++ eh_adapt_force_in->pga_off = pga_off; ++ eh_adapt_force_in->cdfe_a2 = cdfe_a2; ++ eh_adapt_force_in->cdfe_a3 = cdfe_a3; ++ eh_adapt_force_in->cdfe_a4 = cdfe_a4; ++ eh_adapt_force_in->cdfe_a5 = cdfe_a5; ++ eh_adapt_force_in->cdfe_a6 = cdfe_a6; ++ eh_adapt_force_in->cdfe_a7 = cdfe_a7; ++ eh_adapt_force_in->cdfe_a8 = cdfe_a8; ++ eh_adapt_force_in->cdfe_a9 = cdfe_a9; ++ eh_adapt_force_in->cdfe_a10 = cdfe_a10; ++ eh_adapt_force_in->dc_offset = cpu_to_le16(dc_offset); ++ eh_adapt_force_in->zobel_dc_offset = cpu_to_le16(zobel_dc_offset); ++ eh_adapt_force_in->udfe_thr_0 = cpu_to_le16(udfe_thr_0); ++ eh_adapt_force_in->udfe_thr_1 = cpu_to_le16(udfe_thr_1); ++ eh_adapt_force_in->median_amp = cpu_to_le16(median_amp); ++ eh_adapt_force_in->zobel_a_gain = zobel_a_gain; ++ eh_adapt_force_in->ph_ofs_t = ph_ofs_t; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_ADAPT_FORCE) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_EH_ADAPT_FORCE); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_HBO_STATUS CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HBO_STATUS_OPCODE 52480 ++#define CXL_MEM_COMMAND_ID_HBO_STATUS_PAYLOAD_OUT_SIZE 8 ++ ++ ++struct cxl_mbox_hbo_status_out { ++ __le64 bo_status; ++} __attribute__((packed)); ++ ++struct hbo_status_fields { ++ u16 opcode; ++ u8 percent_complete; ++ u8 is_running; ++ u16 return_code; ++ u16 extended_status; ++}; ++ ++CXL_EXPORT int cxl_memdev_hbo_status(struct cxl_memdev *memdev, u8 print_output) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_hbo_status_out *hbo_status_out; ++ struct hbo_status_fields status_fields; ++ u8 opcode_shift = 0; ++ u8 percent_shift = 16; ++ u8 running_shift = 23; ++ u8 retcode_shift = 32; ++ u8 extended_shift = 48; ++ u64 opcode_mask = (1 << percent_shift) - (1 << opcode_shift); // 0-15 ++ u64 percent_mask = (1 << running_shift) - (1 << percent_shift); // 16-22 ++ u64 running_mask = (1 << running_shift); // 23 ++ u64 retcode_mask = (1 << extended_shift) - (1 << retcode_shift); // 32-47 ++ u64 extended_mask = 0xffffffffffffffff - (1 << extended_shift) + 1; // 48-63 ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HBO_STATUS_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HBO_STATUS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HBO_STATUS); ++ return -EINVAL; ++ } ++ ++ hbo_status_out = (void 
*)cmd->send_cmd->out.payload; ++ status_fields.opcode = (hbo_status_out->bo_status & opcode_mask) >> opcode_shift; ++ status_fields.percent_complete = (hbo_status_out->bo_status & percent_mask) >> percent_shift; ++ status_fields.is_running = (hbo_status_out->bo_status & running_mask) >> running_shift; ++ status_fields.return_code = (hbo_status_out->bo_status & retcode_mask) >> retcode_shift; ++ status_fields.extended_status = (hbo_status_out->bo_status & extended_mask) >> extended_shift; ++ if (print_output) ++ { ++ fprintf(stdout, "=============================== hidden bo status ===============================\n"); ++ fprintf(stdout, "BO status: %08lx\n", le64_to_cpu(hbo_status_out->bo_status)); ++ fprintf(stdout, " - Opcode: %x\n", status_fields.opcode); ++ fprintf(stdout, " - Percent complete: %d\n", status_fields.percent_complete); ++ fprintf(stdout, " - Is running: %d\n", status_fields.is_running); ++ fprintf(stdout, " - Return code: %d\n", status_fields.return_code); ++ fprintf(stdout, " - Extended status: %x\n", status_fields.extended_status); ++ } ++ ++ if (status_fields.is_running) { ++ rc = 1; ++ } ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_HBO_TRANSFER_FW CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HBO_TRANSFER_FW_OPCODE 52481 ++ ++ ++ ++CXL_EXPORT int cxl_memdev_hbo_transfer_fw(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HBO_TRANSFER_FW_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HBO_TRANSFER_FW) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HBO_TRANSFER_FW); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_HBO_ACTIVATE_FW CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HBO_ACTIVATE_FW_OPCODE 52482 ++ ++ ++ ++CXL_EXPORT int cxl_memdev_hbo_activate_fw(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HBO_ACTIVATE_FW_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HBO_ACTIVATE_FW) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HBO_ACTIVATE_FW); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ 
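++/*
++ * Usage sketch (illustrative only, not one of the generated command
++ * wrappers): start the hidden firmware transfer and then poll the hidden
++ * background-operation status until it stops reporting "running". It
++ * assumes a valid "struct cxl_memdev *memdev" obtained through the usual
++ * libcxl enumeration calls, and that <unistd.h> is available for sleep().
++ *
++ *	static int transfer_fw_and_wait(struct cxl_memdev *memdev)
++ *	{
++ *		int rc = cxl_memdev_hbo_transfer_fw(memdev);
++ *
++ *		if (rc)
++ *			return rc;
++ *		// cxl_memdev_hbo_status() returns 1 while the background
++ *		// operation is still running, 0 once it has finished and a
++ *		// negative errno on failure; print_output=0 suppresses the
++ *		// status dump.
++ *		while ((rc = cxl_memdev_hbo_status(memdev, 0)) == 1)
++ *			sleep(1);
++ *		return rc;
++ *	}
++ */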
++ ++#define CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_CLEAR CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_CLEAR_OPCODE 52736 ++#define CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_CLEAR_PAYLOAD_IN_SIZE 4 ++ ++struct cxl_mbox_health_counters_clear_in { ++ __le32 bitmask; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_health_counters_clear(struct cxl_memdev *memdev, ++ u32 bitmask) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_health_counters_clear_in *health_counters_clear_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_CLEAR_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_CLEAR_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ health_counters_clear_in = (void *) cmd->send_cmd->in.payload; ++ ++ health_counters_clear_in->bitmask = cpu_to_le32(bitmask); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_CLEAR) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_CLEAR); ++ return -EINVAL; ++ } ++ ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_GET_OPCODE 52737 ++#define CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_GET_PAYLOAD_OUT_SIZE 40 ++ ++ ++struct cxl_mbox_health_counters_get_out { ++ __le32 temperature_threshold_exceeded; ++ __le32 power_on_events; ++ __le32 power_on_hours; ++ __le32 cxl_mem_link_crc_errors; ++ __le32 cxl_io_link_lcrc_errors; ++ __le32 cxl_io_link_ecrc_errors; ++ __le32 num_ddr_single_ecc_errors; ++ __le32 num_ddr_double_ecc_errors; ++ __le32 link_recovery_events; ++ __le32 time_in_throttled; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_health_counters_get(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_health_counters_get_out *health_counters_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != 
CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HEALTH_COUNTERS_GET); ++ return -EINVAL; ++ } ++ ++ health_counters_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "============================= get health counters ==============================\n"); ++ fprintf(stdout, "Number of times temperature has exceeded threshold: %d\n", le32_to_cpu(health_counters_get_out->temperature_threshold_exceeded)); ++ fprintf(stdout, "Number of Power On events: %d\n", le32_to_cpu(health_counters_get_out->power_on_events)); ++ fprintf(stdout, "Number of Power On hours: %d\n", le32_to_cpu(health_counters_get_out->power_on_hours)); ++ fprintf(stdout, "Number of CXL.mem Link CRC errors: %d\n", le32_to_cpu(health_counters_get_out->cxl_mem_link_crc_errors)); ++ fprintf(stdout, "Number of CXL.io Link LCRC errors: %d\n", le32_to_cpu(health_counters_get_out->cxl_io_link_lcrc_errors)); ++ fprintf(stdout, "Number of CXL.io Link ECRC errors: %d\n", le32_to_cpu(health_counters_get_out->cxl_io_link_ecrc_errors)); ++ fprintf(stdout, "Number of DDR single ECC errors: %d\n", le32_to_cpu(health_counters_get_out->num_ddr_single_ecc_errors)); ++ fprintf(stdout, "Number of DDR double ECC errors: %d\n", le32_to_cpu(health_counters_get_out->num_ddr_double_ecc_errors)); ++ fprintf(stdout, "Number of Link recovery events: %d\n", le32_to_cpu(health_counters_get_out->link_recovery_events)); ++ fprintf(stdout, "Amount of time spent in throttled state (in seconds): %d\n", le32_to_cpu(health_counters_get_out->time_in_throttled)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_HCT_GET_PLAT_PARAMS CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HCT_GET_PLAT_PARAMS_OPCODE 0xC600 ++#define CXL_MEM_COMMAND_ID_HCT_GET_PLAT_PARAMS_OUT_SIZE 8 // varies ++ ++ ++struct cxl_mbox_hct_get_plat_param_out { ++ u8 num_inst; ++ u8* type; ++} __attribute__((packed)); ++ ++const char *TRACE_BUF_INST_TYPE[2] = { ++ "FLIT", ++ "HIF", ++}; ++ ++CXL_EXPORT int cxl_memdev_hct_get_plat_param(struct cxl_memdev *memdev) ++{ ++ u8 *out_ptr; ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_hct_get_plat_param_out *hct_get_plat_param_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HCT_GET_PLAT_PARAMS_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HCT_GET_PLAT_PARAMS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HCT_GET_PLAT_PARAMS); ++ return -EINVAL; ++ } ++ ++ hct_get_plat_param_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "=============================== Get HIF/CXL Trace Buffer Platform Parameters ===============================\n"); ++ fprintf(stdout, "Number of trace buffer instances: %u\n", hct_get_plat_param_out->num_inst); ++ out_ptr = (u8*) cmd->send_cmd->out.payload; ++ for (int i = 1; i < 
cmd->send_cmd->out.size; i++) { ++ printf("Instance: %d type %02x %s\n", i, out_ptr[i], TRACE_BUF_INST_TYPE[out_ptr[i]]); ++ } ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++#define CXL_MEM_COMMAND_ID_ERR_INJ_HIF_POISON CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_ERR_INJ_HIF_POISON_OPCODE 0XCB00 ++#define CXL_MEM_COMMAND_ID_ERR_INJ_HIF_POISON_PAYLOAD_IN_SIZE 9 ++#define HIF_POISON_ADDRESS_SIZE 5 ++ ++ ++struct cxl_mbox_err_inj_hif_poison_in { ++ u8 ch_id; ++ u8 duration; ++ u8 inj_mode; ++ u8 rsvd; ++ char *address[HIF_POISON_ADDRESS_SIZE]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_err_inj_hif_poison(struct cxl_memdev *memdev, ++ u8 ch_id, u8 duration, u8 inj_mode, u64 address) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_err_inj_hif_poison_in *err_inj_hif_poison_in; ++ int rc = 0; ++ ++ __le64 leaddress; ++ leaddress = cpu_to_le64(address); ++ ++ cmd = cxl_cmd_new_raw(memdev, ++ CXL_MEM_COMMAND_ID_ERR_INJ_HIF_POISON_OPCODE); ++ if(!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_ERR_INJ_HIF_POISON_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ err_inj_hif_poison_in = (void *) cmd->send_cmd->in.payload; ++ err_inj_hif_poison_in->ch_id = ch_id; ++ err_inj_hif_poison_in->duration = duration; ++ err_inj_hif_poison_in->inj_mode = inj_mode; ++ memcpy(err_inj_hif_poison_in->address, &leaddress, HIF_POISON_ADDRESS_SIZE); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_ERR_INJ_HIF_POISON) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_ERR_INJ_HIF_POISON); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_ERR_INJ_HIF_ECC CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_ERR_INJ_HIF_ECC_OPCODE 0XCB01 ++#define CXL_MEM_COMMAND_ID_ERR_INJ_HIF_ECC_PAYLOAD_IN_SIZE 9 ++#define HIF_ECC_ADDRESS_SIZE 5 ++ ++ ++struct cxl_mbox_err_inj_hif_ecc_in { ++ u8 ch_id; ++ u8 duration; ++ u8 inj_mode; ++ u8 rsvd; ++ char *address[HIF_ECC_ADDRESS_SIZE]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_err_inj_hif_ecc(struct cxl_memdev *memdev, ++ u8 ch_id, u8 duration, u8 inj_mode, u64 address) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_err_inj_hif_ecc_in *err_inj_hif_ecc_in; ++ int rc = 0; ++ ++ __le64 leaddress; ++ leaddress = cpu_to_le64(address); ++ ++ cmd = cxl_cmd_new_raw(memdev, ++ CXL_MEM_COMMAND_ID_ERR_INJ_HIF_ECC_OPCODE); ++ if(!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ 
cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_ERR_INJ_HIF_ECC_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ err_inj_hif_ecc_in = (void *) cmd->send_cmd->in.payload; ++ err_inj_hif_ecc_in->ch_id = ch_id; ++ err_inj_hif_ecc_in->duration = duration; ++ err_inj_hif_ecc_in->inj_mode = inj_mode; ++ memcpy(err_inj_hif_ecc_in->address, &leaddress, HIF_ECC_ADDRESS_SIZE); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_ERR_INJ_HIF_ECC) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_ERR_INJ_HIF_ECC); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_CAPTURE CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_CAPTURE_OPCODE 0XCA11 ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_CAPTURE_PAYLOAD_IN_SIZE 8 ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_CAPTURE_PAYLOAD_OUT_SIZE 32 ++ ++struct cxl_mbox_perfcnt_ddr_generic_capture_in { ++ u8 ddr_id; ++ u8 rsvd[3]; ++ __le32 poll_period_ms; ++} __attribute__((packed)); ++ ++struct cxl_mbox_perfcnt_ddr_generic_capture_out { ++ __le32 result[32]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_perfcnt_ddr_generic_capture(struct cxl_memdev *memdev, ++ u8 ddr_id, u32 poll_period_ms) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_ddr_generic_capture_in *perfcnt_ddr_generic_capture_in; ++ struct cxl_mbox_perfcnt_ddr_generic_capture_out *perfcnt_ddr_generic_capture_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, ++ CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_CAPTURE_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_CAPTURE_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_ddr_generic_capture_in = (void *) cmd->send_cmd->in.payload; ++ perfcnt_ddr_generic_capture_in->ddr_id = ddr_id; ++ perfcnt_ddr_generic_capture_in->poll_period_ms = cpu_to_le32(poll_period_ms); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++
cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_CAPTURE) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_PERFCNT_DDR_GENERIC_CAPTURE); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++ perfcnt_ddr_generic_capture_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "=========================== PERFCNT DDR Generic Capture ============================\n"); ++ fprintf(stdout, "Generic Counter Readings:\n"); ++ for(int i=0; i<8; i++) { ++ fprintf(stdout, "%x\n", le32_to_cpu(perfcnt_ddr_generic_capture_out->result[i])); ++ } ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_DFI_CAPTURE CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_DFI_CAPTURE_OPCODE 0XCA12 ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_DFI_CAPTURE_PAYLOAD_IN_SIZE 8 ++#define CXL_MEM_COMMAND_ID_PERFCNT_DDR_DFI_CAPTURE_PAYLOAD_OUT_SIZE 24 ++ ++struct cxl_mbox_perfcnt_ddr_dfi_capture_in { ++ u8 ddr_id; ++ u8 rsvd[3]; ++ __le32 poll_period_ms; ++} __attribute__((packed)); ++ ++struct cxl_mbox_perfcnt_ddr_dfi_capture_out { ++ __le32 dfi_counter17; ++ __le32 dfi_counter20; ++ __le32 dfi_counter21; ++ __le32 dfi_ch1_counter17; ++ __le32 dfi_ch1_counter20; ++ __le32 dfi_ch1_counter21; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_perfcnt_ddr_dfi_capture(struct cxl_memdev *memdev, ++ u8 ddr_id, u32 poll_period_ms) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_perfcnt_ddr_dfi_capture_in *perfcnt_ddr_dfi_capture_in; ++ struct cxl_mbox_perfcnt_ddr_dfi_capture_out *perfcnt_ddr_dfi_capture_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, ++ CXL_MEM_COMMAND_ID_PERFCNT_DDR_DFI_CAPTURE_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ cinfo->size_in = CXL_MEM_COMMAND_ID_PERFCNT_DDR_DFI_CAPTURE_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ perfcnt_ddr_dfi_capture_in = (void *) cmd->send_cmd->in.payload; ++ perfcnt_ddr_dfi_capture_in->ddr_id = ddr_id; ++ perfcnt_ddr_dfi_capture_in->poll_period_ms = cpu_to_le32(poll_period_ms); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_PERFCNT_DDR_DFI_CAPTURE) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_PERFCNT_DDR_DFI_CAPTURE); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ perfcnt_ddr_dfi_capture_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "=========================== PERFCNT DDR DFI Capture ============================\n"); ++
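++ /*
++  * The 24-byte output payload carries six little-endian readings: DFI
++  * counters 17, 20 and 21 for channel 0 followed by the same three
++  * counters for channel 1; they are decoded with le32_to_cpu() and
++  * printed below.
++  */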
fprintf(stdout, "DFI Counter Readings:\n"); ++ fprintf(stdout, "DFI Counter 17: %x\n", le32_to_cpu(perfcnt_ddr_dfi_capture_out->dfi_counter17)); ++ fprintf(stdout, "DFI Counter 20: %x\n", le32_to_cpu(perfcnt_ddr_dfi_capture_out->dfi_counter20)); ++ fprintf(stdout, "DFI Counter 21: %x\n", le32_to_cpu(perfcnt_ddr_dfi_capture_out->dfi_counter21)); ++ fprintf(stdout, "DFI CH1 Counter 17: %x\n", le32_to_cpu(perfcnt_ddr_dfi_capture_out->dfi_ch1_counter17)); ++ fprintf(stdout, "DFI CH1 Counter 20: %x\n", le32_to_cpu(perfcnt_ddr_dfi_capture_out->dfi_ch1_counter20)); ++ fprintf(stdout, "DFI CH1 Counter 21: %x\n", le32_to_cpu(perfcnt_ddr_dfi_capture_out->dfi_ch1_counter21)); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_TIMEOUT_ENABLE CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_TIMEOUT_ENABLE_OPCODE 0XCC0A ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_TIMEOUT_ENABLE_PAYLOAD_IN_SIZE 2 ++ ++struct cxl_mbox_eh_eye_cap_timeout_enable_in { ++ u8 rsvd; ++ u8 enable; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_eh_eye_cap_timeout_enable(struct cxl_memdev *memdev, u8 enable) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_eh_eye_cap_timeout_enable_in *eh_eye_cap_timeout_enable_in; ++ int rc=0; ++ ++ cmd = cxl_cmd_new_raw(memdev, ++ CXL_MEM_COMMAND_ID_EH_EYE_CAP_TIMEOUT_ENABLE_OPCODE); ++ if(!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_EYE_CAP_TIMEOUT_ENABLE_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ eh_eye_cap_timeout_enable_in = (void *) cmd->send_cmd->in.payload; ++ eh_eye_cap_timeout_enable_in->enable = enable; ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_EYE_CAP_TIMEOUT_ENABLE) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_EH_EYE_CAP_TIMEOUT_ENABLE); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_STATUS CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_STATUS_OPCODE 0XCC01 ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_STATUS_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_EH_EYE_CAP_STATUS_PAYLOAD_OUT_SIZE 4 ++ ++struct cxl_mbox_eh_eye_cap_status_in { ++ u8 rsvd; ++ u8 rsvd2[3]; ++} __attribute__((packed)); ++ ++struct cxl_mbox_eh_eye_cap_status_out { ++ u8 stat; ++ u8 rsvd[3]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_eh_eye_cap_status(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct 
cxl_mbox_eh_eye_cap_status_out *eh_eye_cap_status_out; ++ int rc=0; ++ ++ cmd = cxl_cmd_new_raw(memdev, ++ CXL_MEM_COMMAND_ID_EH_EYE_CAP_STATUS_OPCODE); ++ if(!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_EYE_CAP_STATUS_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_EYE_CAP_STATUS) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_EH_EYE_CAP_STATUS); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ eh_eye_cap_status_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "=========================== EH Eye Cap Status ============================\n"); ++ fprintf(stdout, "Status: %x\n", eh_eye_cap_status_out->stat); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_CFG CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_CFG_OPCODE 0XCC06 ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_CFG_PAYLOAD_IN_SIZE 13 ++ ++struct cxl_mbox_eh_link_dbg_cfg_in { ++ u8 mode; ++ __le16 lane_mask; ++ u8 rate_mask; ++ __le32 timer_us; ++ __le32 cap_delay_us; ++ u8 max_cap; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_eh_link_dbg_cfg(struct cxl_memdev *memdev, u8 port_id, u8 op_mode, ++ u8 cap_type, u16 lane_mask, u8 rate_mask, u32 timer_us, u32 cap_delay_us, u8 max_cap) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_eh_link_dbg_cfg_in *eh_link_dbg_cfg_in; ++ int rc=0; ++ ++ u8 modes; ++ modes = ((port_id) | (op_mode << 2) | (cap_type <<4)); ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_EH_LINK_DBG_CFG_OPCODE); ++ if(!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_LINK_DBG_CFG_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ eh_link_dbg_cfg_in = (void *) cmd->send_cmd->in.payload; ++ eh_link_dbg_cfg_in->mode = modes; ++ eh_link_dbg_cfg_in->lane_mask = cpu_to_le16(lane_mask); ++ eh_link_dbg_cfg_in->rate_mask = rate_mask; ++ eh_link_dbg_cfg_in->timer_us = cpu_to_le32(timer_us); ++ eh_link_dbg_cfg_in->cap_delay_us = cpu_to_le32(cap_delay_us); ++ eh_link_dbg_cfg_in->max_cap = max_cap; ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ 
cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_LINK_DBG_CFG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_EH_LINK_DBG_CFG); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_ENTRY_DUMP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_ENTRY_DUMP_OPCODE 0XCC07 ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_ENTRY_DUMP_PAYLOAD_IN_SIZE 1 ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_ENTRY_DUMP_PAYLOAD_OUT_SIZE 34 ++ ++struct cxl_mbox_eh_link_dbg_entry_dump_in { ++ u8 entry_idx; ++} __attribute__((packed)); ++ ++struct cxl_mbox_eh_link_dbg_entry_dump_out { ++ u8 cap_info; ++ u8 cap_reason; ++ __le32 l2r_reason; ++ __le64 start_time; ++ __le64 end_time; ++ u8 start_rate; ++ u8 end_rate; ++ u8 start_state; ++ u8 end_state; ++ __le32 start_status; ++ __le32 end_status; ++} __attribute__((packed)); ++ ++struct eh_link_dbg_entry_dump_fields { ++ u8 entry_idx; ++ u8 entry_num; ++}; ++ ++CXL_EXPORT int cxl_memdev_eh_link_dbg_entry_dump(struct cxl_memdev *memdev, u8 entry_idx) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_eh_link_dbg_entry_dump_in *eh_link_dbg_entry_dump_in; ++ struct cxl_mbox_eh_link_dbg_entry_dump_out *eh_link_dbg_entry_dump_out; ++ struct eh_link_dbg_entry_dump_fields *cap_info_fields; ++ u8 entry_idx_shift = 0; ++ u8 entry_num_shift = 4; ++ u8 entry_idx_mask = (1 << entry_num_shift) - (1 << entry_idx_shift); // 0-3 ++ u8 entry_num_mask = 0xff - (1 << entry_num_shift) + 1; // 4-7 ++ int rc=0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_EH_LINK_DBG_ENTRY_DUMP_OPCODE); ++ if(!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_LINK_DBG_ENTRY_DUMP_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ eh_link_dbg_entry_dump_in = (void *) cmd->send_cmd->in.payload; ++ eh_link_dbg_entry_dump_in->entry_idx = entry_idx; ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_LINK_DBG_ENTRY_DUMP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_EH_LINK_DBG_ENTRY_DUMP); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ eh_link_dbg_entry_dump_out = (void *)cmd->send_cmd->out.payload; ++ ++ 
cap_info_fields->entry_idx = (eh_link_dbg_entry_dump_out->cap_info & entry_idx_mask) >> entry_idx_shift; ++ cap_info_fields->entry_num = (eh_link_dbg_entry_dump_out->cap_info & entry_num_mask) >> entry_num_shift; ++ ++ fprintf(stdout, "=========================== EH Link Debug Entry Dump ============================\n"); ++ fprintf(stdout, "Capture Info (Entry Index): %x\n", cap_info_fields->entry_idx); ++ fprintf(stdout, "Capture Info (Entry Num): %x\n", cap_info_fields->entry_num); ++ fprintf(stdout, "Capture Reason: %x\n", eh_link_dbg_entry_dump_out->cap_reason); ++ fprintf(stdout, "L2R Reason: %x\n", le32_to_cpu(eh_link_dbg_entry_dump_out->l2r_reason)); ++ fprintf(stdout, "Capture Start Timestamp: %lx\n", le64_to_cpu(eh_link_dbg_entry_dump_out->start_time)); ++ fprintf(stdout, "Capture End Timestamp: %lx\n", le64_to_cpu(eh_link_dbg_entry_dump_out->end_time)); ++ fprintf(stdout, "Capture Start Rate: %x\n", eh_link_dbg_entry_dump_out->start_rate); ++ fprintf(stdout, "Capture End Rate: %x\n", eh_link_dbg_entry_dump_out->end_rate); ++ fprintf(stdout, "Capture Start State: %x\n", eh_link_dbg_entry_dump_out->start_state); ++ fprintf(stdout, "Capture End State: %x\n", eh_link_dbg_entry_dump_out->end_state); ++ fprintf(stdout, "Capture Start Status: %x\n", le32_to_cpu(eh_link_dbg_entry_dump_out->start_status)); ++ fprintf(stdout, "Capture End Status: %x\n", le32_to_cpu(eh_link_dbg_entry_dump_out->end_status)); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_LANE_DUMP CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_LANE_DUMP_OPCODE 0XCC08 ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_LANE_DUMP_PAYLOAD_IN_SIZE 2 ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_LANE_DUMP_PAYLOAD_OUT_SIZE 59 ++ ++struct cxl_mbox_eh_link_dbg_lane_dump_in { ++ u8 entry_idx; ++ u8 lane_idx; ++} __attribute__((packed)); ++ ++struct cxl_mbox_eh_link_dbg_lane_dump_out { ++ u8 cap_info; ++ u8 pga_gain; ++ u8 pga_off2; ++ u8 pga_off1; ++ u8 cdfe_a2; ++ u8 cdfe_a3; ++ u8 cdfe_a4; ++ u8 cdfe_a5; ++ u8 cdfe_a6; ++ u8 cdfe_a7; ++ u8 cdfe_a8; ++ u8 cdfe_a9; ++ u8 cdfe_a10; ++ u8 zobel_a_gain; ++ u8 zobel_b_gain; ++ __le16 zobel_dc_offset; ++ __le16 udfe_thr_0; ++ __le16 udfe_thr_1; ++ __le16 dc_offset; ++ __le16 median_amp; ++ u8 ph_ofs_t; ++ __le16 cdru_lock_time; ++ __le16 eh_workaround_stat; ++ __le16 los_toggle_cnt; ++ __le16 adapt_time; ++ __le16 cdr_lock_toggle_cnt_0; ++ __le16 jat_stat_0; ++ __le32 db_err; ++ __le32 reg_val0; ++ u8 reg_val1; ++ __le32 reg_val2; ++ __le32 reg_val3; ++ __le32 reg_val4; ++ ++} __attribute__((packed)); ++ ++struct eh_link_dbg_cap_info_fields { ++ u8 lane_idx; ++ u8 entry_idx; ++}; ++ ++struct eh_link_dbg_reg_val0_fields { ++ u8 fs_obs; ++ u8 lf_obs; ++ u8 pre_cursor; ++ u8 cursor; ++ u8 post_cursor; ++ u8 rsvd; ++}; ++ ++struct eh_link_dbg_reg_val1_fields { ++ u8 usp_tx_preset; ++ u8 dsp_tx_preset; ++}; ++ ++struct eh_link_dbg_reg_val2_fields { ++ u8 tx_p1a_d1en; ++ u8 tx_p1a_d2en; ++ u8 tx_p1a_amp_red; ++ u8 tx_p1b_d1en; ++ u8 tx_p1b_d2en; ++ u8 rsvd1; ++}; ++ ++struct eh_link_dbg_reg_val3_fields { ++ u8 tx_p1b_amp_red; ++ u8 tx_p2a_d1en; ++ u8 tx_p2a_d2en; ++ u8 tx_p2a_amp_red; ++ u8 rsvd2; ++}; ++ ++struct eh_link_dbg_reg_val4_fields { ++ u8 tx_p2b_d1en; ++ u8 tx_p2b_d2en; ++ u8 tx_p2b_amp_red; ++ u8 tx_p3a_d1en; ++ u8 rsvd3; ++}; ++ ++CXL_EXPORT int cxl_memdev_eh_link_dbg_lane_dump(struct cxl_memdev *memdev, u8 entry_idx, u8 lane_idx) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct 
cxl_command_info *cinfo; ++ struct cxl_mbox_eh_link_dbg_lane_dump_in *eh_link_dbg_lane_dump_in; ++ struct cxl_mbox_eh_link_dbg_lane_dump_out *eh_link_dbg_lane_dump_out; ++ struct eh_link_dbg_cap_info_fields *cap_info_fields; ++ struct eh_link_dbg_reg_val0_fields *reg_val0_fields; ++ struct eh_link_dbg_reg_val1_fields *reg_val1_fields; ++ struct eh_link_dbg_reg_val2_fields *reg_val2_fields; ++ struct eh_link_dbg_reg_val3_fields *reg_val3_fields; ++ struct eh_link_dbg_reg_val4_fields *reg_val4_fields; ++ ++ int rc=0; ++ // Initializing bit shifts and bit masks. ++ ++ // Capture Info ++ u8 lane_idx_shift = 0; ++ u8 entry_idx_shift = 4; ++ u8 lane_idx_mask = (1 << entry_idx_shift) - (1 << lane_idx_shift); // 0-3 ++ u8 entry_idx_mask = 0xff - (1 << entry_idx_shift) + 1; // 4-7 ++ ++ // register data 0 ++ u8 fs_obs_shift = 0; ++ u8 lf_obs_shift = 6; ++ u8 pre_cursor_shift = 12; ++ u8 cursor_shift = 18; ++ u8 post_cursor_shift = 24; ++ u8 rsvd_shift = 30; ++ u32 fs_obs_mask = (1 << lf_obs_shift) - (1 << fs_obs_shift); // 0-5 ++ u32 lf_obs_mask = (1 << pre_cursor_shift) - (1 << lf_obs_shift); // 6-11 ++ u32 pre_cursor_mask = (1 << cursor_shift) - (1 << pre_cursor_shift); // 12-17 ++ u32 cursor_mask = (1 << post_cursor_shift) - (1 << cursor_shift); //18-23 ++ u32 post_cursor_mask = (1 << rsvd_shift) - (1 << post_cursor_shift); //24-29 ++ ++ // register data 1 ++ u8 usp_tx_preset_shift = 0; ++ u8 dsp_tx_preset_shift = 4; ++ u32 usp_tx_preset_mask = (1 << dsp_tx_preset_shift) - (1<< usp_tx_preset_shift); // 0-3 ++ u32 dsp_tx_preset_mask = 0xff - (1 << dsp_tx_preset_shift); // 4-7 ++ ++ // register data 2 ++ u8 tx_p1a_d1en_shift = 0; ++ u8 tx_p1a_d2en_shift = 6; ++ u8 tx_p1a_amp_red_shift = 12; ++ u8 tx_p1b_d1en_shift = 18; ++ u8 tx_p1b_d2en_shift = 24; ++ u8 rsvd1_shift = 30; ++ u32 tx_p1a_d1en_mask = (1 << tx_p1a_d2en_shift) - (1 << tx_p1a_d1en_shift); ++ u32 tx_p1a_d2en_mask = (1 << tx_p1a_amp_red_shift) - (1 << tx_p1a_d2en_shift); ++ u32 tx_p1a_amp_red_mask = (1 << tx_p1b_d1en_shift) - (1 << tx_p1a_amp_red_shift); ++ u32 tx_p1b_d1en_mask = (1 << tx_p1b_d2en_shift) - (1 << tx_p1b_d1en_shift); ++ u32 tx_p1b_d2en_mask = (1 << rsvd1_shift) - (1 << tx_p1b_d2en_shift); ++ ++ // register data 3 ++ u8 tx_p1b_amp_red_shift = 0; ++ u8 tx_p2a_d1en_shift = 6; ++ u8 tx_p2a_d2en_shift = 12; ++ u8 tx_p2a_amp_red_shift = 18; ++ u8 rsvd2_shift = 24; ++ u32 tx_p1b_amp_red_mask = (1 << tx_p2a_d1en_shift) - (1 << tx_p1b_amp_red_shift); ++ u32 tx_p2a_d1en_mask = (1 << tx_p2a_d2en_shift) - (1 << tx_p2a_d1en_shift); ++ u32 tx_p2a_d2en_mask = (1 << tx_p2a_amp_red_shift) - (1 << tx_p2a_d2en_shift); ++ u32 tx_p2a_amp_red_mask = (1 << rsvd2_shift) - (1 << tx_p2a_amp_red_shift); ++ ++ // register data 4 ++ u8 tx_p2b_d1en_shift = 0; ++ u8 tx_p2b_d2en_shift = 6; ++ u8 tx_p2b_amp_red_shift = 12; ++ u8 tx_p3a_d1en_shift = 18; ++ u8 rsvd3_shift = 24; ++ u32 tx_p2b_d1en_mask = (1 << tx_p2b_d2en_shift) - (1 << tx_p2b_d1en_shift); ++ u32 tx_p2b_d2en_mask = (1 << tx_p2b_amp_red_shift) - (1 << tx_p2b_d2en_shift); ++ u32 tx_p2b_amp_red_mask = (1 << tx_p3a_d1en_shift) - (1 << tx_p2b_amp_red_shift); ++ u32 tx_p3a_d1en_mask = (1 << rsvd3_shift) - (1 << tx_p3a_d1en_shift); ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_EH_LINK_DBG_LANE_DUMP_OPCODE); ++ if(!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ cinfo->size_in = 
CXL_MEM_COMMAND_ID_EH_LINK_DBG_LANE_DUMP_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ eh_link_dbg_lane_dump_in = (void *) cmd->send_cmd->in.payload; ++ eh_link_dbg_lane_dump_in->entry_idx = entry_idx; ++ eh_link_dbg_lane_dump_in->lane_idx = lane_idx; ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_LINK_DBG_LANE_DUMP) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_EH_LINK_DBG_LANE_DUMP); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ eh_link_dbg_lane_dump_out = (void *)cmd->send_cmd->out.payload; ++ ++ cap_info_fields->lane_idx = (eh_link_dbg_lane_dump_out->cap_info & lane_idx_mask) >> lane_idx_shift; ++ cap_info_fields->entry_idx = (eh_link_dbg_lane_dump_out->cap_info & entry_idx_mask) >> entry_idx_shift; ++ ++ reg_val0_fields->fs_obs = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val0) & fs_obs_mask) >> fs_obs_shift; ++ reg_val0_fields->lf_obs = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val0) & lf_obs_mask) >> lf_obs_shift; ++ reg_val0_fields->pre_cursor = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val0) & pre_cursor_mask) >> pre_cursor_shift; ++ reg_val0_fields->cursor = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val0) & cursor_mask) >> cursor_shift; ++ reg_val0_fields->post_cursor = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val0) & post_cursor_mask) >> post_cursor_shift; ++ ++ reg_val1_fields->usp_tx_preset = (eh_link_dbg_lane_dump_out->reg_val1 & usp_tx_preset_mask) >> usp_tx_preset_shift; ++ reg_val1_fields->dsp_tx_preset = (eh_link_dbg_lane_dump_out->reg_val1 & dsp_tx_preset_mask) >> dsp_tx_preset_shift; ++ ++ reg_val2_fields->tx_p1a_d1en = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val2) & tx_p1a_d1en_mask) >> tx_p1a_d1en_shift; ++ reg_val2_fields->tx_p1a_d2en = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val2) & tx_p1a_d2en_mask) >> tx_p1a_d2en_shift; ++ reg_val2_fields->tx_p1a_amp_red = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val2) & tx_p1a_amp_red_mask) >> tx_p1a_amp_red_shift; ++ reg_val2_fields->tx_p1b_d1en = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val2) & tx_p1b_d1en_mask) >> tx_p1b_d1en_shift; ++ reg_val2_fields->tx_p1b_d2en = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val2) & tx_p1b_d2en_mask) >> tx_p1b_d2en_shift; ++ ++ reg_val3_fields->tx_p1b_amp_red = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val3) & tx_p1b_amp_red_mask) >> tx_p1b_amp_red_shift; ++ reg_val3_fields->tx_p2a_d1en = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val3) & tx_p2a_d1en_mask) >> tx_p2a_d1en_shift; ++ reg_val3_fields->tx_p2a_d2en = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val3) & tx_p2a_d2en_mask) >> tx_p2a_d2en_shift; ++ reg_val3_fields->tx_p2a_amp_red = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val3) & tx_p2a_amp_red_mask) >> tx_p2a_amp_red_shift; ++ ++ reg_val4_fields->tx_p2b_d1en = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val4) & tx_p2b_d1en_mask) >> 
tx_p2b_d1en_shift; ++ reg_val4_fields->tx_p2b_d2en = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val4) & tx_p2b_d2en_mask) >> tx_p2b_d2en_shift; ++ reg_val4_fields->tx_p2b_amp_red = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val4) & tx_p2b_amp_red_mask) >> tx_p2b_amp_red_shift; ++ reg_val4_fields->tx_p3a_d1en = (le32_to_cpu(eh_link_dbg_lane_dump_out->reg_val4) & tx_p3a_d1en_mask) >> tx_p3a_d1en_shift; ++ ++ fprintf(stdout, "=========================== EH Link Debug Lane Dump ============================\n"); ++ fprintf(stdout, "Capture Lane: %x\n", cap_info_fields->lane_idx); ++ fprintf(stdout, "Capture Entry Index: %x\n", cap_info_fields->entry_idx); ++ fprintf(stdout, "PGA Gain: %x\n", eh_link_dbg_lane_dump_out->pga_gain); ++ fprintf(stdout, "PGA offset 2: %x\n", eh_link_dbg_lane_dump_out->pga_off2); ++ fprintf(stdout, "PGA offset 1: %x\n", eh_link_dbg_lane_dump_out->pga_off1); ++ fprintf(stdout, "CDFE A2: %x\n", eh_link_dbg_lane_dump_out->cdfe_a2); ++ fprintf(stdout, "CDFE A3: %x\n", eh_link_dbg_lane_dump_out->cdfe_a3); ++ fprintf(stdout, "CDFE A4: %x\n", eh_link_dbg_lane_dump_out->cdfe_a4); ++ fprintf(stdout, "CDFE A5: %x\n", eh_link_dbg_lane_dump_out->cdfe_a5); ++ fprintf(stdout, "CDFE A6: %x\n", eh_link_dbg_lane_dump_out->cdfe_a6); ++ fprintf(stdout, "CDFE A7: %x\n", eh_link_dbg_lane_dump_out->cdfe_a7); ++ fprintf(stdout, "CDFE A8: %x\n", eh_link_dbg_lane_dump_out->cdfe_a8); ++ fprintf(stdout, "CDFE A9: %x\n", eh_link_dbg_lane_dump_out->cdfe_a9); ++ fprintf(stdout, "CDFE A10: %x\n", eh_link_dbg_lane_dump_out->cdfe_a10); ++ fprintf(stdout, "Zobel A Gain: %x\n", eh_link_dbg_lane_dump_out->zobel_a_gain); ++ fprintf(stdout, "Zobel B Gain: %x\n", eh_link_dbg_lane_dump_out->zobel_b_gain); ++ fprintf(stdout, "Zobel DC Offset: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->zobel_dc_offset)); ++ fprintf(stdout, "UDFE_THR_0: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->udfe_thr_0)); ++ fprintf(stdout, "UDFE_THR_1: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->udfe_thr_1)); ++ fprintf(stdout, "DC_OFFSET: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->dc_offset)); ++ fprintf(stdout, "MEDIAN_AMP: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->median_amp)); ++ fprintf(stdout, "PH_OFS_T: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->ph_ofs_t)); ++ fprintf(stdout, "CDRU lock time: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->cdru_lock_time)); ++ fprintf(stdout, "EH Workaround Status: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->eh_workaround_stat)); ++ fprintf(stdout, "LOS toggle count: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->los_toggle_cnt)); ++ fprintf(stdout, "Adaptation time: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->adapt_time)); ++ fprintf(stdout, "CDR lock toggle count (arg = 0): %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->cdr_lock_toggle_cnt_0)); ++ fprintf(stdout, "JAT status (arg = 0): %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->jat_stat_0)); ++ fprintf(stdout, "dorbell error: %x\n", le16_to_cpu(eh_link_dbg_lane_dump_out->db_err)); ++ fprintf(stdout, "==== EH register 0 value capture ====\n"); ++ fprintf(stdout, "FS from PIPE interface: %x\n", reg_val0_fields->fs_obs); ++ fprintf(stdout, "LF from PIPE interface: %x\n", reg_val0_fields->lf_obs); ++ fprintf(stdout, "Pre-cursor value from PIPE interface: %x\n", reg_val0_fields->pre_cursor); ++ fprintf(stdout, "Cursor value from PIPE interface: %x\n", reg_val0_fields->cursor); ++ fprintf(stdout, "Post-cursor value from PIPE interface: %x\n", reg_val0_fields->post_cursor); ++ fprintf(stdout, "==== EH register 1 value capture 
====\n"); ++ fprintf(stdout, "US_PORT_TX_PRESET for current link rate: %x\n", reg_val1_fields->usp_tx_preset); ++ fprintf(stdout, "DS_PORT_TX_PRESET for current link rate: %x\n", reg_val1_fields->dsp_tx_preset); ++ fprintf(stdout, "==== EH register 2 value capture ====\n"); ++ fprintf(stdout, "TX_P1A_D1EN: %x\n", reg_val2_fields->tx_p1a_d1en); ++ fprintf(stdout, "TX_P1A_D2EN: %x\n", reg_val2_fields->tx_p1a_d2en); ++ fprintf(stdout, "TX_P1A_AMP_RED: %x\n", reg_val2_fields->tx_p1a_amp_red); ++ fprintf(stdout, "TX_P1B_D1EN: %x\n", reg_val2_fields->tx_p1b_d1en); ++ fprintf(stdout, "TX_P1B_D2EN: %x\n", reg_val2_fields->tx_p1b_d2en); ++ fprintf(stdout, "==== EH register 3 value capture ====\n"); ++ fprintf(stdout, "TX_P1B_AMP_RED: %x\n", reg_val3_fields->tx_p1b_amp_red); ++ fprintf(stdout, "TX_P2A_D1EN: %x\n", reg_val3_fields->tx_p2a_d1en); ++ fprintf(stdout, "TX_P2A_D2EN: %x\n", reg_val3_fields->tx_p2a_d2en); ++ fprintf(stdout, "TX_P2A_AMP_RED: %x\n", reg_val3_fields->tx_p2a_amp_red); ++ fprintf(stdout, "==== EH register 4 value capture ====\n"); ++ fprintf(stdout, "TX_P2B_D1EN: %x\n", reg_val4_fields->tx_p2b_d1en); ++ fprintf(stdout, "TX_P2B_D2EN: %x\n", reg_val4_fields->tx_p2b_d2en); ++ fprintf(stdout, "TX_P2B_AMP_RED: %x\n", reg_val4_fields->tx_p2b_amp_red); ++ fprintf(stdout, "TX_P3A_D1EN: %x\n", reg_val4_fields->tx_p3a_d1en); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_RESET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_RESET_OPCODE 0XCC09 ++#define CXL_MEM_COMMAND_ID_EH_LINK_DBG_RESET_PAYLOAD_IN_SIZE 0 ++ ++CXL_EXPORT int cxl_memdev_eh_link_dbg_reset(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ int rc=0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_EH_LINK_DBG_RESET_OPCODE); ++ if(!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_EH_LINK_DBG_RESET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_EH_LINK_DBG_RESET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, ++ CXL_MEM_COMMAND_ID_EH_LINK_DBG_RESET); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++ fprintf(stdout, "EH Link Reset Completed \n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_FBIST_STOPCONFIG_SET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_STOPCONFIG_SET_OPCODE 49671 ++#define CXL_MEM_COMMAND_ID_FBIST_STOPCONFIG_SET_PAYLOAD_IN_SIZE 7 ++ ++struct cxl_mbox_fbist_stopconfig_set_in { ++ __le32 fbist_id; ++ u8 stop_on_wresp; ++ u8 stop_on_rresp; ++ u8 
stop_on_rdataerr; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_fbist_stopconfig_set(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 stop_on_wresp, u8 stop_on_rresp, u8 stop_on_rdataerr) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_stopconfig_set_in *fbist_stopconfig_set_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_STOPCONFIG_SET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_STOPCONFIG_SET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_stopconfig_set_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_stopconfig_set_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_stopconfig_set_in->stop_on_wresp = stop_on_wresp; ++ fbist_stopconfig_set_in->stop_on_rresp = stop_on_rresp; ++ fbist_stopconfig_set_in->stop_on_rdataerr = stop_on_rdataerr; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_STOPCONFIG_SET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_STOPCONFIG_SET); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_CYCLECOUNT_SET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_CYCLECOUNT_SET_OPCODE 49672 ++#define CXL_MEM_COMMAND_ID_FBIST_CYCLECOUNT_SET_PAYLOAD_IN_SIZE 16 ++ ++struct cxl_mbox_fbist_cyclecount_set_in { ++ __le32 fbist_id; ++ u8 txg_nr; ++ u8 rsvd[3]; ++ __le64 cyclecount; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_fbist_cyclecount_set(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u64 cyclecount) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_cyclecount_set_in *fbist_cyclecount_set_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_CYCLECOUNT_SET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_CYCLECOUNT_SET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_cyclecount_set_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_cyclecount_set_in->fbist_id = cpu_to_le32(fbist_id); ++ 
fbist_cyclecount_set_in->txg_nr = txg_nr; ++ fbist_cyclecount_set_in->cyclecount = cpu_to_le64(cyclecount); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_CYCLECOUNT_SET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_CYCLECOUNT_SET); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_RESET_SET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_RESET_SET_OPCODE 49673 ++#define CXL_MEM_COMMAND_ID_FBIST_RESET_SET_PAYLOAD_IN_SIZE 6 ++ ++struct cxl_mbox_fbist_reset_set_in { ++ __le32 fbist_id; ++ u8 txg0_reset; ++ u8 txg1_reset; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_fbist_reset_set(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg0_reset, u8 txg1_reset) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_reset_set_in *fbist_reset_set_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_RESET_SET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_RESET_SET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_reset_set_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_reset_set_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_reset_set_in->txg0_reset = txg0_reset; ++ fbist_reset_set_in->txg1_reset = txg1_reset; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_RESET_SET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_RESET_SET); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_RUN_SET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_RUN_SET_OPCODE 49674 ++#define CXL_MEM_COMMAND_ID_FBIST_RUN_SET_PAYLOAD_IN_SIZE 6 ++ ++struct cxl_mbox_fbist_run_set_in { ++ __le32 fbist_id; ++ u8 txg0_run; ++ u8 txg1_run; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_fbist_run_set(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg0_run, u8 txg1_run) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; 
++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_run_set_in *fbist_run_set_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_RUN_SET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_RUN_SET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_run_set_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_run_set_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_run_set_in->txg0_run = txg0_run; ++ fbist_run_set_in->txg1_run = txg1_run; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_RUN_SET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_RUN_SET); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_RUN_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_RUN_GET_OPCODE 49675 ++#define CXL_MEM_COMMAND_ID_FBIST_RUN_GET_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_FBIST_RUN_GET_PAYLOAD_OUT_SIZE 2 ++ ++struct cxl_mbox_fbist_run_get_in { ++ __le32 fbist_id; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_run_get_out { ++ u8 txg0_run; ++ u8 txg1_run; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_run_get(struct cxl_memdev *memdev, ++ u32 fbist_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_run_get_in *fbist_run_get_in; ++ struct cxl_mbox_fbist_run_get_out *fbist_run_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_RUN_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_RUN_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_run_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_run_get_in->fbist_id = cpu_to_le32(fbist_id); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if 
(cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_RUN_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_RUN_GET); ++ return -EINVAL; ++ } ++ ++ fbist_run_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "========================== read run flags of txg[0|1] ==========================\n"); ++ fprintf(stdout, "TXG0 Run: %x\n", fbist_run_get_out->txg0_run); ++ fprintf(stdout, "TXG1 Run: %x\n", fbist_run_get_out->txg1_run); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_XFER_REM_CNT_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_XFER_REM_CNT_GET_OPCODE 49680 ++#define CXL_MEM_COMMAND_ID_FBIST_XFER_REM_CNT_GET_PAYLOAD_IN_SIZE 5 ++#define CXL_MEM_COMMAND_ID_FBIST_XFER_REM_CNT_GET_PAYLOAD_OUT_SIZE 2 ++ ++struct cxl_mbox_fbist_xfer_rem_cnt_get_in { ++ __le32 fbist_id; ++ u8 thread_nr; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_xfer_rem_cnt_get_out { ++ __le16 xfer_rem; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_xfer_rem_cnt_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 thread_nr) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_xfer_rem_cnt_get_in *fbist_xfer_rem_cnt_get_in; ++ struct cxl_mbox_fbist_xfer_rem_cnt_get_out *fbist_xfer_rem_cnt_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_XFER_REM_CNT_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_XFER_REM_CNT_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_xfer_rem_cnt_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_xfer_rem_cnt_get_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_xfer_rem_cnt_get_in->thread_nr = thread_nr; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_XFER_REM_CNT_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_XFER_REM_CNT_GET); ++ return -EINVAL; ++ } ++ ++ fbist_xfer_rem_cnt_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "================== read a thread's remaining transfer counts ===================\n"); ++ fprintf(stdout, "XFER Remaining: %x\n", le16_to_cpu(fbist_xfer_rem_cnt_get_out->xfer_rem)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_LAST_EXP_READ_DATA_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_LAST_EXP_READ_DATA_GET_OPCODE 49681 ++#define CXL_MEM_COMMAND_ID_FBIST_LAST_EXP_READ_DATA_GET_PAYLOAD_IN_SIZE 4 ++#define 
CXL_MEM_COMMAND_ID_FBIST_LAST_EXP_READ_DATA_GET_PAYLOAD_OUT_SIZE 128 ++ ++struct cxl_mbox_fbist_last_exp_read_data_get_in { ++ __le32 fbist_id; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_last_exp_read_data_get_out { ++ __le32 last_rd_data[16]; ++ __le32 exp_rd_data[16]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_last_exp_read_data_get(struct cxl_memdev *memdev, ++ u32 fbist_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_last_exp_read_data_get_in *fbist_last_exp_read_data_get_in; ++ struct cxl_mbox_fbist_last_exp_read_data_get_out *fbist_last_exp_read_data_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_LAST_EXP_READ_DATA_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_LAST_EXP_READ_DATA_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_last_exp_read_data_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_last_exp_read_data_get_in->fbist_id = cpu_to_le32(fbist_id); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_LAST_EXP_READ_DATA_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_LAST_EXP_READ_DATA_GET); ++ return -EINVAL; ++ } ++ ++ fbist_last_exp_read_data_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "========================= reads last and expected data =========================\n"); ++ fprintf(stdout, "last_rd_data: "); ++ for (int i = 0; i < 16; i++) { ++ fprintf(stdout, "last_rd_data[%d]: %x\n", i, le32_to_cpu(fbist_last_exp_read_data_get_out->last_rd_data[i])); ++ } ++ fprintf(stdout, "\n"); ++ fprintf(stdout, "exp_rd_data: "); ++ for (int i = 0; i < 16; i++) { ++ fprintf(stdout, "exp_rd_data[%d]: %x\n", i, le32_to_cpu(fbist_last_exp_read_data_get_out->exp_rd_data[i])); ++ } ++ fprintf(stdout, "\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_FBIST_CURR_CYCLE_CNT_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_CURR_CYCLE_CNT_GET_OPCODE 49682 ++#define CXL_MEM_COMMAND_ID_FBIST_CURR_CYCLE_CNT_GET_PAYLOAD_IN_SIZE 5 ++#define CXL_MEM_COMMAND_ID_FBIST_CURR_CYCLE_CNT_GET_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_fbist_curr_cycle_cnt_get_in { ++ __le32 fbist_id; ++ u8 txg_nr; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_curr_cycle_cnt_get_out { ++ __le64 curr_cycle_cnt; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_curr_cycle_cnt_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info 
*cinfo; ++ struct cxl_mbox_fbist_curr_cycle_cnt_get_in *fbist_curr_cycle_cnt_get_in; ++ struct cxl_mbox_fbist_curr_cycle_cnt_get_out *fbist_curr_cycle_cnt_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_CURR_CYCLE_CNT_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_CURR_CYCLE_CNT_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_curr_cycle_cnt_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_curr_cycle_cnt_get_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_curr_cycle_cnt_get_in->txg_nr = txg_nr; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_CURR_CYCLE_CNT_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_CURR_CYCLE_CNT_GET); ++ return -EINVAL; ++ } ++ ++ fbist_curr_cycle_cnt_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "======================= read a txg's current cycle count =======================\n"); ++ fprintf(stdout, "Current Cycle Count: %lx\n", le64_to_cpu(fbist_curr_cycle_cnt_get_out->curr_cycle_cnt)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_STATUS_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_STATUS_GET_OPCODE 49683 ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_STATUS_GET_PAYLOAD_IN_SIZE 6 ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_STATUS_GET_PAYLOAD_OUT_SIZE 4 ++ ++struct cxl_mbox_fbist_thread_status_get_in { ++ __le32 fbist_id; ++ u8 txg_nr; ++ u8 thread_nr; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_thread_status_get_out { ++ u8 thread_state; ++ u8 rsvd; ++ __le16 curr_thread_desc_index; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_thread_status_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u8 thread_nr) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_thread_status_get_in *fbist_thread_status_get_in; ++ struct cxl_mbox_fbist_thread_status_get_out *fbist_thread_status_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_THREAD_STATUS_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_THREAD_STATUS_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = 
(u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_thread_status_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_thread_status_get_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_thread_status_get_in->txg_nr = txg_nr; ++ fbist_thread_status_get_in->thread_nr = thread_nr; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_THREAD_STATUS_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_THREAD_STATUS_GET); ++ return -EINVAL; ++ } ++ ++ fbist_thread_status_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "========================== read a txg's thread status ==========================\n"); ++ fprintf(stdout, "Thread State: %x\n", fbist_thread_status_get_out->thread_state); ++ fprintf(stdout, "curr_thread_desc_index: %x\n", le16_to_cpu(fbist_thread_status_get_out->curr_thread_desc_index)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_TRANS_CNT_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_TRANS_CNT_GET_OPCODE 49684 ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_TRANS_CNT_GET_PAYLOAD_IN_SIZE 6 ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_TRANS_CNT_GET_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_fbist_thread_trans_cnt_get_in { ++ __le32 fbist_id; ++ u8 txg_nr; ++ u8 thread_nr; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_thread_trans_cnt_get_out { ++ __le64 transaction_cnt; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_thread_trans_cnt_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u8 thread_nr) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_thread_trans_cnt_get_in *fbist_thread_trans_cnt_get_in; ++ struct cxl_mbox_fbist_thread_trans_cnt_get_out *fbist_thread_trans_cnt_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_THREAD_TRANS_CNT_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_THREAD_TRANS_CNT_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_thread_trans_cnt_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_thread_trans_cnt_get_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_thread_trans_cnt_get_in->txg_nr = txg_nr; ++ fbist_thread_trans_cnt_get_in->thread_nr = thread_nr; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ 
cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_THREAD_TRANS_CNT_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_THREAD_TRANS_CNT_GET); ++ return -EINVAL; ++ } ++ ++ fbist_thread_trans_cnt_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "==================== read a txg's thread transaction count =====================\n"); ++ fprintf(stdout, "Transaction Count: %lx\n", le64_to_cpu(fbist_thread_trans_cnt_get_out->transaction_cnt)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_BANDWIDTH_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_BANDWIDTH_GET_OPCODE 49685 ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_BANDWIDTH_GET_PAYLOAD_IN_SIZE 6 ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_BANDWIDTH_GET_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_fbist_thread_bandwidth_get_in { ++ __le32 fbist_id; ++ u8 txg_nr; ++ u8 thread_nr; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_thread_bandwidth_get_out { ++ __le32 read_bw_cnt; ++ __le32 write_bw_cnt; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_thread_bandwidth_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u8 thread_nr) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_thread_bandwidth_get_in *fbist_thread_bandwidth_get_in; ++ struct cxl_mbox_fbist_thread_bandwidth_get_out *fbist_thread_bandwidth_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_THREAD_BANDWIDTH_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_THREAD_BANDWIDTH_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_thread_bandwidth_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_thread_bandwidth_get_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_thread_bandwidth_get_in->txg_nr = txg_nr; ++ fbist_thread_bandwidth_get_in->thread_nr = thread_nr; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_THREAD_BANDWIDTH_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_THREAD_BANDWIDTH_GET); ++ return -EINVAL; ++ } ++ ++ fbist_thread_bandwidth_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "================= read a txg's thread rd/wr bandwidth counters =================\n"); ++ fprintf(stdout, "Read BW Count: %x\n", le32_to_cpu(fbist_thread_bandwidth_get_out->read_bw_cnt)); ++ fprintf(stdout, "Write BW Count: %x\n", 
le32_to_cpu(fbist_thread_bandwidth_get_out->write_bw_cnt)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_LATENCY_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_LATENCY_GET_OPCODE 49686 ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_LATENCY_GET_PAYLOAD_IN_SIZE 6 ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_LATENCY_GET_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_fbist_thread_latency_get_in { ++ __le32 fbist_id; ++ u8 txg_nr; ++ u8 thread_nr; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_thread_latency_get_out { ++ __le32 read_latency_cnt; ++ __le32 write_latency_cnt; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_thread_latency_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u8 thread_nr) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_thread_latency_get_in *fbist_thread_latency_get_in; ++ struct cxl_mbox_fbist_thread_latency_get_out *fbist_thread_latency_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_THREAD_LATENCY_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_THREAD_LATENCY_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_thread_latency_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_thread_latency_get_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_thread_latency_get_in->txg_nr = txg_nr; ++ fbist_thread_latency_get_in->thread_nr = thread_nr; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_THREAD_LATENCY_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_THREAD_LATENCY_GET); ++ return -EINVAL; ++ } ++ ++ fbist_thread_latency_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "================== read a txg's thread rd/wr latency counters ==================\n"); ++ fprintf(stdout, "Read Latency Count: %x\n", le32_to_cpu(fbist_thread_latency_get_out->read_latency_cnt)); ++ fprintf(stdout, "Write Latency Count: %x\n", le32_to_cpu(fbist_thread_latency_get_out->write_latency_cnt)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_PERF_MON_SET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_PERF_MON_SET_OPCODE 49687 ++#define CXL_MEM_COMMAND_ID_FBIST_THREAD_PERF_MON_SET_PAYLOAD_IN_SIZE 10 ++ ++struct cxl_mbox_fbist_thread_perf_mon_set_in { ++ __le32 fbist_id; ++ u8 txg_nr; ++ u8 thread_nr; ++ u8 pmon_preset_en; ++ u8 pmon_clear_en; ++ u8 pmon_rollover; ++ u8 pmon_thread_lclk; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int 
cxl_memdev_fbist_thread_perf_mon_set(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u8 thread_nr, u8 pmon_preset_en, u8 pmon_clear_en, ++ u8 pmon_rollover, u8 pmon_thread_lclk) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_thread_perf_mon_set_in *fbist_thread_perf_mon_set_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_THREAD_PERF_MON_SET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_THREAD_PERF_MON_SET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_thread_perf_mon_set_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_thread_perf_mon_set_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_thread_perf_mon_set_in->txg_nr = txg_nr; ++ fbist_thread_perf_mon_set_in->thread_nr = thread_nr; ++ fbist_thread_perf_mon_set_in->pmon_preset_en = pmon_preset_en; ++ fbist_thread_perf_mon_set_in->pmon_clear_en = pmon_clear_en; ++ fbist_thread_perf_mon_set_in->pmon_rollover = pmon_rollover; ++ fbist_thread_perf_mon_set_in->pmon_thread_lclk = pmon_thread_lclk; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_THREAD_PERF_MON_SET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_THREAD_PERF_MON_SET); ++ return -EINVAL; ++ } ++ fprintf(stdout, "Command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_TOP_READ_STATUS0_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_TOP_READ_STATUS0_GET_OPCODE 49688 ++#define CXL_MEM_COMMAND_ID_FBIST_TOP_READ_STATUS0_GET_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_FBIST_TOP_READ_STATUS0_GET_PAYLOAD_OUT_SIZE 3 ++ ++struct cxl_mbox_fbist_top_read_status0_get_in { ++ __le32 fbist_id; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_top_read_status0_get_out { ++ __le16 tag_id_err_idx; ++ u8 thread_err_idx; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_top_read_status0_get(struct cxl_memdev *memdev, ++ u32 fbist_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_top_read_status0_get_in *fbist_top_read_status0_get_in; ++ struct cxl_mbox_fbist_top_read_status0_get_out *fbist_top_read_status0_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_TOP_READ_STATUS0_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size 
*/ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_TOP_READ_STATUS0_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_top_read_status0_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_top_read_status0_get_in->fbist_id = cpu_to_le32(fbist_id); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_TOP_READ_STATUS0_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_TOP_READ_STATUS0_GET); ++ return -EINVAL; ++ } ++ ++ fbist_top_read_status0_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "========================== read the top read status0 ===========================\n"); ++ fprintf(stdout, "tag_id_err_idx: %x\n", le16_to_cpu(fbist_top_read_status0_get_out->tag_id_err_idx)); ++ fprintf(stdout, "thread_err_idx: %x\n", fbist_top_read_status0_get_out->thread_err_idx); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_TOP_ERR_CNT_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_TOP_ERR_CNT_GET_OPCODE 49689 ++#define CXL_MEM_COMMAND_ID_FBIST_TOP_ERR_CNT_GET_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_FBIST_TOP_ERR_CNT_GET_PAYLOAD_OUT_SIZE 12 ++ ++struct cxl_mbox_fbist_top_err_cnt_get_in { ++ __le32 fbist_id; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_top_err_cnt_get_out { ++ __le32 rdata_err_cnt; ++ __le32 rresp_err_cnt; ++ __le32 wresp_err_cnt; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_top_err_cnt_get(struct cxl_memdev *memdev, ++ u32 fbist_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_top_err_cnt_get_in *fbist_top_err_cnt_get_in; ++ struct cxl_mbox_fbist_top_err_cnt_get_out *fbist_top_err_cnt_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_TOP_ERR_CNT_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_TOP_ERR_CNT_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_top_err_cnt_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_top_err_cnt_get_in->fbist_id = cpu_to_le32(fbist_id); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ 
goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_TOP_ERR_CNT_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_TOP_ERR_CNT_GET); ++ return -EINVAL; ++ } ++ ++ fbist_top_err_cnt_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "===== read read-dataframe, read-response and write-response error counters =====\n"); ++ fprintf(stdout, "Read Data Error Count: %x\n", le32_to_cpu(fbist_top_err_cnt_get_out->rdata_err_cnt)); ++ fprintf(stdout, "Read Response Error Count: %x\n", le32_to_cpu(fbist_top_err_cnt_get_out->rresp_err_cnt)); ++ fprintf(stdout, "Write Response Error Count: %x\n", le32_to_cpu(fbist_top_err_cnt_get_out->wresp_err_cnt)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_LAST_READ_ADDR_GET CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_LAST_READ_ADDR_GET_OPCODE 49690 ++#define CXL_MEM_COMMAND_ID_FBIST_LAST_READ_ADDR_GET_PAYLOAD_IN_SIZE 4 ++#define CXL_MEM_COMMAND_ID_FBIST_LAST_READ_ADDR_GET_PAYLOAD_OUT_SIZE 8 ++ ++struct cxl_mbox_fbist_last_read_addr_get_in { ++ __le32 fbist_id; ++} __attribute__((packed)); ++ ++struct cxl_mbox_fbist_last_read_addr_get_out { ++ __le64 last_read_addr; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_fbist_last_read_addr_get(struct cxl_memdev *memdev, ++ u32 fbist_id) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_last_read_addr_get_in *fbist_last_read_addr_get_in; ++ struct cxl_mbox_fbist_last_read_addr_get_out *fbist_last_read_addr_get_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_LAST_READ_ADDR_GET_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_LAST_READ_ADDR_GET_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_last_read_addr_get_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_last_read_addr_get_in->fbist_id = cpu_to_le32(fbist_id); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_LAST_READ_ADDR_GET) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_LAST_READ_ADDR_GET); ++ return -EINVAL; ++ } ++ ++ fbist_last_read_addr_get_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "========================== read the last read address ==========================\n"); ++ fprintf(stdout, "last_read_addr: %lx\n", le64_to_cpu(fbist_last_read_addr_get_out->last_read_addr)); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_SIMPLEDATA 
CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_SIMPLEDATA_OPCODE 49712 ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_SIMPLEDATA_PAYLOAD_IN_SIZE 24 ++ ++struct cxl_mbox_fbist_test_simpledata_in { ++ __le32 fbist_id; ++ u8 test_nr; ++ u8 rsvd[3]; ++ __le64 start_address; ++ __le64 num_bytes; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_fbist_test_simpledata(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 test_nr, u64 start_address, u64 num_bytes) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_test_simpledata_in *fbist_test_simpledata_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_TEST_SIMPLEDATA_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_TEST_SIMPLEDATA_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_test_simpledata_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_test_simpledata_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_test_simpledata_in->test_nr = test_nr; ++ fbist_test_simpledata_in->start_address = cpu_to_le64(start_address); ++ fbist_test_simpledata_in->num_bytes = cpu_to_le64(num_bytes); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_TEST_SIMPLEDATA) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_TEST_SIMPLEDATA); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_ADDRESSTEST CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_ADDRESSTEST_OPCODE 49713 ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_ADDRESSTEST_PAYLOAD_IN_SIZE 28 ++ ++struct cxl_mbox_fbist_test_addresstest_in { ++ __le32 fbist_id; ++ u8 test_nr; ++ u8 rsvd[3]; ++ __le64 start_address; ++ __le64 num_bytes; ++ __le32 seed; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_fbist_test_addresstest(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 test_nr, u64 start_address, u64 num_bytes, u32 seed) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_test_addresstest_in *fbist_test_addresstest_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_TEST_ADDRESSTEST_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_TEST_ADDRESSTEST_PAYLOAD_IN_SIZE; ++ if 
(cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_test_addresstest_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_test_addresstest_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_test_addresstest_in->test_nr = test_nr; ++ fbist_test_addresstest_in->start_address = cpu_to_le64(start_address); ++ fbist_test_addresstest_in->num_bytes = cpu_to_le64(num_bytes); ++ fbist_test_addresstest_in->seed = cpu_to_le32(seed); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_TEST_ADDRESSTEST) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_TEST_ADDRESSTEST); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_MOVINGINVERSION CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_MOVINGINVERSION_OPCODE 49714 ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_MOVINGINVERSION_PAYLOAD_IN_SIZE 28 ++ ++struct cxl_mbox_fbist_test_movinginversion_in { ++ __le32 fbist_id; ++ u8 test_nr; ++ u8 phase_nr; ++ __le16 rsvd; ++ __le64 start_address; ++ __le64 num_bytes; ++ __le32 ddrpage_size; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_fbist_test_movinginversion(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 test_nr, u8 phase_nr, u64 start_address, u64 num_bytes, ++ u32 ddrpage_size) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_test_movinginversion_in *fbist_test_movinginversion_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_TEST_MOVINGINVERSION_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_TEST_MOVINGINVERSION_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_test_movinginversion_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_test_movinginversion_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_test_movinginversion_in->test_nr = test_nr; ++ fbist_test_movinginversion_in->phase_nr = phase_nr; ++ fbist_test_movinginversion_in->start_address = cpu_to_le64(start_address); ++ fbist_test_movinginversion_in->num_bytes = cpu_to_le64(num_bytes); ++ fbist_test_movinginversion_in->ddrpage_size = cpu_to_le32(ddrpage_size); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { 
++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_TEST_MOVINGINVERSION) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_TEST_MOVINGINVERSION); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_RANDOMSEQUENCE CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_RANDOMSEQUENCE_OPCODE 49715 ++#define CXL_MEM_COMMAND_ID_FBIST_TEST_RANDOMSEQUENCE_PAYLOAD_IN_SIZE 36 ++ ++struct cxl_mbox_fbist_test_randomsequence_in { ++ __le32 fbist_id; ++ u8 phase_nr; ++ u8 rsvd[3]; ++ __le64 start_address; ++ __le64 num_bytes; ++ __le32 ddrpage_size; ++ __le32 seed_dr0; ++ __le32 seed_dr1; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_fbist_test_randomsequence(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 phase_nr, u64 start_address, u64 num_bytes, u32 ddrpage_size, ++ u32 seed_dr0, u32 seed_dr1) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_fbist_test_randomsequence_in *fbist_test_randomsequence_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_FBIST_TEST_RANDOMSEQUENCE_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_FBIST_TEST_RANDOMSEQUENCE_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ fbist_test_randomsequence_in = (void *) cmd->send_cmd->in.payload; ++ ++ fbist_test_randomsequence_in->fbist_id = cpu_to_le32(fbist_id); ++ fbist_test_randomsequence_in->phase_nr = phase_nr; ++ fbist_test_randomsequence_in->start_address = cpu_to_le64(start_address); ++ fbist_test_randomsequence_in->num_bytes = cpu_to_le64(num_bytes); ++ fbist_test_randomsequence_in->ddrpage_size = cpu_to_le32(ddrpage_size); ++ fbist_test_randomsequence_in->seed_dr0 = cpu_to_le32(seed_dr0); ++ fbist_test_randomsequence_in->seed_dr1 = cpu_to_le32(seed_dr1); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_FBIST_TEST_RANDOMSEQUENCE) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_FBIST_TEST_RANDOMSEQUENCE); ++ return -EINVAL; ++ } ++ fprintf(stdout, "command completed successfully\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_CONF_READ CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_CONF_READ_OPCODE 52992 ++#define CXL_MEM_COMMAND_ID_CONF_READ_PAYLOAD_IN_SIZE 8 ++#define 
CXL_MEM_COMMAND_ID_CONF_READ_PAYLOAD_OUT_SIZE 4 // varies ++ ++struct cxl_mbox_conf_read_in { ++ __le32 offset; ++ __le32 length; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_conf_read(struct cxl_memdev *memdev, ++ u32 offset, u32 length) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_conf_read_in *conf_read_in; ++ u8 *conf_read_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_CONF_READ_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_CONF_READ_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ conf_read_in = (void *) cmd->send_cmd->in.payload; ++ ++ conf_read_in->offset = cpu_to_le32(offset); ++ conf_read_in->length = cpu_to_le32(length); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_CONF_READ) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_CONF_READ); ++ return -EINVAL; ++ } ++ ++ fprintf(stdout, "command completed successfully\n"); ++ conf_read_out = (u8*)cmd->send_cmd->out.payload; ++ fprintf(stdout, "=========================== Read configuration file ============================\n"); ++ fprintf(stdout, "Output Payload:"); ++ for(int i=0; i<cmd->send_cmd->out.size; i++){ ++ if (i % 16 == 0) ++ { ++ fprintf(stdout, "\n%04x %02x ", i+offset, conf_read_out[i]); ++ } ++ else ++ { ++ fprintf(stdout, "%02x ", conf_read_out[i]); ++ } ++ } ++ fprintf(stdout, "\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_HCT_GET_CONFIG CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HCT_GET_CONFIG_OPCODE 50689 ++#define CXL_MEM_COMMAND_ID_HCT_GET_CONFIG_PAYLOAD_IN_SIZE 1 ++#define CXL_MEM_COMMAND_ID_HCT_GET_CONFIG_PAYLOAD_OUT_SIZE 132 ++#define HCT_GET_CONFIG_FIXED_PAYLOAD_OUT_SIZE 4 ++#define TRIG_CONFIG_PACKET_SIZE 4 ++ ++struct cxl_mbox_hct_get_config_in { ++ u8 hct_inst; ++} __attribute__((packed)); ++ ++struct cxl_mbox_hct_get_config_out { ++ u8 post_trig_depth; ++ u8 ignore_valid; ++ u8 rsvd; ++ u8 rsvd3; ++ __le32 trig_config[128]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_hct_get_config(struct cxl_memdev *memdev, ++ u8 hct_inst) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_hct_get_config_in *hct_get_config_in; ++ struct cxl_mbox_hct_get_config_out *hct_get_config_out; ++ int rc = 0; ++ int trig_config_size; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HCT_GET_CONFIG_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query =
cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_HCT_GET_CONFIG_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ hct_get_config_in = (void *) cmd->send_cmd->in.payload; ++ ++ hct_get_config_in->hct_inst = hct_inst; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HCT_GET_CONFIG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HCT_GET_CONFIG); ++ return -EINVAL; ++ } ++ ++ hct_get_config_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "==================== get hif/cxl trace buffer configuration ====================\n"); ++ fprintf(stdout, "Post Trigger Depth: %x\n", hct_get_config_out->post_trig_depth); ++ fprintf(stdout, "Ignore Valid: %x\n", hct_get_config_out->ignore_valid); ++ // OPL size ++ trig_config_size = (cmd->send_cmd->out.size - HCT_GET_CONFIG_FIXED_PAYLOAD_OUT_SIZE) / TRIG_CONFIG_PACKET_SIZE; ++ for(int i=0; i<trig_config_size; i++){ ++ fprintf(stdout, "Trigger Config %d: %x\n", i, le32_to_cpu(hct_get_config_out->trig_config[i])); ++ } ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_HCT_READ_BUFFER CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HCT_READ_BUFFER_OPCODE 50693 ++#define CXL_MEM_COMMAND_ID_HCT_READ_BUFFER_PAYLOAD_IN_SIZE 2 ++#define CXL_MEM_COMMAND_ID_HCT_READ_BUFFER_PAYLOAD_OUT_SIZE 1024 ++ ++struct cxl_mbox_hct_read_buffer_in { ++ u8 hct_inst; ++ u8 num_entries_to_read; ++} __attribute__((packed)); ++ ++struct cxl_mbox_hct_read_buffer_out { ++ u8 buf_end; ++ u8 num_buf_entries; ++ __le16 rsvd; ++ __le32 buf_entry[1024]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_hct_read_buffer(struct cxl_memdev *memdev, ++ u8 hct_inst, u8 num_entries_to_read) ++{ ++ u8 *buf_out; ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_hct_read_buffer_in *hct_read_buffer_in; ++ struct cxl_mbox_hct_read_buffer_out *hct_read_buffer_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HCT_READ_BUFFER_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_HCT_READ_BUFFER_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ hct_read_buffer_in = (void *) cmd->send_cmd->in.payload; ++ ++ hct_read_buffer_in->hct_inst = hct_inst; ++ hct_read_buffer_in->num_entries_to_read = num_entries_to_read; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto
out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HCT_READ_BUFFER) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HCT_READ_BUFFER); ++ return -EINVAL; ++ } ++ ++ hct_read_buffer_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "========================== read hif/cxl trace buffer ===========================\n"); ++ fprintf(stdout, "Buffer End Reached: %x\n", hct_read_buffer_out->buf_end); ++ fprintf(stdout, "Number of buffer entries: %x\n", hct_read_buffer_out->num_buf_entries); ++ ++ buf_out = (u8*) cmd->send_cmd->out.payload; ++ fprintf(stdout, "Buffer Entries:\n"); ++ for(int i=4; i<cmd->send_cmd->out.size; i++){ ++ fprintf(stdout, "%02x ", buf_out[i]); ++ } ++ fprintf(stdout, "\n"); ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_HCT_SET_CONFIG CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_HCT_SET_CONFIG_OPCODE 50690 ++#define CXL_MEM_COMMAND_ID_HCT_SET_CONFIG_PAYLOAD_IN_SIZE 136 ++#define HCT_SET_CONFIG_FIXED_PAYLOAD_IN_SIZE 8 ++ ++struct cxl_mbox_hct_set_config_in { ++ u8 hct_inst; ++ u8 config_flags; ++ u8 rsvd; ++ u8 rsvd2; ++ u8 post_trig_depth; ++ u8 ignore_valid; ++ u8 rsvd3; ++ u8 rsvd4; ++ u8 *trig_config[128]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_hct_set_config(struct cxl_memdev *memdev, ++ u8 hct_inst, u8 config_flags, u8 post_trig_depth, u8 ignore_valid, int size, u8 *trig_config_buffer) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_hct_set_config_in *hct_set_config_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_HCT_SET_CONFIG_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = HCT_SET_CONFIG_FIXED_PAYLOAD_IN_SIZE + size; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ hct_set_config_in = (void *) cmd->send_cmd->in.payload; ++ ++ hct_set_config_in->hct_inst = hct_inst; ++ hct_set_config_in->config_flags = config_flags; ++ hct_set_config_in->post_trig_depth = post_trig_depth; ++ hct_set_config_in->ignore_valid = ignore_valid; ++ memcpy(hct_set_config_in->trig_config, trig_config_buffer, size); ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_HCT_SET_CONFIG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_HCT_SET_CONFIG); ++ return -EINVAL; ++ } ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define
CXL_MEM_COMMAND_ID_OSA_OS_PATT_TRIG_CFG CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_OSA_OS_PATT_TRIG_CFG_OPCODE 51201 ++#define CXL_MEM_COMMAND_ID_OSA_OS_PATT_TRIG_CFG_PAYLOAD_IN_SIZE 40 ++ ++struct cxl_mbox_osa_os_patt_trig_cfg_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ __le16 rsvd2; ++ __le16 lane_mask; ++ u8 lane_dir_mask; ++ u8 rate_mask; ++ __le32 patt_val[4]; ++ __le32 patt_mask[4]; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_osa_os_patt_trig_cfg(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u16 lane_mask, u8 lane_dir_mask, u8 rate_mask, u32 *patt_val, ++ u32 *patt_mask) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_osa_os_patt_trig_cfg_in *osa_os_patt_trig_cfg_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_OSA_OS_PATT_TRIG_CFG_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_OSA_OS_PATT_TRIG_CFG_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ osa_os_patt_trig_cfg_in = (void *) cmd->send_cmd->in.payload; ++ ++ osa_os_patt_trig_cfg_in->cxl_mem_id = cxl_mem_id; ++ osa_os_patt_trig_cfg_in->lane_mask = cpu_to_le16(lane_mask); ++ osa_os_patt_trig_cfg_in->lane_dir_mask = lane_dir_mask; ++ osa_os_patt_trig_cfg_in->rate_mask = rate_mask; ++ for(int i = 0; i < 4; i++) { ++ osa_os_patt_trig_cfg_in->patt_val[i] = cpu_to_le32(patt_val[i]); ++ } ++ ++ for(int i = 0; i < 4; i++) { ++ osa_os_patt_trig_cfg_in->patt_mask[i] = cpu_to_le32(patt_mask[i]); ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_OSA_OS_PATT_TRIG_CFG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_OSA_OS_PATT_TRIG_CFG); ++ return -EINVAL; ++ } ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_OSA_MISC_TRIG_CFG CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_OSA_MISC_TRIG_CFG_OPCODE 51202 ++#define CXL_MEM_COMMAND_ID_OSA_MISC_TRIG_CFG_PAYLOAD_IN_SIZE 8 ++ ++struct cxl_mbox_osa_misc_trig_cfg_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ __le16 rsvd2; ++ u8 trig_en_mask; ++ u8 rsvd5[3]; ++} __attribute__((packed)); ++ ++ ++CXL_EXPORT int cxl_memdev_osa_misc_trig_cfg(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 trig_en_mask) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_osa_misc_trig_cfg_in *osa_misc_trig_cfg_in; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_OSA_MISC_TRIG_CFG_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = 
&query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_OSA_MISC_TRIG_CFG_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ osa_misc_trig_cfg_in = (void *) cmd->send_cmd->in.payload; ++ ++ osa_misc_trig_cfg_in->cxl_mem_id = cxl_mem_id; ++ osa_misc_trig_cfg_in->trig_en_mask = trig_en_mask; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_OSA_MISC_TRIG_CFG) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_OSA_MISC_TRIG_CFG); ++ return -EINVAL; ++ } ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++ ++#define CXL_MEM_COMMAND_ID_OSA_DATA_READ CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_OSA_DATA_READ_OPCODE 51207 ++#define CXL_MEM_COMMAND_ID_OSA_DATA_READ_PAYLOAD_IN_SIZE 8 ++#define CXL_MEM_COMMAND_ID_OSA_DATA_READ_PAYLOAD_OUT_SIZE 140 ++ ++struct cxl_mbox_osa_data_read_in { ++ u8 rsvd; ++ u8 cxl_mem_id; ++ u8 lane_id; ++ u8 lane_dir; ++ __le16 start_entry; ++ u8 num_entries; ++ u8 rsvd7; ++} __attribute__((packed)); ++ ++struct cxl_mbox_osa_data_read_out { ++ u8 entries_read; ++ u8 cxl_mem_id; ++ u8 lane_id; ++ u8 lane_dir; ++ __le16 next_entry; ++ __le16 entries_rem; ++ u8 wrap; ++ u8 rsvd[3]; ++ __le32 data[32]; ++} __attribute__((packed)); ++ ++CXL_EXPORT int cxl_memdev_osa_data_read(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 lane_id, u8 lane_dir, u16 start_entry, u8 num_entries) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_osa_data_read_in *osa_data_read_in; ++ struct cxl_mbox_osa_data_read_out *osa_data_read_out; ++ int rc = 0; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_OSA_DATA_READ_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_OSA_DATA_READ_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ osa_data_read_in = (void *) cmd->send_cmd->in.payload; ++ ++ osa_data_read_in->cxl_mem_id = cxl_mem_id; ++ osa_data_read_in->lane_id = lane_id; ++ osa_data_read_in->lane_dir = lane_dir; ++ osa_data_read_in->start_entry = cpu_to_le16(start_entry); ++ osa_data_read_in->num_entries = num_entries; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; 
++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_OSA_DATA_READ) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_OSA_DATA_READ); ++ return -EINVAL; ++ } ++ ++ osa_data_read_out = (void *)cmd->send_cmd->out.payload; ++ fprintf(stdout, "================================ osa data read =================================\n"); ++ fprintf(stdout, "total number of entries read: %x\n", osa_data_read_out->entries_read); ++ fprintf(stdout, "CXL.MEM ID: %x\n", osa_data_read_out->cxl_mem_id); ++ fprintf(stdout, "lane ID: %x\n", osa_data_read_out->lane_id); ++ fprintf(stdout, "lane direction (see osa_lane_dir_enum): %x\n", osa_data_read_out->lane_dir); ++ fprintf(stdout, "index of the next entry to read: %x\n", le16_to_cpu(osa_data_read_out->next_entry)); ++ fprintf(stdout, "number of entries remaining: %x\n", le16_to_cpu(osa_data_read_out->entries_rem)); ++ fprintf(stdout, "wrap indicator: %x\n", osa_data_read_out->wrap); ++ fprintf(stdout, "Data: \n"); ++ for(int i=0; i<osa_data_read_out->entries_read;i++){ ++ fprintf(stdout,"Entry %d: %x\n", i, le32_to_cpu(osa_data_read_out->data[i])); ++ } ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_DIMM_SPD_READ CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_DIMM_SPD_READ_OPCODE 50448 ++#define CXL_MEM_COMMAND_ID_DIMM_SPD_READ_PAYLOAD_IN_SIZE 12 ++ ++struct cxl_mbox_dimm_spd_read_in { ++ __le32 spd_id; ++ __le32 offset; ++ __le32 num_bytes; ++} __attribute__((packed)); ++ ++static char * decode_ddr4_module_type(u8 *bytes) { ++ char *type; ++ switch (bytes[3]) { ++ case 0x01: type = "RDIMM (Registered DIMM)"; break; ++ case 0x02: type = "UDIMM (Unbuffered DIMM)"; break; ++ case 0x03: type = "SODIMM (Small Outline Unbuffered DIMM)"; break; ++ case 0x04: type = "LRDIMM (Load-Reduced DIMM)"; break; ++ case 0x05: type = "Mini-RDIMM (Mini Registered DIMM)"; break; ++ case 0x06: type = "Mini-UDIMM (Mini Unbuffered DIMM)"; break; ++ case 0x08: type = "72b-SO-RDIMM (Small Outline Registered DIMM, 72-bit data bus)"; break; ++ case 0x09: type = "72b-SO-UDIMM (Small Outline Unbuffered DIMM, 72-bit data bus)"; break; ++ case 0x0c: type = "16b-SO-UDIMM (Small Outline Unbuffered DIMM, 16-bit data bus)"; break; ++ case 0x0d: type = "32b-SO-UDIMM (Small Outline Unbuffered DIMM, 32-bit data bus)"; break; ++ default: type = NULL; ++ } ++ return type; ++} ++ ++static float ddr4_mtb_ftb_calc(unsigned char b1, signed char b2) { ++ float mtb = 0.125; ++ float ftb = 0.001; ++ return b1 * mtb + b2 * ftb; ++} ++ ++static void decode_ddr4_module_speed(u8 *bytes, float *ddr_clock, int *pc4_speed) { ++ float ctime; ++ float ddrclk; ++ int tbits, pcclk; ++ ++ ctime = ddr4_mtb_ftb_calc(bytes[18], bytes[125]); ++ ddrclk = 2 * (1000 / ctime); ++ tbits = 8 << (bytes[13] & 7); ++ ++ pcclk = ddrclk * tbits / 8; ++ pcclk -= pcclk % 100; ++ ++ if (ddr_clock) { *ddr_clock = (int)ddrclk; } ++ if (pc4_speed) { *pc4_speed = pcclk; } ++} ++ ++static double decode_ddr4_module_size(u8 *bytes) { ++ double size; ++ int sdrcap = 256 << (bytes[4] & 15); ++ int buswidth = 8 << (bytes[13] & 7); ++ int sdrwidth = 4 << (bytes[12] & 7); ++ int signal_loading = bytes[6] & 3; ++ int lranks_per_dimm = ((bytes[12] >> 3) & 7) + 1; ++ ++ if (signal_loading == 2) lranks_per_dimm *= ((bytes[6] >> 4) & 7) + 1; ++ size = sdrcap / 8 * buswidth / sdrwidth * lranks_per_dimm; ++ return size; ++} ++ ++ ++static char * decode_ddr4_module_detail(u8 *bytes) { ++ char *type_detail = malloc(256); ++
float ddr_clock; ++ int pc4_speed; ++ if (type_detail) { ++ decode_ddr4_module_speed(bytes, &ddr_clock, &pc4_speed); ++ snprintf(type_detail, 255, "DDR4-%.0f (PC4-%d)", ddr_clock, pc4_speed); ++ } ++ return type_detail; ++} ++ ++static char * decode_ddr4_manufacturer(u8 *bytes){ ++ char *manufacturer; ++ u8 bank, index; ++ u8 count = bytes[320]; ++ u8 code = bytes[321]; ++ ++ if (code == 0x00 || code == 0xFF) { ++ manufacturer = NULL; ++ return manufacturer; ++ ++ } ++ ++ bank = count & 0x7f; ++ index = code & 0x7f; ++ if(bank >= VENDORS_BANKS) { ++ manufacturer = NULL; ++ return manufacturer; ++ } ++ manufacturer = (char *) vendors[bank][index]; ++ return manufacturer; ++} ++ ++typedef enum { ++ UNKNOWN = 0, ++ DIRECT_RAMBUS = 1, ++ RAMBUS = 2, ++ FPM_DRAM = 3, ++ EDO = 4, ++ PIPELINED_NIBBLE = 5, ++ SDR_SDRAM = 6, ++ MULTIPLEXED_ROM = 7, ++ DDR_SGRAM = 8, ++ DDR_SDRAM = 9, ++ DDR2_SDRAM = 10, ++ DDR3_SDRAM = 11, ++ DDR4_SDRAM = 12, ++ N_RAM_TYPES = 13 ++} RamType; ++ ++static int decode_ram_type(u8 *bytes) { ++ if (bytes[0] < 4) { ++ switch (bytes[2]) { ++ case 1: return DIRECT_RAMBUS; ++ case 17: return RAMBUS; ++ } ++ } else { ++ switch (bytes[2]) { ++ case 1: return FPM_DRAM; ++ case 2: return EDO; ++ case 3: return PIPELINED_NIBBLE; ++ case 4: return SDR_SDRAM; ++ case 5: return MULTIPLEXED_ROM; ++ case 6: return DDR_SGRAM; ++ case 7: return DDR_SDRAM; ++ case 8: return DDR2_SDRAM; ++ case 11: return DDR3_SDRAM; ++ case 12: return DDR4_SDRAM; ++ } ++ } ++ ++ return UNKNOWN; ++} ++ ++static const char *ram_types[] = {"Unknown", "Direct Rambus", "Rambus", "FPM DRAM", ++ "EDO", "Pipelined Nibble", "SDR SDRAM", "Multiplexed ROM", ++ "DDR SGRAM", "DDR SDRAM", "DDR2 SDRAM", "DDR3 SDRAM", ++ "DDR4 SDRAM"}; ++ ++CXL_EXPORT int cxl_memdev_dimm_spd_read(struct cxl_memdev *memdev, ++ u32 spd_id, u32 offset, u32 num_bytes) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ struct cxl_mbox_dimm_spd_read_in *dimm_spd_read_in; ++ u8 *dimm_spd_read_out; ++ int rc = 0; ++ RamType ram_type; ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_DIMM_SPD_READ_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ /* update payload size */ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_DIMM_SPD_READ_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ dimm_spd_read_in = (void *) cmd->send_cmd->in.payload; ++ ++ dimm_spd_read_in->spd_id = cpu_to_le32(spd_id); ++ dimm_spd_read_in->offset = cpu_to_le32(offset); ++ dimm_spd_read_in->num_bytes = cpu_to_le32(num_bytes); ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d\n", ++ cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_DIMM_SPD_READ) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_DIMM_SPD_READ); ++ return -EINVAL; ++ } ++ ++ dimm_spd_read_out = 
(u8*)cmd->send_cmd->out.payload; ++ ram_type = decode_ram_type(dimm_spd_read_out); ++ fprintf(stdout, "=========================== DIMM SPD READ Data ============================\n"); ++ fprintf(stdout, "Output Payload:"); ++ for(int i=0; i<cmd->send_cmd->out.size; i++){ ++ if (i % 16 == 0) ++ { ++ fprintf(stdout, "\n%04x %02x ", i+offset, dimm_spd_read_out[i]); ++ } ++ else ++ { ++ fprintf(stdout, "%02x ", dimm_spd_read_out[i]); ++ } ++ } ++ // Decoding SPD data for only DDR4 SDRAM. ++ if (ram_type == DDR4_SDRAM) { ++ fprintf(stdout, "\n\n"); ++ fprintf(stdout, "DDR RAM Type: %s\n", ram_types[ram_type]); ++ fprintf(stdout, "DDR Module Type: %s\n", decode_ddr4_module_type(dimm_spd_read_out)); ++ fprintf(stdout, "DDR Module Size: %1f\n", decode_ddr4_module_size(dimm_spd_read_out)); ++ fprintf(stdout, "DDR Module Detail: %s\n", decode_ddr4_module_detail(dimm_spd_read_out)); ++ fprintf(stdout, "DDR Manufacturer: %s\n", decode_ddr4_manufacturer(dimm_spd_read_out)); ++ } ++ ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++ return 0; ++} ++ ++#define CXL_MEM_COMMAND_ID_LOG_INFO CXL_MEM_COMMAND_ID_RAW ++#define CXL_MEM_COMMAND_ID_LOG_INFO_OPCODE 0X0401 ++#define DDR_TRAINING_STATUS_UUID "2f070da4-431c-4538-b41d-0c50c8f2e292" ++#define CXL_MEM_COMMAND_ID_LOG_INFO_PAYLOAD_IN_SIZE 24 ++ ++CXL_EXPORT int cxl_memdev_ddr_training_status(struct cxl_memdev *memdev) ++{ ++ struct cxl_cmd *cmd; ++ struct cxl_mbox_get_log *get_log_input; ++ struct cxl_mem_query_commands *query; ++ struct cxl_command_info *cinfo; ++ u8 *ddr_training_status; ++ int rc = 0; ++ int offset = 0; ++ ++ ++ cmd = cxl_cmd_new_raw(memdev, CXL_MEM_COMMAND_ID_LOG_INFO_OPCODE); ++ if (!cmd) { ++ fprintf(stderr, "%s: cxl_cmd_new_raw returned Null output\n", ++ cxl_memdev_get_devname(memdev)); ++ return -ENOMEM; ++ } ++ ++ query = cmd->query_cmd; ++ cinfo = &query->commands[cmd->query_idx]; ++ ++ cinfo->size_in = CXL_MEM_COMMAND_ID_LOG_INFO_PAYLOAD_IN_SIZE; ++ if (cinfo->size_in > 0) { ++ cmd->input_payload = calloc(1, cinfo->size_in); ++ if (!cmd->input_payload) ++ return -ENOMEM; ++ cmd->send_cmd->in.payload = (u64)cmd->input_payload; ++ cmd->send_cmd->in.size = cinfo->size_in; ++ } ++ ++ get_log_input = (void *) cmd->send_cmd->in.payload; ++ uuid_parse(DDR_TRAINING_STATUS_UUID, get_log_input->uuid); ++ get_log_input->offset = 0; ++ get_log_input->length = cmd->memdev->payload_max; ++ ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: cmd submission failed: %d (%s)\n", ++ cxl_memdev_get_devname(memdev), rc, strerror(-rc)); ++ goto out; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: firmware status: %d:\n%s\n", ++ cxl_memdev_get_devname(memdev), rc, DEVICE_ERRORS[rc]); ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ if (cmd->send_cmd->id != CXL_MEM_COMMAND_ID_LOG_INFO) { ++ fprintf(stderr, "%s: invalid command id 0x%x (expecting 0x%x)\n", ++ cxl_memdev_get_devname(memdev), cmd->send_cmd->id, CXL_MEM_COMMAND_ID_LOG_INFO); ++ return -EINVAL; ++ } ++ ++ ddr_training_status = (u8*)cmd->send_cmd->out.payload; ++ fprintf(stdout, "=========================== DDR Training Status ============================\n"); ++ fprintf(stdout, "Output Payload:\n"); ++ for(int i=0; i<cmd->send_cmd->out.size; i++){ ++ if (i % 16 == 0) ++ { ++ fprintf(stdout, "\n%04x %02x ", i+offset, ddr_training_status[i]); ++ } ++ else ++ { ++ fprintf(stdout, "%02x ", ddr_training_status[i]); ++ } ++ } ++ fprintf(stdout, "\n"); ++out: ++ cxl_cmd_unref(cmd); ++ return rc; ++} +diff --git a/cxl/lib/libcxl.pc.in +new
file mode 100644 +index 0000000..949fcdc +--- /dev/null ++++ b/cxl/lib/libcxl.pc.in +@@ -0,0 +1,11 @@ ++prefix=@prefix@ ++exec_prefix=@exec_prefix@ ++libdir=@libdir@ ++includedir=@includedir@ ++ ++Name: libcxl ++Description: Manage CXL devices ++Version: @VERSION@ ++Libs: -L${libdir} -lcxl ++Libs.private: ++Cflags: -I${includedir} +diff --git a/cxl/lib/libcxl.sym b/cxl/lib/libcxl.sym +new file mode 100644 +index 0000000..e68bd89 +--- /dev/null ++++ b/cxl/lib/libcxl.sym +@@ -0,0 +1,171 @@ ++LIBCXL_1 { ++global: ++ cxl_get_userdata; ++ cxl_set_userdata; ++ cxl_get_private_data; ++ cxl_set_private_data; ++ cxl_ref; ++ cxl_get_log_priority; ++ cxl_set_log_fn; ++ cxl_unref; ++ cxl_set_log_priority; ++ cxl_new; ++local: ++ *; ++}; ++ ++LIBCXL_2 { ++global: ++ cxl_memdev_get_first; ++ cxl_memdev_get_next; ++ cxl_memdev_get_id; ++ cxl_memdev_get_devname; ++ cxl_memdev_get_major; ++ cxl_memdev_get_minor; ++ cxl_memdev_get_ctx; ++ cxl_memdev_get_pmem_size; ++ cxl_memdev_get_ram_size; ++ cxl_memdev_get_firmware_verison; ++} LIBCXL_1; ++ ++LIBCXL_3 { ++global: ++ cxl_cmd_get_devname; ++ cxl_cmd_new_raw; ++ cxl_cmd_set_input_payload; ++ cxl_cmd_set_output_payload; ++ cxl_cmd_ref; ++ cxl_cmd_unref; ++ cxl_cmd_submit; ++ cxl_cmd_get_mbox_status; ++ cxl_cmd_get_out_size; ++ cxl_cmd_new_identify; ++ cxl_cmd_identify_get_fw_rev; ++ cxl_cmd_identify_get_partition_align; ++ cxl_cmd_identify_get_lsa_size; ++ cxl_cmd_new_get_health_info; ++ cxl_cmd_get_health_info_get_health_status; ++ cxl_cmd_get_health_info_get_media_status; ++ cxl_cmd_get_health_info_get_ext_status; ++ cxl_cmd_get_health_info_get_life_used; ++ cxl_cmd_get_health_info_get_temperature; ++ cxl_cmd_get_health_info_get_dirty_shutdowns; ++ cxl_cmd_get_health_info_get_volatile_errors; ++ cxl_cmd_get_health_info_get_pmem_errors; ++ cxl_cmd_new_get_lsa; ++ cxl_cmd_get_lsa_get_payload; ++} LIBCXL_2; ++ ++LIBCXL_4 { ++global: ++ cxl_memdev_device_info_get; ++ cxl_memdev_get_fw_info; ++ cxl_memdev_transfer_fw; ++ cxl_memdev_activate_fw; ++ cxl_memdev_get_lsa_size; ++ cxl_memdev_is_active; ++ cxl_cmd_new_set_lsa; ++ cxl_memdev_zero_lsa; ++ cxl_memdev_set_lsa; ++ cxl_memdev_get_lsa; ++ cxl_memdev_cmd_identify; ++ cxl_memdev_get_supported_logs; ++ cxl_memdev_get_cel_log; ++ cxl_memdev_get_event_interrupt_policy; ++ cxl_memdev_set_event_interrupt_policy; ++ cxl_memdev_get_timestamp; ++ cxl_memdev_set_timestamp; ++ cxl_memdev_get_alert_config; ++ cxl_memdev_set_alert_config; ++ cxl_memdev_get_health_info; ++ cxl_memdev_get_event_records; ++ cxl_memdev_get_ld_info; ++ cxl_memdev_ddr_info; ++ cxl_memdev_clear_event_records; ++ cxl_memdev_hct_start_stop_trigger; ++ cxl_memdev_hct_get_buffer_status; ++ cxl_memdev_hct_enable; ++ cxl_memdev_ltmon_capture_clear; ++ cxl_memdev_ltmon_capture; ++ cxl_memdev_ltmon_capture_freeze_and_restore; ++ cxl_memdev_ltmon_l2r_count_dump; ++ cxl_memdev_ltmon_l2r_count_clear; ++ cxl_memdev_ltmon_basic_cfg; ++ cxl_memdev_ltmon_watch; ++ cxl_memdev_ltmon_capture_stat; ++ cxl_memdev_ltmon_capture_log_dmp; ++ cxl_memdev_ltmon_capture_trigger; ++ cxl_memdev_ltmon_enable; ++ cxl_memdev_osa_os_type_trig_cfg; ++ cxl_memdev_osa_cap_ctrl; ++ cxl_memdev_osa_cfg_dump; ++ cxl_memdev_osa_ana_op; ++ cxl_memdev_osa_status_query; ++ cxl_memdev_osa_access_rel; ++ cxl_memdev_perfcnt_mta_ltif_set; ++ cxl_memdev_perfcnt_mta_get; ++ cxl_memdev_perfcnt_mta_latch_val_get; ++ cxl_memdev_perfcnt_mta_counter_clear; ++ cxl_memdev_perfcnt_mta_cnt_val_latch; ++ cxl_memdev_perfcnt_mta_hif_set; ++ cxl_memdev_perfcnt_mta_hif_cfg_get; ++ 
cxl_memdev_perfcnt_mta_hif_latch_val_get; ++ cxl_memdev_perfcnt_mta_hif_counter_clear; ++ cxl_memdev_perfcnt_mta_hif_cnt_val_latch; ++ cxl_memdev_perfcnt_ddr_generic_select; ++ cxl_memdev_perfcnt_ddr_generic_capture; ++ cxl_memdev_perfcnt_ddr_dfi_capture; ++ cxl_memdev_err_inj_drs_poison; ++ cxl_memdev_err_inj_drs_ecc; ++ cxl_memdev_err_inj_rxflit_crc; ++ cxl_memdev_err_inj_txflit_crc; ++ cxl_memdev_err_inj_viral; ++ cxl_memdev_eh_eye_cap_run; ++ cxl_memdev_eh_eye_cap_read; ++ cxl_memdev_eh_eye_cap_timeout_enable; ++ cxl_memdev_eh_eye_cap_status; ++ cxl_memdev_eh_adapt_get; ++ cxl_memdev_eh_adapt_oneoff; ++ cxl_memdev_eh_adapt_force; ++ cxl_memdev_hbo_status; ++ cxl_memdev_hbo_transfer_fw; ++ cxl_memdev_hbo_activate_fw; ++ cxl_memdev_health_counters_clear; ++ cxl_memdev_health_counters_get; ++ cxl_memdev_hct_get_plat_param; ++ cxl_memdev_err_inj_hif_poison; ++ cxl_memdev_err_inj_hif_ecc; ++ cxl_memdev_eh_link_dbg_cfg; ++ cxl_memdev_eh_link_dbg_entry_dump; ++ cxl_memdev_eh_link_dbg_lane_dump; ++ cxl_memdev_eh_link_dbg_reset; ++ cxl_memdev_fbist_stopconfig_set; ++ cxl_memdev_fbist_cyclecount_set; ++ cxl_memdev_fbist_reset_set; ++ cxl_memdev_fbist_run_set; ++ cxl_memdev_fbist_run_get; ++ cxl_memdev_fbist_xfer_rem_cnt_get; ++ cxl_memdev_fbist_last_exp_read_data_get; ++ cxl_memdev_fbist_curr_cycle_cnt_get; ++ cxl_memdev_fbist_thread_status_get; ++ cxl_memdev_fbist_thread_trans_cnt_get; ++ cxl_memdev_fbist_thread_bandwidth_get; ++ cxl_memdev_fbist_thread_latency_get; ++ cxl_memdev_fbist_thread_perf_mon_set; ++ cxl_memdev_fbist_top_read_status0_get; ++ cxl_memdev_fbist_top_err_cnt_get; ++ cxl_memdev_fbist_last_read_addr_get; ++ cxl_memdev_fbist_test_simpledata; ++ cxl_memdev_fbist_test_addresstest; ++ cxl_memdev_fbist_test_movinginversion; ++ cxl_memdev_fbist_test_randomsequence; ++ cxl_memdev_conf_read; ++ cxl_memdev_hct_get_config; ++ cxl_memdev_hct_read_buffer; ++ cxl_memdev_hct_set_config; ++ cxl_memdev_osa_os_patt_trig_cfg; ++ cxl_memdev_osa_misc_trig_cfg; ++ cxl_memdev_osa_data_read; ++ cxl_memdev_dimm_spd_read; ++ cxl_memdev_ddr_training_status; ++} LIBCXL_3; +diff --git a/cxl/lib/private.h b/cxl/lib/private.h +new file mode 100644 +index 0000000..103429e +--- /dev/null ++++ b/cxl/lib/private.h +@@ -0,0 +1,104 @@ ++/* SPDX-License-Identifier: LGPL-2.1 */ ++/* Copyright (C) 2020-2021, Intel Corporation. All rights reserved. 
*/ ++#ifndef _LIBCXL_PRIVATE_H_ ++#define _LIBCXL_PRIVATE_H_ ++ ++#include ++#include ++#include ++#include ++ ++#define CXL_EXPORT __attribute__ ((visibility("default"))) ++ ++struct cxl_memdev { ++ int id, major, minor; ++ void *dev_buf; ++ size_t buf_len; ++ char *dev_path; ++ char *firmware_version; ++ struct cxl_ctx *ctx; ++ struct list_node list; ++ unsigned long long pmem_size; ++ unsigned long long ram_size; ++ int payload_max; ++ size_t lsa_size; ++ struct kmod_module *module; ++}; ++ ++enum cxl_cmd_query_status { ++ CXL_CMD_QUERY_NOT_RUN = 0, ++ CXL_CMD_QUERY_OK, ++ CXL_CMD_QUERY_UNSUPPORTED, ++}; ++ ++/** ++ * struct cxl_cmd - CXL memdev command ++ * @memdev: the memory device to which the command is being sent ++ * @query_cmd: structure for the Linux 'Query commands' ioctl ++ * @send_cmd: structure for the Linux 'Send command' ioctl ++ * @input_payload: buffer for input payload managed by libcxl ++ * @output_payload: buffer for output payload managed by libcxl ++ * @refcount: reference for passing command buffer around ++ * @query_status: status from query_commands ++ * @query_idx: index of 'this' command in the query_commands array ++ * @status: command return status from the device ++ */ ++struct cxl_cmd { ++ struct cxl_memdev *memdev; ++ struct cxl_mem_query_commands *query_cmd; ++ struct cxl_send_command *send_cmd; ++ void *input_payload; ++ void *output_payload; ++ int refcount; ++ int query_status; ++ int query_idx; ++ int status; ++}; ++ ++#define CXL_CMD_IDENTIFY_FW_REV_LENGTH 0x10 ++ ++struct cxl_cmd_identify { ++ char fw_revision[CXL_CMD_IDENTIFY_FW_REV_LENGTH]; ++ le64 total_capacity; ++ le64 volatile_capacity; ++ le64 persistent_capacity; ++ le64 partition_align; ++ le16 info_event_log_size; ++ le16 warning_event_log_size; ++ le16 failure_event_log_size; ++ le16 fatal_event_log_size; ++ le32 lsa_size; ++ u8 poison_list_max_mer[3]; ++ le16 inject_poison_limit; ++ u8 poison_caps; ++ u8 qos_telemetry_caps; ++} __attribute__((packed)); ++ ++struct cxl_cmd_get_lsa_in { ++ le32 offset; ++ le32 length; ++} __attribute__((packed)); ++ ++struct cxl_cmd_set_lsa { ++ le32 offset; ++ le32 rsvd; ++ unsigned char lsa_data[0]; ++} __attribute__ ((packed)); ++ ++struct cxl_cmd_get_health_info { ++ u8 health_status; ++ u8 media_status; ++ u8 ext_status; ++ u8 life_used; ++ le16 temperature; ++ le32 dirty_shutdowns; ++ le32 volatile_errors; ++ le32 pmem_errors; ++} __attribute__((packed)); ++ ++static inline int check_kmod(struct kmod_ctx *kmod_ctx) ++{ ++ return kmod_ctx ? 0 : -ENXIO; ++} ++ ++#endif /* _LIBCXL_PRIVATE_H_ */ +diff --git a/cxl/libcxl.h b/cxl/libcxl.h +new file mode 100644 +index 0000000..498de7d +--- /dev/null ++++ b/cxl/libcxl.h +@@ -0,0 +1,277 @@ ++/* SPDX-License-Identifier: LGPL-2.1 */ ++/* Copyright (C) 2020-2021, Intel Corporation. All rights reserved. 
*/ ++#ifndef _LIBCXL_H_ ++#define _LIBCXL_H_ ++ ++#include ++#include ++#include ++#include ++ ++#ifdef HAVE_UUID ++#include ++#else ++typedef unsigned char uuid_t[16]; ++#endif ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#define FW_BYTE_ALIGN 128 ++#define FW_BLOCK_SIZE 128 ++typedef unsigned char fwblock[FW_BLOCK_SIZE]; ++ ++struct cxl_ctx; ++struct cxl_ctx *cxl_ref(struct cxl_ctx *ctx); ++void cxl_unref(struct cxl_ctx *ctx); ++int cxl_new(struct cxl_ctx **ctx); ++void cxl_set_log_fn(struct cxl_ctx *ctx, ++ void (*log_fn)(struct cxl_ctx *ctx, int priority, ++ const char *file, int line, const char *fn, ++ const char *format, va_list args)); ++int cxl_get_log_priority(struct cxl_ctx *ctx); ++void cxl_set_log_priority(struct cxl_ctx *ctx, int priority); ++void cxl_set_userdata(struct cxl_ctx *ctx, void *userdata); ++void *cxl_get_userdata(struct cxl_ctx *ctx); ++void cxl_set_private_data(struct cxl_ctx *ctx, void *data); ++void *cxl_get_private_data(struct cxl_ctx *ctx); ++ ++struct cxl_memdev; ++struct cxl_memdev *cxl_memdev_get_first(struct cxl_ctx *ctx); ++struct cxl_memdev *cxl_memdev_get_next(struct cxl_memdev *memdev); ++int cxl_memdev_get_id(struct cxl_memdev *memdev); ++const char *cxl_memdev_get_devname(struct cxl_memdev *memdev); ++int cxl_memdev_get_major(struct cxl_memdev *memdev); ++int cxl_memdev_get_minor(struct cxl_memdev *memdev); ++struct cxl_ctx *cxl_memdev_get_ctx(struct cxl_memdev *memdev); ++unsigned long long cxl_memdev_get_pmem_size(struct cxl_memdev *memdev); ++unsigned long long cxl_memdev_get_ram_size(struct cxl_memdev *memdev); ++const char *cxl_memdev_get_firmware_verison(struct cxl_memdev *memdev); ++size_t cxl_memdev_get_lsa_size(struct cxl_memdev *memdev); ++int cxl_memdev_is_active(struct cxl_memdev *memdev); ++int cxl_memdev_zero_lsa(struct cxl_memdev *memdev); ++int cxl_memdev_get_lsa(struct cxl_memdev *memdev, void *buf, size_t length, ++ size_t offset); ++int cxl_memdev_set_lsa(struct cxl_memdev *memdev, void *buf, size_t length, ++ size_t offset); ++int cxl_memdev_cmd_identify(struct cxl_memdev *memdev); ++int cxl_memdev_device_info_get(struct cxl_memdev *memdev); ++int cxl_memdev_get_fw_info(struct cxl_memdev *memdev); ++int cxl_memdev_transfer_fw(struct cxl_memdev *memdev, u8 action, ++ u8 slot, u32 offset, int size, unsigned char *data, u32 transfer_fw_opcode); ++int cxl_memdev_activate_fw(struct cxl_memdev *memdev, u8 action, ++ u8 slot); ++int cxl_memdev_get_supported_logs(struct cxl_memdev *memdev); ++int cxl_memdev_get_cel_log(struct cxl_memdev *memdev); ++int cxl_memdev_get_event_interrupt_policy(struct cxl_memdev *memdev); ++int cxl_memdev_set_event_interrupt_policy(struct cxl_memdev *memdev, u32 int_policy); ++int cxl_memdev_get_timestamp(struct cxl_memdev *memdev); ++int cxl_memdev_set_timestamp(struct cxl_memdev *memdev, u64 timestamp); ++int cxl_memdev_get_alert_config(struct cxl_memdev *memdev); ++int cxl_memdev_set_alert_config(struct cxl_memdev *memdev, u32 alert_prog_threshold, ++ u32 device_temp_threshold, u32 mem_error_threshold); ++int cxl_memdev_get_health_info(struct cxl_memdev *memdev); ++int cxl_memdev_get_event_records(struct cxl_memdev *memdev, u8 event_log_type); ++int cxl_memdev_get_ld_info(struct cxl_memdev *memdev); ++int cxl_memdev_ddr_info(struct cxl_memdev *memdev, u8 ddr_id); ++int cxl_memdev_clear_event_records(struct cxl_memdev *memdev, u8 event_log_type, ++ u8 clear_event_flags, u8 no_event_record_handles, u16 *event_record_handles); ++int cxl_memdev_hct_start_stop_trigger(struct cxl_memdev *memdev, ++ u8 
hct_inst, u8 buf_control); ++int cxl_memdev_hct_get_buffer_status(struct cxl_memdev *memdev, ++ u8 hct_inst); ++int cxl_memdev_hct_enable(struct cxl_memdev *memdev, u8 hct_inst); ++int cxl_memdev_ltmon_capture_clear(struct cxl_memdev *memdev, u8 cxl_mem_id); ++int cxl_memdev_ltmon_capture(struct cxl_memdev *memdev, u8 cxl_mem_id, ++ u8 capt_mode, u16 ignore_sub_chg, u8 ignore_rxl0_chg, u8 trig_src_sel); ++int cxl_memdev_ltmon_capture_freeze_and_restore(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 freeze_restore); ++int cxl_memdev_ltmon_l2r_count_dump(struct cxl_memdev *memdev, ++ u8 cxl_mem_id); ++int cxl_memdev_ltmon_l2r_count_clear(struct cxl_memdev *memdev, ++ u8 cxl_mem_id); ++int cxl_memdev_ltmon_basic_cfg(struct cxl_memdev *memdev, u8 cxl_mem_id, ++ u8 tick_cnt, u8 global_ts); ++int cxl_memdev_ltmon_watch(struct cxl_memdev *memdev, u8 cxl_mem_id, ++ u8 watch_id, u8 watch_mode, u8 src_maj_st, u8 src_min_st, u8 src_l0_st, ++ u8 dst_maj_st, u8 dst_min_st, u8 dst_l0_st); ++int cxl_memdev_ltmon_capture_stat(struct cxl_memdev *memdev, u8 cxl_mem_id); ++int cxl_memdev_ltmon_capture_log_dmp(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u16 dump_idx, u16 dump_cnt); ++int cxl_memdev_ltmon_capture_trigger(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u8 trig_src); ++int cxl_memdev_ltmon_enable(struct cxl_memdev *memdev, u8 cxl_mem_id, ++ u8 enable); ++int cxl_memdev_osa_os_type_trig_cfg(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u16 lane_mask, u8 lane_dir_mask, u8 rate_mask, u16 os_type_mask); ++int cxl_memdev_osa_cap_ctrl(struct cxl_memdev *memdev, u8 cxl_mem_id, ++ u16 lane_mask, u8 lane_dir_mask, u8 drop_single_os, u8 stop_mode, ++ u8 snapshot_mode, u16 post_trig_num, u16 os_type_mask); ++int cxl_memdev_osa_cfg_dump(struct cxl_memdev *memdev, u8 cxl_mem_id); ++int cxl_memdev_osa_ana_op(struct cxl_memdev *memdev, u8 cxl_mem_id, ++ u8 op); ++int cxl_memdev_osa_status_query(struct cxl_memdev *memdev, u8 cxl_mem_id); ++int cxl_memdev_osa_access_rel(struct cxl_memdev *memdev, u8 cxl_mem_id); ++int cxl_memdev_perfcnt_mta_ltif_set(struct cxl_memdev *memdev, ++ u32 counter, u32 match_value, u32 opcode, u32 meta_field, u32 meta_value); ++int cxl_memdev_perfcnt_mta_get(struct cxl_memdev *memdev, u8 type, ++ u32 counter); ++int cxl_memdev_perfcnt_mta_latch_val_get(struct cxl_memdev *memdev, ++ u8 type, u32 counter); ++int cxl_memdev_perfcnt_mta_counter_clear(struct cxl_memdev *memdev, ++ u8 type, u32 counter); ++int cxl_memdev_perfcnt_mta_cnt_val_latch(struct cxl_memdev *memdev, ++ u8 type, u32 counter); ++int cxl_memdev_perfcnt_mta_hif_set(struct cxl_memdev *memdev, u32 counter, ++ u32 match_value, u32 addr, u32 req_ty, u32 sc_ty); ++int cxl_memdev_perfcnt_mta_hif_cfg_get(struct cxl_memdev *memdev, ++ u32 counter); ++int cxl_memdev_perfcnt_mta_hif_latch_val_get(struct cxl_memdev *memdev, ++ u32 counter); ++int cxl_memdev_perfcnt_mta_hif_counter_clear(struct cxl_memdev *memdev, ++ u32 counter); ++int cxl_memdev_perfcnt_mta_hif_cnt_val_latch(struct cxl_memdev *memdev, ++ u32 counter); ++int cxl_memdev_perfcnt_ddr_generic_select(struct cxl_memdev *memdev, ++ u8 ddr_id, u8 cid, u8 rank, u8 bank, u8 bankgroup, u64 event); ++int cxl_memdev_perfcnt_ddr_generic_capture(struct cxl_memdev *memdev, ++ u8 ddr_id, u32 poll_period_ms); ++int cxl_memdev_perfcnt_ddr_dfi_capture(struct cxl_memdev *memdev, ++ u8 ddr_id, u32 poll_period_ms); ++int cxl_memdev_err_inj_drs_poison(struct cxl_memdev *memdev, u8 ch_id, ++ u8 duration, u8 inj_mode, u16 tag); ++int cxl_memdev_err_inj_drs_ecc(struct cxl_memdev *memdev, u8 
ch_id, ++ u8 duration, u8 inj_mode, u16 tag); ++int cxl_memdev_err_inj_rxflit_crc(struct cxl_memdev *memdev, u8 cxl_mem_id); ++int cxl_memdev_err_inj_txflit_crc(struct cxl_memdev *memdev, u8 cxl_mem_id); ++int cxl_memdev_err_inj_viral(struct cxl_memdev *memdev, u8 ld_id); ++int cxl_memdev_eh_eye_cap_run(struct cxl_memdev *memdev, u8 depth, ++ u32 lane_mask); ++int cxl_memdev_eh_eye_cap_read(struct cxl_memdev *memdev, u8 lane_id, ++ u8 bin_num); ++int cxl_memdev_eh_eye_cap_timeout_enable(struct cxl_memdev *memdev, u8 enable); ++int cxl_memdev_eh_eye_cap_status(struct cxl_memdev *memdev); ++int cxl_memdev_eh_adapt_get(struct cxl_memdev *memdev, u32 lane_id); ++int cxl_memdev_eh_adapt_oneoff(struct cxl_memdev *memdev, u32 lane_id, ++ u32 preload, u32 loops, u32 objects); ++int cxl_memdev_eh_adapt_force(struct cxl_memdev *memdev, u32 lane_id, ++ u32 rate, u32 vdd_bias, u32 ssc, u8 pga_gain, u8 pga_a0, u8 pga_off, ++ u8 cdfe_a2, u8 cdfe_a3, u8 cdfe_a4, u8 cdfe_a5, u8 cdfe_a6, u8 cdfe_a7, ++ u8 cdfe_a8, u8 cdfe_a9, u8 cdfe_a10, u16 dc_offset, u16 zobel_dc_offset, ++ u16 udfe_thr_0, u16 udfe_thr_1, u16 median_amp, u8 zobel_a_gain, ++ u8 ph_ofs_t); ++int cxl_memdev_hbo_status(struct cxl_memdev *memdev, u8 print_output); ++int cxl_memdev_hbo_transfer_fw(struct cxl_memdev *memdev); ++int cxl_memdev_hbo_activate_fw(struct cxl_memdev *memdev); ++int cxl_memdev_health_counters_clear(struct cxl_memdev *memdev, ++ u32 bitmask); ++int cxl_memdev_health_counters_get(struct cxl_memdev *memdev); ++int cxl_memdev_hct_get_plat_param(struct cxl_memdev *memdev); ++int cxl_memdev_err_inj_hif_poison(struct cxl_memdev *memdev, u8 ch_id, ++ u8 duration, u8 inj_mode, u64 address); ++int cxl_memdev_err_inj_hif_ecc(struct cxl_memdev *memdev, u8 ch_id, ++ u8 duration, u8 inj_mode, u64 address); ++int cxl_memdev_eh_link_dbg_cfg(struct cxl_memdev *memdev, u8 port_id, u8 op_mode, ++ u8 cap_type, u16 lane_mask, u8 rate_mask, u32 timer_us, u32 cap_delay_us, u8 max_cap); ++int cxl_memdev_eh_link_dbg_entry_dump(struct cxl_memdev *memdev, u8 entry_idx); ++int cxl_memdev_eh_link_dbg_lane_dump(struct cxl_memdev *memdev, u8 entry_idx, u8 lane_idx); ++int cxl_memdev_eh_link_dbg_reset(struct cxl_memdev *memdev); ++int cxl_memdev_fbist_stopconfig_set(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 stop_on_wresp, u8 stop_on_rresp, u8 stop_on_rdataerr); ++int cxl_memdev_fbist_cyclecount_set(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u64 cyclecount); ++int cxl_memdev_fbist_reset_set(struct cxl_memdev *memdev, u32 fbist_id, ++ u8 txg0_reset, u8 txg1_reset); ++int cxl_memdev_fbist_run_set(struct cxl_memdev *memdev, u32 fbist_id, ++ u8 txg0_run, u8 txg1_run); ++int cxl_memdev_fbist_run_get(struct cxl_memdev *memdev, u32 fbist_id); ++int cxl_memdev_fbist_xfer_rem_cnt_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 thread_nr); ++int cxl_memdev_fbist_last_exp_read_data_get(struct cxl_memdev *memdev, ++ u32 fbist_id); ++int cxl_memdev_fbist_curr_cycle_cnt_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr); ++int cxl_memdev_fbist_thread_status_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u8 thread_nr); ++int cxl_memdev_fbist_thread_trans_cnt_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u8 thread_nr); ++int cxl_memdev_fbist_thread_bandwidth_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u8 thread_nr); ++int cxl_memdev_fbist_thread_latency_get(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 txg_nr, u8 thread_nr); ++int cxl_memdev_fbist_thread_perf_mon_set(struct cxl_memdev *memdev, ++ 
u32 fbist_id, u8 txg_nr, u8 thread_nr, u8 pmon_preset_en, u8 pmon_clear_en, ++ u8 pmon_rollover, u8 pmon_thread_lclk); ++int cxl_memdev_fbist_top_read_status0_get(struct cxl_memdev *memdev, ++ u32 fbist_id); ++int cxl_memdev_fbist_top_err_cnt_get(struct cxl_memdev *memdev, ++ u32 fbist_id); ++int cxl_memdev_fbist_last_read_addr_get(struct cxl_memdev *memdev, ++ u32 fbist_id); ++int cxl_memdev_fbist_test_simpledata(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 test_nr, u64 start_address, u64 num_bytes); ++int cxl_memdev_fbist_test_addresstest(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 test_nr, u64 start_address, u64 num_bytes, u32 seed); ++int cxl_memdev_fbist_test_movinginversion(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 test_nr, u8 phase_nr, u64 start_address, u64 num_bytes, ++ u32 ddrpage_size); ++int cxl_memdev_fbist_test_randomsequence(struct cxl_memdev *memdev, ++ u32 fbist_id, u8 phase_nr, u64 start_address, u64 num_bytes, u32 ddrpage_size, ++ u32 seed_dr0, u32 seed_dr1); ++int cxl_memdev_conf_read(struct cxl_memdev *memdev, u32 offset, ++ u32 length); ++int cxl_memdev_hct_get_config(struct cxl_memdev *memdev, u8 hct_inst); ++int cxl_memdev_hct_read_buffer(struct cxl_memdev *memdev, u8 hct_inst, u8 num_entries_to_read); ++int cxl_memdev_hct_set_config(struct cxl_memdev *memdev, u8 hct_inst, u8 config_flags, ++ u8 port_trig_depth, u8 ignore_invalid, int filesize, u8 *trig_config_buffer); ++int cxl_memdev_osa_os_patt_trig_cfg(struct cxl_memdev *memdev, ++ u8 cxl_mem_id, u16 lane_mask, u8 lane_dir_mask, u8 rate_mask, u32 *patt_val, ++ u32 *patt_mask); ++int cxl_memdev_osa_misc_trig_cfg(struct cxl_memdev *memdev, u8 cxl_mem_id, ++ u8 trig_en_mask); ++int cxl_memdev_osa_data_read(struct cxl_memdev *memdev, u8 cxl_mem_id, ++ u8 lane_id, u8 lane_dir, u16 start_entry, u8 num_entries); ++int cxl_memdev_dimm_spd_read(struct cxl_memdev *memdev, u32 spd_id, ++ u32 offset, u32 num_bytes); ++int cxl_memdev_ddr_training_status(struct cxl_memdev *memdev); ++ ++#define cxl_memdev_foreach(ctx, memdev) \ ++ for (memdev = cxl_memdev_get_first(ctx); \ ++ memdev != NULL; \ ++ memdev = cxl_memdev_get_next(memdev)) ++ ++struct cxl_cmd; ++const char *cxl_cmd_get_devname(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_raw(struct cxl_memdev *memdev, int opcode); ++int cxl_cmd_set_input_payload(struct cxl_cmd *cmd, void *in, int size); ++int cxl_cmd_set_output_payload(struct cxl_cmd *cmd, void *out, int size); ++void cxl_cmd_ref(struct cxl_cmd *cmd); ++void cxl_cmd_unref(struct cxl_cmd *cmd); ++int cxl_cmd_submit(struct cxl_cmd *cmd); ++int cxl_cmd_get_mbox_status(struct cxl_cmd *cmd); ++int cxl_cmd_get_out_size(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_identify(struct cxl_memdev *memdev); ++int cxl_cmd_identify_get_fw_rev(struct cxl_cmd *cmd, char *fw_rev, int fw_len); ++unsigned long long cxl_cmd_identify_get_partition_align(struct cxl_cmd *cmd); ++unsigned int cxl_cmd_identify_get_lsa_size(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_get_health_info(struct cxl_memdev *memdev); ++int cxl_cmd_get_health_info_get_health_status(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_media_status(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_ext_status(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_life_used(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_temperature(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_dirty_shutdowns(struct cxl_cmd *cmd); ++int cxl_cmd_get_health_info_get_volatile_errors(struct cxl_cmd *cmd); ++int 
cxl_cmd_get_health_info_get_pmem_errors(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_get_lsa(struct cxl_memdev *memdev, ++ unsigned int offset, unsigned int length); ++void *cxl_cmd_get_lsa_get_payload(struct cxl_cmd *cmd); ++struct cxl_cmd *cxl_cmd_new_set_lsa(struct cxl_memdev *memdev, ++ void *buf, unsigned int offset, unsigned int length); ++ ++#ifdef __cplusplus ++} /* extern "C" */ ++#endif ++ ++#endif +diff --git a/cxl/list.c b/cxl/list.c +new file mode 100644 +index 0000000..3dea73f +--- /dev/null ++++ b/cxl/list.c +@@ -0,0 +1,113 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (C) 2020-2021 Intel Corporation. All rights reserved. */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static struct { ++ bool memdevs; ++ bool idle; ++ bool human; ++} list; ++ ++static unsigned long listopts_to_flags(void) ++{ ++ unsigned long flags = 0; ++ ++ if (list.idle) ++ flags |= UTIL_JSON_IDLE; ++ if (list.human) ++ flags |= UTIL_JSON_HUMAN; ++ return flags; ++} ++ ++static struct { ++ const char *memdev; ++} param; ++ ++static int did_fail; ++ ++#define fail(fmt, ...) \ ++do { \ ++ did_fail = 1; \ ++ fprintf(stderr, "cxl-%s:%s:%d: " fmt, \ ++ VERSION, __func__, __LINE__, ##__VA_ARGS__); \ ++} while (0) ++ ++static int num_list_flags(void) ++{ ++ return list.memdevs; ++} ++ ++int cmd_list(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ const struct option options[] = { ++ OPT_STRING('d', "memdev", ¶m.memdev, "memory device name", ++ "filter by CXL memory device name"), ++ OPT_BOOLEAN('D', "memdevs", &list.memdevs, ++ "include CXL memory device info"), ++ OPT_BOOLEAN('i', "idle", &list.idle, "include idle devices"), ++ OPT_BOOLEAN('u', "human", &list.human, ++ "use human friendly number formats "), ++ OPT_END(), ++ }; ++ const char * const u[] = { ++ "cxl list []", ++ NULL ++ }; ++ struct json_object *jdevs = NULL; ++ unsigned long list_flags; ++ struct cxl_memdev *memdev; ++ int i; ++ ++ argc = parse_options(argc, argv, options, u, 0); ++ for (i = 0; i < argc; i++) ++ error("unknown parameter \"%s\"\n", argv[i]); ++ ++ if (argc) ++ usage_with_options(u, options); ++ ++ if (num_list_flags() == 0) ++ list.memdevs = true; ++ ++ list_flags = listopts_to_flags(); ++ ++ cxl_memdev_foreach(ctx, memdev) { ++ struct json_object *jdev = NULL; ++ ++ if (!util_cxl_memdev_filter(memdev, param.memdev)) ++ continue; ++ ++ if (list.memdevs) { ++ if (!jdevs) { ++ jdevs = json_object_new_array(); ++ if (!jdevs) { ++ fail("\n"); ++ continue; ++ } ++ } ++ ++ jdev = util_cxl_memdev_to_json(memdev, list_flags); ++ if (!jdev) { ++ fail("\n"); ++ continue; ++ } ++ json_object_array_add(jdevs, jdev); ++ } ++ } ++ ++ if (jdevs) ++ util_display_json_array(stdout, jdevs, list_flags); ++ ++ if (did_fail) ++ return -ENOMEM; ++ return 0; ++} +diff --git a/cxl/memdev.c b/cxl/memdev.c +new file mode 100644 +index 0000000..d3a6f67 +--- /dev/null ++++ b/cxl/memdev.c +@@ -0,0 +1,4497 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (C) 2020-2021 Intel Corporation. All rights reserved. */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++ ++struct action_context { ++ FILE *f_out; ++ FILE *f_in; ++}; ++ ++static struct parameters { ++ const char *outfile; ++ const char *infile; ++ unsigned len; ++ unsigned offset; ++ bool verbose; ++} param; ++ ++#define fail(fmt, ...) 
\ ++do { \ ++ fprintf(stderr, "cxl-%s:%s:%d: " fmt, \ ++ VERSION, __func__, __LINE__, ##__VA_ARGS__); \ ++} while (0) ++ ++#define BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", ¶m.verbose, "turn on debug") ++ ++#define READ_OPTIONS() \ ++OPT_STRING('o', "output", ¶m.outfile, "output-file", \ ++ "filename to write label area contents") ++ ++#define WRITE_OPTIONS() \ ++OPT_STRING('i', "input", ¶m.infile, "input-file", \ ++ "filename to read label area data") ++ ++#define LABEL_OPTIONS() \ ++OPT_UINTEGER('s', "size", ¶m.len, "number of label bytes to operate"), \ ++OPT_UINTEGER('O', "offset", ¶m.offset, \ ++ "offset into the label area to start operation") ++ ++static const struct option read_options[] = { ++ BASE_OPTIONS(), ++ LABEL_OPTIONS(), ++ READ_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option write_options[] = { ++ BASE_OPTIONS(), ++ LABEL_OPTIONS(), ++ WRITE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option zero_options[] = { ++ BASE_OPTIONS(), ++ LABEL_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_identify_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_supported_logs_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_cel_log_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_event_interrupt_policy_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _interrupt_policy_params { ++ u32 policy; ++ bool verbose; ++} interrupt_policy_params; ++ ++ ++#define SET_INTERRUPT_POLICY_OPTIONS() \ ++OPT_UINTEGER('i', "int_policy", &interrupt_policy_params.policy, "Set event interrupt policy. Fields: Informational Event Log Interrupt Settings (1B), Warning Event Log Interrupt Settings (1B), Failure Event Log Interrupt Settings (1B), Fatal Event Log Interrupt Settings (1B)") ++ ++static const struct option cmd_set_event_interrupt_policy_options[] = { ++ BASE_OPTIONS(), ++ SET_INTERRUPT_POLICY_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_timestamp_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ts_params { ++ u64 timestamp; ++ bool verbose; ++} ts_params; ++ ++ ++#define SET_TIMESTAMP_OPTIONS() \ ++OPT_U64('t', "timestamp", &ts_params.timestamp, "Set the timestamp on the device") ++ ++static const struct option cmd_set_timestamp_options[] = { ++ BASE_OPTIONS(), ++ SET_TIMESTAMP_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _update_fw_params { ++ const char *filepath; ++ u32 slot; ++ bool hbo; ++ bool mock; ++ bool verbose; ++} update_fw_params; ++ ++#define UPDATE_FW_OPTIONS() \ ++OPT_FILENAME('f', "file", &update_fw_params.filepath, "rom-file", \ ++ "filepath to read ROM for firmware update"), \ ++OPT_UINTEGER('s', "slot", &update_fw_params.slot, "slot to use for firmware loading"), \ ++OPT_BOOLEAN('b', "background", &update_fw_params.hbo, "runs as hidden background option"), \ ++OPT_BOOLEAN('m', "mock", &update_fw_params.mock, "For testing purposes. 
Mock transfer with only 1 continue then abort") ++ ++static const struct option cmd_update_fw_options[] = { ++ BASE_OPTIONS(), ++ UPDATE_FW_OPTIONS(), ++ OPT_END(), ++}; ++ ++ ++ ++ ++static const struct option cmd_device_info_get_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++ ++ ++ ++static const struct option cmd_get_fw_info_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _activate_fw_params { ++ u32 action; ++ u32 slot; ++ bool verbose; ++} activate_fw_params; ++ ++ ++#define ACTIVATE_FW_OPTIONS() \ ++OPT_UINTEGER('a', "action", &activate_fw_params.action, "Action"), \ ++OPT_UINTEGER('s', "slot", &activate_fw_params.slot, "Slot") ++ ++static const struct option cmd_activate_fw_options[] = { ++ BASE_OPTIONS(), ++ ACTIVATE_FW_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_alert_config_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _alert_config_params { ++ u32 alert_prog_threshold; ++ u32 device_temp_threshold; ++ u32 mem_error_threshold; ++ bool verbose; ++} alert_config_params; ++ ++ ++#define SET_ALERT_CONFIG_OPTIONS() \ ++OPT_UINTEGER('a', "alert_prog_threshold", &alert_config_params.alert_prog_threshold, "Set valid, enable alert actions and life used programmable threshold. Fields: Valid Alert Actions (1B), Enable Alert Actions (1B), Life Used Programmable Warning Threshold (1B)"), \ ++OPT_UINTEGER('d', "device_temp_threshold", &alert_config_params.device_temp_threshold, "Set device over/under temp thresholds. Fields: Device Over-Temperature Programmable Warning Threshold (2B), Device Under-Temperature Programmable Warning Threshold (2B)"), \ ++OPT_UINTEGER('m', "mem_error_threshold", &alert_config_params.mem_error_threshold, "Set memory corrected thresholds. Fields: Corrected Volatile Memory Error Programmable Warning Threshold (2B), Corrected Persistent Memory Error Programmable Warning Threshold (2B)") ++ ++static const struct option cmd_set_alert_config_options[] = { ++ BASE_OPTIONS(), ++ SET_ALERT_CONFIG_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_get_health_info_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++ ++ ++static const struct option cmd_get_ld_info_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ddr_info_params { ++ bool verbose; ++ int ddr_id; ++} ddr_info_params; ++ ++ ++#define DDR_INFO_OPTIONS() \ ++OPT_INTEGER('i', "ddr_id", &ddr_info_params.ddr_id, "DDR instance id") ++ ++ ++static const struct option cmd_ddr_info_options[] = { ++ BASE_OPTIONS(), ++ DDR_INFO_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _get_event_records_params { ++ int event_log_type; /* 00 - information, 01 - warning, 02 - failure, 03 - fatal */ ++ bool verbose; ++} get_event_records_params; ++ ++ ++#define GET_EVENT_RECORDS_OPTIONS() \ ++OPT_INTEGER('t', "log_type", &get_event_records_params.event_log_type, "Event log type (00 - information (default), 01 - warning, 02 - failure, 03 - fatal)") ++ ++static const struct option cmd_get_event_records_options[] = { ++ BASE_OPTIONS(), ++ GET_EVENT_RECORDS_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _clear_event_records_params { ++ int event_log_type; /* 00 - information, 01 - warning, 02 - failure, 03 - fatal */ ++ int clear_event_flags; /* bit 0 - when set, clears all events */ ++ unsigned event_record_handle; /* only one is supported */ ++ bool verbose; ++} clear_event_records_params; ++ ++ ++#define CLEAR_EVENT_RECORDS_OPTIONS() \ ++OPT_INTEGER('t', "log_type", &clear_event_records_params.event_log_type, 
"Event log type (00 - information (default), 01 - warning, 02 - failure, 03 - fatal)"), \ ++OPT_INTEGER('f', "event_flag", &clear_event_records_params.clear_event_flags, "Clear Event Flags: 1 - clear all events, 0 (default) - clear specific event record"), \ ++OPT_UINTEGER('i', "event_record_handle", &clear_event_records_params.event_record_handle, "Clear Specific Event specific by Event Record Handle") ++ ++static const struct option cmd_clear_event_records_options[] = { ++ BASE_OPTIONS(), ++ CLEAR_EVENT_RECORDS_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _hct_start_stop_trigger_params { ++ u32 hct_inst; ++ u32 buf_control; ++ bool verbose; ++} hct_start_stop_trigger_params; ++ ++ ++#define HCT_START_STOP_TRIGGER_OPTIONS() \ ++OPT_UINTEGER('i', "hct_inst", &hct_start_stop_trigger_params.hct_inst, "HCT Instance"), \ ++OPT_UINTEGER('b', "buf_control", &hct_start_stop_trigger_params.buf_control, "Buffer Control") ++ ++static const struct option cmd_hct_start_stop_trigger_options[] = { ++ BASE_OPTIONS(), ++ HCT_START_STOP_TRIGGER_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _hct_get_buffer_status_params { ++ u32 hct_inst; ++ bool verbose; ++} hct_get_buffer_status_params; ++ ++ ++#define HCT_GET_BUFFER_STATUS_OPTIONS() \ ++OPT_UINTEGER('i', "hct_inst", &hct_get_buffer_status_params.hct_inst, "HCT Instance") ++ ++static const struct option cmd_hct_get_buffer_status_options[] = { ++ BASE_OPTIONS(), ++ HCT_GET_BUFFER_STATUS_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _hct_enable_params { ++ u32 hct_inst; ++ bool verbose; ++} hct_enable_params; ++ ++ ++#define HCT_ENABLE_OPTIONS() \ ++OPT_UINTEGER('i', "hct_inst", &hct_enable_params.hct_inst, "HCT Instance") ++ ++static const struct option cmd_hct_enable_options[] = { ++ BASE_OPTIONS(), ++ HCT_ENABLE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_capture_clear_params { ++ u32 cxl_mem_id; ++ bool verbose; ++} ltmon_capture_clear_params; ++ ++ ++#define LTMON_CAPTURE_CLEAR_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_capture_clear_params.cxl_mem_id, "CXL.MEM ID") ++ ++static const struct option cmd_ltmon_capture_clear_options[] = { ++ BASE_OPTIONS(), ++ LTMON_CAPTURE_CLEAR_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_capture_params { ++ u32 cxl_mem_id; ++ u32 capt_mode; ++ u32 ignore_sub_chg; ++ u32 ignore_rxl0_chg; ++ u32 trig_src_sel; ++ bool verbose; ++} ltmon_capture_params; ++ ++ ++#define LTMON_CAPTURE_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_capture_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('d', "capt_mode", <mon_capture_params.capt_mode, "Capture Mode"), \ ++OPT_UINTEGER('i', "ignore_sub_chg", <mon_capture_params.ignore_sub_chg, "Ignore Sub Change"), \ ++OPT_UINTEGER('j', "ignore_rxl0_chg", <mon_capture_params.ignore_rxl0_chg, "Ignore Receiver L0 Change"), \ ++OPT_UINTEGER('t', "trig_src_sel", <mon_capture_params.trig_src_sel, "Trigger Source Selection") ++ ++static const struct option cmd_ltmon_capture_options[] = { ++ BASE_OPTIONS(), ++ LTMON_CAPTURE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_capture_freeze_and_restore_params { ++ u32 cxl_mem_id; ++ u32 freeze_restore; ++ bool verbose; ++} ltmon_capture_freeze_and_restore_params; ++ ++ ++#define LTMON_CAPTURE_FREEZE_AND_RESTORE_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_capture_freeze_and_restore_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('f', "freeze_restore", <mon_capture_freeze_and_restore_params.freeze_restore, "Freeze Restore") ++ ++static const struct option 
cmd_ltmon_capture_freeze_and_restore_options[] = { ++ BASE_OPTIONS(), ++ LTMON_CAPTURE_FREEZE_AND_RESTORE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_l2r_count_dump_params { ++ u32 cxl_mem_id; ++ bool verbose; ++} ltmon_l2r_count_dump_params; ++ ++ ++#define LTMON_L2R_COUNT_DUMP_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_l2r_count_dump_params.cxl_mem_id, "CXL.MEM ID") ++ ++static const struct option cmd_ltmon_l2r_count_dump_options[] = { ++ BASE_OPTIONS(), ++ LTMON_L2R_COUNT_DUMP_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_l2r_count_clear_params { ++ u32 cxl_mem_id; ++ bool verbose; ++} ltmon_l2r_count_clear_params; ++ ++ ++#define LTMON_L2R_COUNT_CLEAR_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_l2r_count_clear_params.cxl_mem_id, "CXL.MEM ID") ++ ++static const struct option cmd_ltmon_l2r_count_clear_options[] = { ++ BASE_OPTIONS(), ++ LTMON_L2R_COUNT_CLEAR_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_basic_cfg_params { ++ u32 cxl_mem_id; ++ u32 tick_cnt; ++ u32 global_ts; ++ bool verbose; ++} ltmon_basic_cfg_params; ++ ++ ++#define LTMON_BASIC_CFG_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_basic_cfg_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('t', "tick_cnt", <mon_basic_cfg_params.tick_cnt, "Tick Count"), \ ++OPT_UINTEGER('g', "global_ts", <mon_basic_cfg_params.global_ts, "Global Time Stamp") ++ ++static const struct option cmd_ltmon_basic_cfg_options[] = { ++ BASE_OPTIONS(), ++ LTMON_BASIC_CFG_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_watch_params { ++ u32 cxl_mem_id; ++ u32 watch_id; ++ u32 watch_mode; ++ u32 src_maj_st; ++ u32 src_min_st; ++ u32 src_l0_st; ++ u32 dst_maj_st; ++ u32 dst_min_st; ++ u32 dst_l0_st; ++ bool verbose; ++} ltmon_watch_params; ++ ++ ++#define LTMON_WATCH_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_watch_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('w', "watch_id", <mon_watch_params.watch_id, "Watch ID"), \ ++OPT_UINTEGER('x', "watch_mode", <mon_watch_params.watch_mode, "Watch Mode"), \ ++OPT_UINTEGER('s', "src_maj_st", <mon_watch_params.src_maj_st, "Source Maj State"), \ ++OPT_UINTEGER('t', "src_min_st", <mon_watch_params.src_min_st, "Source Min State"), \ ++OPT_UINTEGER('u', "src_l0_st", <mon_watch_params.src_l0_st, "Source L0 State"), \ ++OPT_UINTEGER('d', "dst_maj_st", <mon_watch_params.dst_maj_st, "Destination Maj State"), \ ++OPT_UINTEGER('e', "dst_min_st", <mon_watch_params.dst_min_st, "Destination Min State"), \ ++OPT_UINTEGER('f', "dst_l0_st", <mon_watch_params.dst_l0_st, "Destination L0 State") ++ ++static const struct option cmd_ltmon_watch_options[] = { ++ BASE_OPTIONS(), ++ LTMON_WATCH_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_capture_stat_params { ++ u32 cxl_mem_id; ++ bool verbose; ++} ltmon_capture_stat_params; ++ ++ ++#define LTMON_CAPTURE_STAT_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_capture_stat_params.cxl_mem_id, "CXL.MEM ID") ++ ++static const struct option cmd_ltmon_capture_stat_options[] = { ++ BASE_OPTIONS(), ++ LTMON_CAPTURE_STAT_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_capture_log_dmp_params { ++ u32 cxl_mem_id; ++ u32 dump_idx; ++ u32 dump_cnt; ++ bool verbose; ++} ltmon_capture_log_dmp_params; ++ ++ ++#define LTMON_CAPTURE_LOG_DMP_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_capture_log_dmp_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('d', "dump_idx", <mon_capture_log_dmp_params.dump_idx, "Dump Index"), \ ++OPT_UINTEGER('e', "dump_cnt", <mon_capture_log_dmp_params.dump_cnt, "Dump Count") ++ 
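For context on how callers are expected to drive the command declarations added in cxl/libcxl.h above: the header exposes a new/submit/unref flow around struct cxl_cmd. The sketch below is illustrative only and is not part of the patch hunks; it uses only functions declared in that header, assumes the conventional 0-on-success return from cxl_cmd_submit(), cxl_cmd_get_mbox_status() and cxl_cmd_identify_get_fw_rev(), and the helper name dump_fw_revisions and the "cxl/libcxl.h" include path are placeholders.

        /*
         * Minimal caller sketch (not from the patch): enumerate memdevs and
         * print each device's firmware revision via the IDENTIFY command
         * helpers declared in cxl/libcxl.h.
         */
        #include <stdio.h>
        #include "cxl/libcxl.h"         /* include path is an assumption */

        static void dump_fw_revisions(struct cxl_ctx *ctx)
        {
                struct cxl_memdev *memdev;

                cxl_memdev_foreach(ctx, memdev) {
                        struct cxl_cmd *cmd = cxl_cmd_new_identify(memdev);
                        char fw_rev[0x10];      /* matches the 0x10 fw_revision field */

                        if (!cmd)
                                continue;
                        if (cxl_cmd_submit(cmd) == 0 &&
                            cxl_cmd_get_mbox_status(cmd) == 0 &&
                            cxl_cmd_identify_get_fw_rev(cmd, fw_rev, sizeof(fw_rev)) == 0)
                                /* fw_rev may not be NUL terminated, so bound the print */
                                printf("%s: %.16s\n",
                                       cxl_memdev_get_devname(memdev), fw_rev);
                        cxl_cmd_unref(cmd);
                }
        }

        int main(void)
        {
                struct cxl_ctx *ctx;

                if (cxl_new(&ctx))
                        return 1;
                dump_fw_revisions(ctx);
                cxl_unref(ctx);
                return 0;
        }

cxl_cmd_unref() is reached on every path so the refcounted command buffer is released even when submit or the mailbox status reports a failure.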
++static const struct option cmd_ltmon_capture_log_dmp_options[] = { ++ BASE_OPTIONS(), ++ LTMON_CAPTURE_LOG_DMP_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_capture_trigger_params { ++ u32 cxl_mem_id; ++ u32 trig_src; ++ bool verbose; ++} ltmon_capture_trigger_params; ++ ++ ++#define LTMON_CAPTURE_TRIGGER_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_capture_trigger_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('t', "trig_src", <mon_capture_trigger_params.trig_src, "Trigger Source") ++ ++static const struct option cmd_ltmon_capture_trigger_options[] = { ++ BASE_OPTIONS(), ++ LTMON_CAPTURE_TRIGGER_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _ltmon_enable_params { ++ u32 cxl_mem_id; ++ u32 enable; ++ bool verbose; ++} ltmon_enable_params; ++ ++ ++#define LTMON_ENABLE_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", <mon_enable_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('e', "enable", <mon_enable_params.enable, "Enable") ++ ++static const struct option cmd_ltmon_enable_options[] = { ++ BASE_OPTIONS(), ++ LTMON_ENABLE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _osa_os_type_trig_cfg_params { ++ u32 cxl_mem_id; ++ u32 lane_mask; ++ u32 lane_dir_mask; ++ u32 rate_mask; ++ u32 os_type_mask; ++ bool verbose; ++} osa_os_type_trig_cfg_params; ++ ++ ++#define OSA_OS_TYPE_TRIG_CFG_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &osa_os_type_trig_cfg_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('l', "lane_mask", &osa_os_type_trig_cfg_params.lane_mask, "Lane Mask"), \ ++OPT_UINTEGER('m', "lane_dir_mask", &osa_os_type_trig_cfg_params.lane_dir_mask, "Lane Direction Mask (see OSA_LANE_DIR_BITMSK_*)"), \ ++OPT_UINTEGER('r', "rate_mask", &osa_os_type_trig_cfg_params.rate_mask, "Link Rate mask (see OSA_LINK_RATE_BITMSK_*)"), \ ++OPT_UINTEGER('o', "os_type_mask", &osa_os_type_trig_cfg_params.os_type_mask, "OS Type mask (see OSA_OS_TYPE_TRIG_BITMSK_*)") ++ ++static const struct option cmd_osa_os_type_trig_cfg_options[] = { ++ BASE_OPTIONS(), ++ OSA_OS_TYPE_TRIG_CFG_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _osa_cap_ctrl_params { ++ u32 cxl_mem_id; ++ u32 lane_mask; ++ u32 lane_dir_mask; ++ u32 drop_single_os; ++ u32 stop_mode; ++ u32 snapshot_mode; ++ u32 post_trig_num; ++ u32 os_type_mask; ++ bool verbose; ++} osa_cap_ctrl_params; ++ ++ ++#define OSA_CAP_CTRL_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &osa_cap_ctrl_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('l', "lane_mask", &osa_cap_ctrl_params.lane_mask, "Lane Mask"), \ ++OPT_UINTEGER('m', "lane_dir_mask", &osa_cap_ctrl_params.lane_dir_mask, "Lane Direction Mask (see OSA_LANE_DIR_BITMSK_*)"), \ ++OPT_UINTEGER('d', "drop_single_os", &osa_cap_ctrl_params.drop_single_os, "Drop Single OS's (TS1/TS2/FTS/CTL_SKP)"), \ ++OPT_UINTEGER('s', "stop_mode", &osa_cap_ctrl_params.stop_mode, "Capture Stop Mode (see osa_cap_stop_mode_enum)"), \ ++OPT_UINTEGER('t', "snapshot_mode", &osa_cap_ctrl_params.snapshot_mode, "Snapshot Mode Enable"), \ ++OPT_UINTEGER('p', "post_trig_num", &osa_cap_ctrl_params.post_trig_num, "Number of post-trigger entries"), \ ++OPT_UINTEGER('o', "os_type_mask", &osa_cap_ctrl_params.os_type_mask, "OS Type mask (see OSA_OS_TYPE_CAP_BITMSK_*)") ++ ++static const struct option cmd_osa_cap_ctrl_options[] = { ++ BASE_OPTIONS(), ++ OSA_CAP_CTRL_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _osa_cfg_dump_params { ++ u32 cxl_mem_id; ++ bool verbose; ++} osa_cfg_dump_params; ++ ++ ++#define OSA_CFG_DUMP_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &osa_cfg_dump_params.cxl_mem_id, "CXL.MEM ID") ++ 
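Besides the typed helpers, cxl/libcxl.h above also declares a raw command path (cxl_cmd_new_raw(), cxl_cmd_set_input_payload(), cxl_cmd_get_out_size()). A hedged sketch of how an arbitrary opcode could be pushed through it follows; the opcode value 0xC500, the 4-byte input payload, and the helper name send_raw_example are placeholders and do not come from this patch.

        #include <errno.h>
        #include <stdio.h>
        #include "cxl/libcxl.h"         /* include path is an assumption */

        /* Sketch only: drive the raw command interface declared above. */
        static int send_raw_example(struct cxl_memdev *memdev)
        {
                /* 0xC500 is a placeholder opcode, not one defined by this patch */
                struct cxl_cmd *cmd = cxl_cmd_new_raw(memdev, 0xC500);
                unsigned char in[4] = { 0 };    /* placeholder input payload */
                int rc;

                if (!cmd)
                        return -ENOMEM;

                rc = cxl_cmd_set_input_payload(cmd, in, sizeof(in));
                if (rc == 0)
                        rc = cxl_cmd_submit(cmd);
                if (rc == 0)
                        rc = cxl_cmd_get_mbox_status(cmd);
                if (rc == 0)
                        printf("%s: %d output bytes\n",
                               cxl_memdev_get_devname(memdev),
                               cxl_cmd_get_out_size(cmd));

                cxl_cmd_unref(cmd);
                return rc;
        }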
++static const struct option cmd_osa_cfg_dump_options[] = { ++ BASE_OPTIONS(), ++ OSA_CFG_DUMP_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _osa_ana_op_params { ++ u32 cxl_mem_id; ++ u32 op; ++ bool verbose; ++} osa_ana_op_params; ++ ++ ++#define OSA_ANA_OP_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &osa_ana_op_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('o', "op", &osa_ana_op_params.op, "Operation (see osa_op_enum)") ++ ++static const struct option cmd_osa_ana_op_options[] = { ++ BASE_OPTIONS(), ++ OSA_ANA_OP_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _osa_status_query_params { ++ u32 cxl_mem_id; ++ bool verbose; ++} osa_status_query_params; ++ ++ ++#define OSA_STATUS_QUERY_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &osa_status_query_params.cxl_mem_id, "CXL.MEM ID") ++ ++static const struct option cmd_osa_status_query_options[] = { ++ BASE_OPTIONS(), ++ OSA_STATUS_QUERY_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _osa_access_rel_params { ++ u32 cxl_mem_id; ++ bool verbose; ++} osa_access_rel_params; ++ ++ ++#define OSA_ACCESS_REL_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &osa_access_rel_params.cxl_mem_id, "CXL.MEM ID") ++ ++static const struct option cmd_osa_access_rel_options[] = { ++ BASE_OPTIONS(), ++ OSA_ACCESS_REL_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_ltif_set_params { ++ u32 counter; ++ u32 match_value; ++ u32 opcode; ++ u32 meta_field; ++ u32 meta_value; ++ bool verbose; ++} perfcnt_mta_ltif_set_params; ++ ++ ++#define PERFCNT_MTA_LTIF_SET_OPTIONS() \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_ltif_set_params.counter, "Counter"), \ ++OPT_UINTEGER('m', "match_value", &perfcnt_mta_ltif_set_params.match_value, "Match Value"), \ ++OPT_UINTEGER('o', "opcode", &perfcnt_mta_ltif_set_params.opcode, "Opcode"), \ ++OPT_UINTEGER('n', "meta_field", &perfcnt_mta_ltif_set_params.meta_field, "Meta Field"), \ ++OPT_UINTEGER('p', "meta_value", &perfcnt_mta_ltif_set_params.meta_value, "Meta Value") ++ ++static const struct option cmd_perfcnt_mta_ltif_set_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_MTA_LTIF_SET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_get_params { ++ u32 type; ++ u32 counter; ++ bool verbose; ++} perfcnt_mta_get_params; ++ ++ ++#define PERFCNT_MTA_GET_OPTIONS() \ ++OPT_UINTEGER('t', "type", &perfcnt_mta_get_params.type, "Type"), \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_get_params.counter, "Counter") ++ ++static const struct option cmd_perfcnt_mta_get_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_MTA_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_latch_val_get_params { ++ u32 type; ++ u32 counter; ++ bool verbose; ++} perfcnt_mta_latch_val_get_params; ++ ++ ++#define PERFCNT_MTA_LATCH_VAL_GET_OPTIONS() \ ++OPT_UINTEGER('t', "type", &perfcnt_mta_latch_val_get_params.type, "Type"), \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_latch_val_get_params.counter, "Counter") ++ ++static const struct option cmd_perfcnt_mta_latch_val_get_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_MTA_LATCH_VAL_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_counter_clear_params { ++ u32 type; ++ u32 counter; ++ bool verbose; ++} perfcnt_mta_counter_clear_params; ++ ++ ++#define PERFCNT_MTA_COUNTER_CLEAR_OPTIONS() \ ++OPT_UINTEGER('t', "type", &perfcnt_mta_counter_clear_params.type, "Type"), \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_counter_clear_params.counter, "Counter") ++ ++static const struct option cmd_perfcnt_mta_counter_clear_options[] = { ++ BASE_OPTIONS(), ++ 
PERFCNT_MTA_COUNTER_CLEAR_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_cnt_val_latch_params { ++ u32 type; ++ u32 counter; ++ bool verbose; ++} perfcnt_mta_cnt_val_latch_params; ++ ++ ++#define PERFCNT_MTA_CNT_VAL_LATCH_OPTIONS() \ ++OPT_UINTEGER('t', "type", &perfcnt_mta_cnt_val_latch_params.type, "Type"), \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_cnt_val_latch_params.counter, "Counter") ++ ++static const struct option cmd_perfcnt_mta_cnt_val_latch_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_MTA_CNT_VAL_LATCH_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_hif_set_params { ++ u32 counter; ++ u32 match_value; ++ u32 addr; ++ u32 req_ty; ++ u32 sc_ty; ++ bool verbose; ++} perfcnt_mta_hif_set_params; ++ ++ ++#define PERFCNT_MTA_HIF_SET_OPTIONS() \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_hif_set_params.counter, "Counter"), \ ++OPT_UINTEGER('m', "match_value", &perfcnt_mta_hif_set_params.match_value, "Match Value"), \ ++OPT_UINTEGER('a', "addr", &perfcnt_mta_hif_set_params.addr, "Address"), \ ++OPT_UINTEGER('r', "req_ty", &perfcnt_mta_hif_set_params.req_ty, "Req Type"), \ ++OPT_UINTEGER('s', "sc_ty", &perfcnt_mta_hif_set_params.sc_ty, "Scrub Req") ++ ++static const struct option cmd_perfcnt_mta_hif_set_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_MTA_HIF_SET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_hif_cfg_get_params { ++ u32 counter; ++ bool verbose; ++} perfcnt_mta_hif_cfg_get_params; ++ ++ ++#define PERFCNT_MTA_HIF_CFG_GET_OPTIONS() \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_hif_cfg_get_params.counter, "Counter") ++ ++static const struct option cmd_perfcnt_mta_hif_cfg_get_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_MTA_HIF_CFG_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_hif_latch_val_get_params { ++ u32 counter; ++ bool verbose; ++} perfcnt_mta_hif_latch_val_get_params; ++ ++ ++#define PERFCNT_MTA_HIF_LATCH_VAL_GET_OPTIONS() \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_hif_latch_val_get_params.counter, "Counter") ++ ++static const struct option cmd_perfcnt_mta_hif_latch_val_get_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_MTA_HIF_LATCH_VAL_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_hif_counter_clear_params { ++ u32 counter; ++ bool verbose; ++} perfcnt_mta_hif_counter_clear_params; ++ ++ ++#define PERFCNT_MTA_HIF_COUNTER_CLEAR_OPTIONS() \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_hif_counter_clear_params.counter, "Counter") ++ ++static const struct option cmd_perfcnt_mta_hif_counter_clear_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_MTA_HIF_COUNTER_CLEAR_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_mta_hif_cnt_val_latch_params { ++ u32 counter; ++ bool verbose; ++} perfcnt_mta_hif_cnt_val_latch_params; ++ ++ ++#define PERFCNT_MTA_HIF_CNT_VAL_LATCH_OPTIONS() \ ++OPT_UINTEGER('c', "counter", &perfcnt_mta_hif_cnt_val_latch_params.counter, "Counter") ++ ++static const struct option cmd_perfcnt_mta_hif_cnt_val_latch_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_MTA_HIF_CNT_VAL_LATCH_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_ddr_generic_select_params { ++ u32 ddr_id; ++ u32 cid; ++ u32 rank; ++ u32 bank; ++ u32 bankgroup; ++ u64 event; ++ bool verbose; ++} perfcnt_ddr_generic_select_params; ++ ++ ++#define PERFCNT_DDR_GENERIC_SELECT_OPTIONS() \ ++OPT_UINTEGER('d', "ddr_id", &perfcnt_ddr_generic_select_params.ddr_id, "DDR instance"), \ ++OPT_UINTEGER('c', "cid", &perfcnt_ddr_generic_select_params.cid, "CID selection"), \ ++OPT_UINTEGER('r', "rank", 
&perfcnt_ddr_generic_select_params.rank, "Rank selection"), \ ++OPT_UINTEGER('b', "bank", &perfcnt_ddr_generic_select_params.bank, "Bank selection"), \ ++OPT_UINTEGER('e', "bankgroup", &perfcnt_ddr_generic_select_params.bankgroup, "Bank Group selection"), \ ++OPT_U64('f', "event", &perfcnt_ddr_generic_select_params.event, "Events selection") ++ ++static const struct option cmd_perfcnt_ddr_generic_select_options[] = { ++ BASE_OPTIONS(), ++ PERFCNT_DDR_GENERIC_SELECT_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_ddr_generic_capture_params { ++ u32 ddr_id; ++ u32 poll_period_ms; ++ bool verbose; ++} perfcnt_ddr_generic_capture_params; ++ ++#define PERFCNT_DDR_GENERIC_CAPTURE_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &perfcnt_ddr_generic_capture_params.verbose, "turn on debug") ++ ++#define PERFCNT_DDR_GENERIC_CAPTURE_OPTIONS() \ ++OPT_UINTEGER('d', "ddr_id", &perfcnt_ddr_generic_capture_params.ddr_id, "DDR instance"), \ ++OPT_UINTEGER('c', "poll_period_ms", &perfcnt_ddr_generic_capture_params.poll_period_ms, "Capture-time in ms") ++ ++static const struct option cmd_perfcnt_ddr_generic_capture_options[] = { ++ PERFCNT_DDR_GENERIC_CAPTURE_BASE_OPTIONS(), ++ PERFCNT_DDR_GENERIC_CAPTURE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _perfcnt_ddr_dfi_capture_params { ++ u32 ddr_id; ++ u32 poll_period_ms; ++ bool verbose; ++} perfcnt_ddr_dfi_capture_params; ++ ++#define PERFCNT_DDR_DFI_CAPTURE_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &perfcnt_ddr_dfi_capture_params.verbose, "turn on debug") ++ ++#define PERFCNT_DDR_DFI_CAPTURE_OPTIONS() \ ++OPT_UINTEGER('d', "ddr_id", &perfcnt_ddr_dfi_capture_params.ddr_id, "DDR instance"), \ ++OPT_UINTEGER('c', "poll_period_ms", &perfcnt_ddr_dfi_capture_params.poll_period_ms, "Capture-time in ms") ++ ++static const struct option cmd_perfcnt_ddr_dfi_capture_options[] = { ++ PERFCNT_DDR_DFI_CAPTURE_BASE_OPTIONS(), ++ PERFCNT_DDR_DFI_CAPTURE_OPTIONS(), ++ OPT_END(), ++}; ++ ++ ++static struct _err_inj_drs_poison_params { ++ u32 ch_id; ++ u32 duration; ++ u32 inj_mode; ++ u32 tag; ++ bool verbose; ++} err_inj_drs_poison_params; ++ ++ ++#define ERR_INJ_DRS_POISON_OPTIONS() \ ++OPT_UINTEGER('c', "ch_id", &err_inj_drs_poison_params.ch_id, "DRS channel"), \ ++OPT_UINTEGER('d', "duration", &err_inj_drs_poison_params.duration, "Duration"), \ ++OPT_UINTEGER('i', "inj_mode", &err_inj_drs_poison_params.inj_mode, "Injection mode"), \ ++OPT_UINTEGER('t', "tag", &err_inj_drs_poison_params.tag, "Tag") ++ ++static const struct option cmd_err_inj_drs_poison_options[] = { ++ BASE_OPTIONS(), ++ ERR_INJ_DRS_POISON_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _err_inj_drs_ecc_params { ++ u32 ch_id; ++ u32 duration; ++ u32 inj_mode; ++ u32 tag; ++ bool verbose; ++} err_inj_drs_ecc_params; ++ ++ ++#define ERR_INJ_DRS_ECC_OPTIONS() \ ++OPT_UINTEGER('c', "ch_id", &err_inj_drs_ecc_params.ch_id, "DRS channel"), \ ++OPT_UINTEGER('d', "duration", &err_inj_drs_ecc_params.duration, "Duration"), \ ++OPT_UINTEGER('i', "inj_mode", &err_inj_drs_ecc_params.inj_mode, "Injection mode"), \ ++OPT_UINTEGER('t', "tag", &err_inj_drs_ecc_params.tag, "Tag") ++ ++static const struct option cmd_err_inj_drs_ecc_options[] = { ++ BASE_OPTIONS(), ++ ERR_INJ_DRS_ECC_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _err_inj_rxflit_crc_params { ++ u32 cxl_mem_id; ++ bool verbose; ++} err_inj_rxflit_crc_params; ++ ++ ++#define ERR_INJ_RXFLIT_CRC_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &err_inj_rxflit_crc_params.cxl_mem_id, "CXL.mem instance") ++ ++static const struct option 
cmd_err_inj_rxflit_crc_options[] = { ++ BASE_OPTIONS(), ++ ERR_INJ_RXFLIT_CRC_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _err_inj_txflit_crc_params { ++ u32 cxl_mem_id; ++ bool verbose; ++} err_inj_txflit_crc_params; ++ ++ ++#define ERR_INJ_TXFLIT_CRC_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &err_inj_txflit_crc_params.cxl_mem_id, "CXL.mem instance") ++ ++static const struct option cmd_err_inj_txflit_crc_options[] = { ++ BASE_OPTIONS(), ++ ERR_INJ_TXFLIT_CRC_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _err_inj_viral_params { ++ u32 ld_id; ++ bool verbose; ++} err_inj_viral_params; ++ ++ ++#define ERR_INJ_VIRAL_OPTIONS() \ ++OPT_UINTEGER('l', "ld_id", &err_inj_viral_params.ld_id, "ld_id") ++ ++static const struct option cmd_err_inj_viral_options[] = { ++ BASE_OPTIONS(), ++ ERR_INJ_VIRAL_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_eye_cap_run_params { ++ u32 depth; ++ u32 lane_mask; ++ bool verbose; ++} eh_eye_cap_run_params; ++ ++ ++#define EH_EYE_CAP_RUN_OPTIONS() \ ++OPT_UINTEGER('d', "depth", &eh_eye_cap_run_params.depth, "capture depth (BT_DEPTH_MIN to BT_DEPTH_MAX)"), \ ++OPT_UINTEGER('l', "lane_mask", &eh_eye_cap_run_params.lane_mask, "lane mask") ++ ++static const struct option cmd_eh_eye_cap_run_options[] = { ++ BASE_OPTIONS(), ++ EH_EYE_CAP_RUN_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_eye_cap_read_params { ++ u32 lane_id; ++ u32 bin_num; ++ bool verbose; ++} eh_eye_cap_read_params; ++ ++ ++#define EH_EYE_CAP_READ_OPTIONS() \ ++OPT_UINTEGER('l', "lane_id", &eh_eye_cap_read_params.lane_id, "lane ID"), \ ++OPT_UINTEGER('b', "bin_num", &eh_eye_cap_read_params.bin_num, "bin number [0 .. BT_BIN_TOT - 1]") ++ ++static const struct option cmd_eh_eye_cap_read_options[] = { ++ BASE_OPTIONS(), ++ EH_EYE_CAP_READ_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_eye_cap_timeout_enable_params { ++ u32 enable; ++ bool verbose; ++} eh_eye_cap_timeout_enable_params; ++ ++#define EH_EYE_CAP_TIMEOUT_ENABLE_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &eh_eye_cap_timeout_enable_params.verbose, "turn on debug") ++ ++#define EH_EYE_CAP_TIMEOUT_ENABLE_OPTIONS() \ ++OPT_UINTEGER('e', "enable", &eh_eye_cap_timeout_enable_params.enable, "enable (0: Disable, 1: Enable)") ++ ++static const struct option cmd_eh_eye_cap_timeout_enable_options[] = { ++ EH_EYE_CAP_TIMEOUT_ENABLE_BASE_OPTIONS(), ++ EH_EYE_CAP_TIMEOUT_ENABLE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_eye_cap_status_params { ++ bool verbose; ++} eh_eye_cap_status_params; ++ ++#define EH_EYE_CAP_STATUS_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &eh_eye_cap_status_params.verbose, "turn on debug") ++ ++static const struct option cmd_eh_eye_cap_status_options[] = { ++ EH_EYE_CAP_STATUS_BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_adapt_get_params { ++ u32 lane_id; ++ bool verbose; ++} eh_adapt_get_params; ++ ++ ++#define EH_ADAPT_GET_OPTIONS() \ ++OPT_UINTEGER('l', "lane_id", &eh_adapt_get_params.lane_id, "lane id") ++ ++static const struct option cmd_eh_adapt_get_options[] = { ++ BASE_OPTIONS(), ++ EH_ADAPT_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_adapt_oneoff_params { ++ u32 lane_id; ++ u32 preload; ++ u32 loops; ++ u32 objects; ++ bool verbose; ++} eh_adapt_oneoff_params; ++ ++ ++#define EH_ADAPT_ONEOFF_OPTIONS() \ ++OPT_UINTEGER('l', "lane_id", &eh_adapt_oneoff_params.lane_id, "lane id"), \ ++OPT_UINTEGER('p', "preload", &eh_adapt_oneoff_params.preload, "Adaption objects preload enable"), \ ++OPT_UINTEGER('m', "loops", &eh_adapt_oneoff_params.loops, "Adaptions loop"), 
\ ++OPT_UINTEGER('o', "objects", &eh_adapt_oneoff_params.objects, "Adaption objects enable") ++ ++static const struct option cmd_eh_adapt_oneoff_options[] = { ++ BASE_OPTIONS(), ++ EH_ADAPT_ONEOFF_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_stopconfig_set_params { ++ u32 fbist_id; ++ u32 stop_on_wresp; ++ u32 stop_on_rresp; ++ u32 stop_on_rdataerr; ++ bool verbose; ++} fbist_stopconfig_set_params; ++ ++#define FBIST_STOPCONFIG_SET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_stopconfig_set_params.verbose, "turn on debug") ++ ++#define FBIST_STOPCONFIG_SET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_stopconfig_set_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('w', "stop_on_wresp", &fbist_stopconfig_set_params.stop_on_wresp, "Stop on Write Response"), \ ++OPT_UINTEGER('r', "stop_on_rresp", &fbist_stopconfig_set_params.stop_on_rresp, "Stop on Read Response"), \ ++OPT_UINTEGER('e', "stop_on_rdataerr", &fbist_stopconfig_set_params.stop_on_rdataerr, "Stop on Read Data Error") ++ ++static const struct option cmd_fbist_stopconfig_set_options[] = { ++ FBIST_STOPCONFIG_SET_BASE_OPTIONS(), ++ FBIST_STOPCONFIG_SET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_cyclecount_set_params { ++ u32 fbist_id; ++ u32 txg_nr; ++ u64 cyclecount; ++ bool verbose; ++} fbist_cyclecount_set_params; ++ ++#define FBIST_CYCLECOUNT_SET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_cyclecount_set_params.verbose, "turn on debug") ++ ++#define FBIST_CYCLECOUNT_SET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_cyclecount_set_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "txg_nr", &fbist_cyclecount_set_params.txg_nr, "TXG Nr"), \ ++OPT_U64('c', "cyclecount", &fbist_cyclecount_set_params.cyclecount, "cyclecount") ++ ++static const struct option cmd_fbist_cyclecount_set_options[] = { ++ FBIST_CYCLECOUNT_SET_BASE_OPTIONS(), ++ FBIST_CYCLECOUNT_SET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_reset_set_params { ++ u32 fbist_id; ++ u32 txg0_reset; ++ u32 txg1_reset; ++ bool verbose; ++} fbist_reset_set_params; ++ ++#define FBIST_RESET_SET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_reset_set_params.verbose, "turn on debug") ++ ++#define FBIST_RESET_SET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_reset_set_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "txg0_reset", &fbist_reset_set_params.txg0_reset, "TXG0 Reset"), \ ++OPT_UINTEGER('u', "txg1_reset", &fbist_reset_set_params.txg1_reset, "TXG1 Reset") ++ ++static const struct option cmd_fbist_reset_set_options[] = { ++ FBIST_RESET_SET_BASE_OPTIONS(), ++ FBIST_RESET_SET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_run_set_params { ++ u32 fbist_id; ++ u32 txg0_run; ++ u32 txg1_run; ++ bool verbose; ++} fbist_run_set_params; ++ ++#define FBIST_RUN_SET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_run_set_params.verbose, "turn on debug") ++ ++#define FBIST_RUN_SET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_run_set_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "txg0_run", &fbist_run_set_params.txg0_run, "TXG0 Run"), \ ++OPT_UINTEGER('u', "txg1_run", &fbist_run_set_params.txg1_run, "TXG1 Run") ++ ++static const struct option cmd_fbist_run_set_options[] = { ++ FBIST_RUN_SET_BASE_OPTIONS(), ++ FBIST_RUN_SET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_run_get_params { ++ u32 fbist_id; ++ bool verbose; ++} fbist_run_get_params; ++ ++#define FBIST_RUN_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", 
&fbist_run_get_params.verbose, "turn on debug") ++ ++#define FBIST_RUN_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_run_get_params.fbist_id, "Flex BIST Instance") ++ ++static const struct option cmd_fbist_run_get_options[] = { ++ FBIST_RUN_GET_BASE_OPTIONS(), ++ FBIST_RUN_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_xfer_rem_cnt_get_params { ++ u32 fbist_id; ++ u32 thread_nr; ++ bool verbose; ++} fbist_xfer_rem_cnt_get_params; ++ ++#define FBIST_XFER_REM_CNT_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_xfer_rem_cnt_get_params.verbose, "turn on debug") ++ ++#define FBIST_XFER_REM_CNT_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_xfer_rem_cnt_get_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "thread_nr", &fbist_xfer_rem_cnt_get_params.thread_nr, "Thread Nr") ++ ++static const struct option cmd_fbist_xfer_rem_cnt_get_options[] = { ++ FBIST_XFER_REM_CNT_GET_BASE_OPTIONS(), ++ FBIST_XFER_REM_CNT_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_last_exp_read_data_get_params { ++ u32 fbist_id; ++ bool verbose; ++} fbist_last_exp_read_data_get_params; ++ ++#define FBIST_LAST_EXP_READ_DATA_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_last_exp_read_data_get_params.verbose, "turn on debug") ++ ++#define FBIST_LAST_EXP_READ_DATA_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_last_exp_read_data_get_params.fbist_id, "Flex BIST Instance") ++ ++static const struct option cmd_fbist_last_exp_read_data_get_options[] = { ++ FBIST_LAST_EXP_READ_DATA_GET_BASE_OPTIONS(), ++ FBIST_LAST_EXP_READ_DATA_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_curr_cycle_cnt_get_params { ++ u32 fbist_id; ++ u32 txg_nr; ++ bool verbose; ++} fbist_curr_cycle_cnt_get_params; ++ ++#define FBIST_CURR_CYCLE_CNT_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_curr_cycle_cnt_get_params.verbose, "turn on debug") ++ ++#define FBIST_CURR_CYCLE_CNT_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_curr_cycle_cnt_get_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "txg_nr", &fbist_curr_cycle_cnt_get_params.txg_nr, "TXG Nr") ++ ++static const struct option cmd_fbist_curr_cycle_cnt_get_options[] = { ++ FBIST_CURR_CYCLE_CNT_GET_BASE_OPTIONS(), ++ FBIST_CURR_CYCLE_CNT_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_thread_status_get_params { ++ u32 fbist_id; ++ u32 txg_nr; ++ u32 thread_nr; ++ bool verbose; ++} fbist_thread_status_get_params; ++ ++#define FBIST_THREAD_STATUS_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_thread_status_get_params.verbose, "turn on debug") ++ ++#define FBIST_THREAD_STATUS_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_thread_status_get_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "txg_nr", &fbist_thread_status_get_params.txg_nr, "TXG Nr"), \ ++OPT_UINTEGER('u', "thread_nr", &fbist_thread_status_get_params.thread_nr, "Thread Nr") ++ ++static const struct option cmd_fbist_thread_status_get_options[] = { ++ FBIST_THREAD_STATUS_GET_BASE_OPTIONS(), ++ FBIST_THREAD_STATUS_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_thread_trans_cnt_get_params { ++ u32 fbist_id; ++ u32 txg_nr; ++ u32 thread_nr; ++ bool verbose; ++} fbist_thread_trans_cnt_get_params; ++ ++#define FBIST_THREAD_TRANS_CNT_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_thread_trans_cnt_get_params.verbose, "turn on debug") ++ ++#define FBIST_THREAD_TRANS_CNT_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_thread_trans_cnt_get_params.fbist_id, "Flex 
BIST Instance"), \ ++OPT_UINTEGER('t', "txg_nr", &fbist_thread_trans_cnt_get_params.txg_nr, "TXG Nr"), \ ++OPT_UINTEGER('u', "thread_nr", &fbist_thread_trans_cnt_get_params.thread_nr, "Thread Nr") ++ ++static const struct option cmd_fbist_thread_trans_cnt_get_options[] = { ++ FBIST_THREAD_TRANS_CNT_GET_BASE_OPTIONS(), ++ FBIST_THREAD_TRANS_CNT_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_thread_bandwidth_get_params { ++ u32 fbist_id; ++ u32 txg_nr; ++ u32 thread_nr; ++ bool verbose; ++} fbist_thread_bandwidth_get_params; ++ ++#define FBIST_THREAD_BANDWIDTH_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_thread_bandwidth_get_params.verbose, "turn on debug") ++ ++#define FBIST_THREAD_BANDWIDTH_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_thread_bandwidth_get_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "txg_nr", &fbist_thread_bandwidth_get_params.txg_nr, "TXG Nr"), \ ++OPT_UINTEGER('u', "thread_nr", &fbist_thread_bandwidth_get_params.thread_nr, "Thread Nr") ++ ++static const struct option cmd_fbist_thread_bandwidth_get_options[] = { ++ FBIST_THREAD_BANDWIDTH_GET_BASE_OPTIONS(), ++ FBIST_THREAD_BANDWIDTH_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_thread_latency_get_params { ++ u32 fbist_id; ++ u32 txg_nr; ++ u32 thread_nr; ++ bool verbose; ++} fbist_thread_latency_get_params; ++ ++#define FBIST_THREAD_LATENCY_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_thread_latency_get_params.verbose, "turn on debug") ++ ++#define FBIST_THREAD_LATENCY_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_thread_latency_get_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "txg_nr", &fbist_thread_latency_get_params.txg_nr, "TXG Nr"), \ ++OPT_UINTEGER('u', "thread_nr", &fbist_thread_latency_get_params.thread_nr, "Thread Nr") ++ ++static const struct option cmd_fbist_thread_latency_get_options[] = { ++ FBIST_THREAD_LATENCY_GET_BASE_OPTIONS(), ++ FBIST_THREAD_LATENCY_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_thread_perf_mon_set_params { ++ u32 fbist_id; ++ u32 txg_nr; ++ u32 thread_nr; ++ u32 pmon_preset_en; ++ u32 pmon_clear_en; ++ u32 pmon_rollover; ++ u32 pmon_thread_lclk; ++ bool verbose; ++} fbist_thread_perf_mon_set_params; ++ ++#define FBIST_THREAD_PERF_MON_SET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_thread_perf_mon_set_params.verbose, "turn on debug") ++ ++#define FBIST_THREAD_PERF_MON_SET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_thread_perf_mon_set_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "txg_nr", &fbist_thread_perf_mon_set_params.txg_nr, "TXG Nr"), \ ++OPT_UINTEGER('u', "thread_nr", &fbist_thread_perf_mon_set_params.thread_nr, "Thread Nr"), \ ++OPT_UINTEGER('p', "pmon_preset_en", &fbist_thread_perf_mon_set_params.pmon_preset_en, "Performance Monitor Preset Enable"), \ ++OPT_UINTEGER('c', "pmon_clear_en", &fbist_thread_perf_mon_set_params.pmon_clear_en, "Performance Monitor Clear Enable"), \ ++OPT_UINTEGER('r', "pmon_rollover", &fbist_thread_perf_mon_set_params.pmon_rollover, "Performance Monitor Rollover"), \ ++OPT_UINTEGER('l', "pmon_thread_lclk", &fbist_thread_perf_mon_set_params.pmon_thread_lclk, "Performance Monitor Thread lclk") ++ ++static const struct option cmd_fbist_thread_perf_mon_set_options[] = { ++ FBIST_THREAD_PERF_MON_SET_BASE_OPTIONS(), ++ FBIST_THREAD_PERF_MON_SET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_top_read_status0_get_params { ++ u32 fbist_id; ++ bool verbose; ++} fbist_top_read_status0_get_params; ++ 
++#define FBIST_TOP_READ_STATUS0_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_top_read_status0_get_params.verbose, "turn on debug") ++ ++#define FBIST_TOP_READ_STATUS0_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_top_read_status0_get_params.fbist_id, "Flex BIST Instance") ++ ++static const struct option cmd_fbist_top_read_status0_get_options[] = { ++ FBIST_TOP_READ_STATUS0_GET_BASE_OPTIONS(), ++ FBIST_TOP_READ_STATUS0_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_top_err_cnt_get_params { ++ u32 fbist_id; ++ bool verbose; ++} fbist_top_err_cnt_get_params; ++ ++#define FBIST_TOP_ERR_CNT_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_top_err_cnt_get_params.verbose, "turn on debug") ++ ++#define FBIST_TOP_ERR_CNT_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_top_err_cnt_get_params.fbist_id, "Flex BIST Instance") ++ ++static const struct option cmd_fbist_top_err_cnt_get_options[] = { ++ FBIST_TOP_ERR_CNT_GET_BASE_OPTIONS(), ++ FBIST_TOP_ERR_CNT_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_last_read_addr_get_params { ++ u32 fbist_id; ++ bool verbose; ++} fbist_last_read_addr_get_params; ++ ++#define FBIST_LAST_READ_ADDR_GET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_last_read_addr_get_params.verbose, "turn on debug") ++ ++#define FBIST_LAST_READ_ADDR_GET_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_last_read_addr_get_params.fbist_id, "Flex BIST Instance") ++ ++static const struct option cmd_fbist_last_read_addr_get_options[] = { ++ FBIST_LAST_READ_ADDR_GET_BASE_OPTIONS(), ++ FBIST_LAST_READ_ADDR_GET_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_test_simpledata_params { ++ u32 fbist_id; ++ u32 test_nr; ++ u64 start_address; ++ u64 num_bytes; ++ bool verbose; ++} fbist_test_simpledata_params; ++ ++#define FBIST_TEST_SIMPLEDATA_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_test_simpledata_params.verbose, "turn on debug") ++ ++#define FBIST_TEST_SIMPLEDATA_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_test_simpledata_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "test_nr", &fbist_test_simpledata_params.test_nr, "Test number to be setup"), \ ++OPT_U64('s', "start_address", &fbist_test_simpledata_params.start_address, "Start Address"), \ ++OPT_U64('n', "num_bytes", &fbist_test_simpledata_params.num_bytes, "Size of memory to operate on") ++ ++static const struct option cmd_fbist_test_simpledata_options[] = { ++ FBIST_TEST_SIMPLEDATA_BASE_OPTIONS(), ++ FBIST_TEST_SIMPLEDATA_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_test_addresstest_params { ++ u32 fbist_id; ++ u32 test_nr; ++ u64 start_address; ++ u64 num_bytes; ++ u32 seed; ++ bool verbose; ++} fbist_test_addresstest_params; ++ ++#define FBIST_TEST_ADDRESSTEST_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_test_addresstest_params.verbose, "turn on debug") ++ ++#define FBIST_TEST_ADDRESSTEST_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_test_addresstest_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "test_nr", &fbist_test_addresstest_params.test_nr, "Test number to be setup"), \ ++OPT_U64('a', "start_address", &fbist_test_addresstest_params.start_address, "Start Address"), \ ++OPT_U64('n', "num_bytes", &fbist_test_addresstest_params.num_bytes, "Size of memory to operate on"), \ ++OPT_UINTEGER('s', "seed", &fbist_test_addresstest_params.seed, "Inital Seed") ++ ++static const struct option cmd_fbist_test_addresstest_options[] = { ++ FBIST_TEST_ADDRESSTEST_BASE_OPTIONS(), ++ 
FBIST_TEST_ADDRESSTEST_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_test_movinginversion_params { ++ u32 fbist_id; ++ u32 test_nr; ++ u32 phase_nr; ++ u64 start_address; ++ u64 num_bytes; ++ u32 ddrpage_size; ++ bool verbose; ++} fbist_test_movinginversion_params; ++ ++#define FBIST_TEST_MOVINGINVERSION_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_test_movinginversion_params.verbose, "turn on debug") ++ ++#define FBIST_TEST_MOVINGINVERSION_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_test_movinginversion_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('t', "test_nr", &fbist_test_movinginversion_params.test_nr, "Test number to be setup"), \ ++OPT_UINTEGER('p', "phase_nr", &fbist_test_movinginversion_params.phase_nr, "Testphase to be setup"), \ ++OPT_U64('s', "start_address", &fbist_test_movinginversion_params.start_address, "Start Address"), \ ++OPT_U64('n', "num_bytes", &fbist_test_movinginversion_params.num_bytes, "Size of memory to operate on"), \ ++OPT_UINTEGER('d', "ddrpage_size", &fbist_test_movinginversion_params.ddrpage_size, "DDR Page size") ++ ++static const struct option cmd_fbist_test_movinginversion_options[] = { ++ FBIST_TEST_MOVINGINVERSION_BASE_OPTIONS(), ++ FBIST_TEST_MOVINGINVERSION_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _fbist_test_randomsequence_params { ++ u32 fbist_id; ++ u32 phase_nr; ++ u64 start_address; ++ u64 num_bytes; ++ u32 ddrpage_size; ++ u32 seed_dr0; ++ u32 seed_dr1; ++ bool verbose; ++} fbist_test_randomsequence_params; ++ ++#define FBIST_TEST_RANDOMSEQUENCE_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &fbist_test_randomsequence_params.verbose, "turn on debug") ++ ++#define FBIST_TEST_RANDOMSEQUENCE_OPTIONS() \ ++OPT_UINTEGER('f', "fbist_id", &fbist_test_randomsequence_params.fbist_id, "Flex BIST Instance"), \ ++OPT_UINTEGER('p', "phase_nr", &fbist_test_randomsequence_params.phase_nr, "Testphase to be setup"), \ ++OPT_U64('s', "start_address", &fbist_test_randomsequence_params.start_address, "Start Address"), \ ++OPT_U64('n', "num_bytes", &fbist_test_randomsequence_params.num_bytes, "Size of memory to operate on"), \ ++OPT_UINTEGER('d', "ddrpage_size", &fbist_test_randomsequence_params.ddrpage_size, "DDR Page size"), \ ++OPT_UINTEGER('t', "seed_dr0", &fbist_test_randomsequence_params.seed_dr0, "Seed_DR0"), \ ++OPT_UINTEGER('u', "seed_dr1", &fbist_test_randomsequence_params.seed_dr1, "Seed DR1") ++ ++static const struct option cmd_fbist_test_randomsequence_options[] = { ++ FBIST_TEST_RANDOMSEQUENCE_BASE_OPTIONS(), ++ FBIST_TEST_RANDOMSEQUENCE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_adapt_force_params { ++ u32 lane_id; ++ u32 rate; ++ u32 vdd_bias; ++ u32 ssc; ++ u32 pga_gain; ++ u32 pga_a0; ++ u32 pga_off; ++ u32 cdfe_a2; ++ u32 cdfe_a3; ++ u32 cdfe_a4; ++ u32 cdfe_a5; ++ u32 cdfe_a6; ++ u32 cdfe_a7; ++ u32 cdfe_a8; ++ u32 cdfe_a9; ++ u32 cdfe_a10; ++ u32 dc_offset; ++ u32 zobel_dc_offset; ++ u32 udfe_thr_0; ++ u32 udfe_thr_1; ++ u32 median_amp; ++ u32 zobel_a_gain; ++ u32 ph_ofs_t; ++ bool verbose; ++} eh_adapt_force_params; ++ ++ ++#define EH_ADAPT_FORCE_OPTIONS() \ ++OPT_UINTEGER('l', "lane_id", &eh_adapt_force_params.lane_id, "lane id"), \ ++OPT_UINTEGER('r', "rate", &eh_adapt_force_params.rate, "PCIe rate (0 - Gen1, 1 - Gen2, 2 - Gen3, 3 - Gen4, 4 - Gen5)"), \ ++OPT_UINTEGER('b', "vdd_bias", &eh_adapt_force_params.vdd_bias, "vdd bias (0 = 0.82V, 1 = 0.952V)"), \ ++OPT_UINTEGER('s', "ssc", &eh_adapt_force_params.ssc, "spread spectrum clocking enable (0 - SSC enable, 1 - SSC disable)"), \ 
++OPT_UINTEGER('p', "pga_gain", &eh_adapt_force_params.pga_gain, "used to set the value of the PGA_GAIN object when preloading is enabled"), \ ++OPT_UINTEGER('q', "pga_a0", &eh_adapt_force_params.pga_a0, "used to set the value of the PGA_A0 object when preloading is enabled"), \ ++OPT_UINTEGER('t', "pga_off", &eh_adapt_force_params.pga_off, "PGA Stage1,2 offset preload value, split evenly between PGA Stage1 & Stage2 DC offset"), \ ++OPT_UINTEGER('c', "cdfe_a2", &eh_adapt_force_params.cdfe_a2, "used to set the value of CDFE_A2 (DFE Tap2) when preloading (CDFE_GRP0) is enabled"), \ ++OPT_UINTEGER('d', "cdfe_a3", &eh_adapt_force_params.cdfe_a3, "used to set the value of CDFE_A3 (DFE Tap3) when preloading (CDFE_GRP0) is enabled"), \ ++OPT_UINTEGER('e', "cdfe_a4", &eh_adapt_force_params.cdfe_a4, "used to set the value of CDFE_A4 (DFE Tap4) when preloading (CDFE_GRP0) is enabled"), \ ++OPT_UINTEGER('f', "cdfe_a5", &eh_adapt_force_params.cdfe_a5, "used to set the value of CDFE_A5 (DFE Tap5) when preloading (CDFE_GRP1) is enabled"), \ ++OPT_UINTEGER('g', "cdfe_a6", &eh_adapt_force_params.cdfe_a6, "used to set the value of CDFE_A6 (DFE Tap6) when preloading (CDFE_GRP1) is enabled"), \ ++OPT_UINTEGER('y', "cdfe_a7", &eh_adapt_force_params.cdfe_a7, "used to set the value of CDFE_A7 (DFE Tap7) when preloading (CDFE_GRP1) is enabled"), \ ++OPT_UINTEGER('i', "cdfe_a8", &eh_adapt_force_params.cdfe_a8, "used to set the value of CDFE_A8 (DFE Tap8) when preloading (CDFE_GRP2) is enabled"), \ ++OPT_UINTEGER('j', "cdfe_a9", &eh_adapt_force_params.cdfe_a9, "used to set the value of CDFE_A9 (DFE Tap9) when preloading (CDFE_GRP2) is enabled"), \ ++OPT_UINTEGER('k', "cdfe_a10", &eh_adapt_force_params.cdfe_a10, "used to set the value of CDFE_A10 (DFE Tap10) when preloading (CDFE_GRP2) is enabled"), \ ++OPT_UINTEGER('m', "dc_offset", &eh_adapt_force_params.dc_offset, "used to set the value of the DC_OFFSET object when preloading is enabled"), \ ++OPT_UINTEGER('z', "zobel_dc_offset", &eh_adapt_force_params.zobel_dc_offset, "Zobel DC offset preload value"), \ ++OPT_UINTEGER('u', "udfe_thr_0", &eh_adapt_force_params.udfe_thr_0, "used to set the value of the UDFE_THR_0 object when preloading is enabled"), \ ++OPT_UINTEGER('w', "udfe_thr_1", &eh_adapt_force_params.udfe_thr_1, "used to set the value of the UDFE_THR_1 object when preloading is enabled"), \ ++OPT_UINTEGER('n', "median_amp", &eh_adapt_force_params.median_amp, "used to set the value of the MEDIAN_AMP object when preloading is enabled"), \ ++OPT_UINTEGER('A', "zobel_a_gain", &eh_adapt_force_params.zobel_a_gain, "Zobel a_gain preload"), \ ++OPT_UINTEGER('x', "ph_ofs_t", &eh_adapt_force_params.ph_ofs_t, "Timing phase offset preload") ++ ++static const struct option cmd_eh_adapt_force_options[] = { ++ BASE_OPTIONS(), ++ EH_ADAPT_FORCE_OPTIONS(), ++ OPT_END(), ++}; ++ ++ ++ ++static const struct option cmd_hbo_status_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++ ++ ++static const struct option cmd_hbo_transfer_fw_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++ ++ ++static const struct option cmd_hbo_activate_fw_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _health_counters_clear_params { ++ u32 bitmask; ++ bool verbose; ++} health_counters_clear_params; ++ ++ ++#define HEALTH_COUNTERS_CLEAR_OPTIONS() \ ++OPT_UINTEGER('b', "bitmask", &health_counters_clear_params.bitmask, "health counters bitmask") ++ ++static const struct option cmd_health_counters_clear_options[] = { ++ BASE_OPTIONS(), ++ 
HEALTH_COUNTERS_CLEAR_OPTIONS(), ++ OPT_END(), ++}; ++ ++ ++ ++static const struct option cmd_health_counters_get_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++ ++ ++static const struct option cmd_hct_get_plat_param_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _err_inj_hif_poison_params { ++ u32 ch_id; ++ u32 duration; ++ u32 inj_mode; ++ u64 address; ++ bool verbose; ++} err_inj_hif_poison_params; ++ ++#define ERR_INJ_HIF_POISON_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &err_inj_hif_poison_params.verbose, "turn on debug") ++ ++#define ERR_INJ_HIF_POISON_OPTIONS() \ ++OPT_UINTEGER('c', "ch_id", &err_inj_hif_poison_params.ch_id, "HIF channel"), \ ++OPT_UINTEGER('d', "duration", &err_inj_hif_poison_params.duration, "Duration"), \ ++OPT_UINTEGER('i', "inj_mode", &err_inj_hif_poison_params.inj_mode, "Injection mode"), \ ++OPT_U64('a', "address", &err_inj_hif_poison_params.address, "Address") ++ ++static const struct option cmd_err_inj_hif_poison_options[] = { ++ ERR_INJ_HIF_POISON_BASE_OPTIONS(), ++ ERR_INJ_HIF_POISON_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _err_inj_hif_ecc_params { ++ u32 ch_id; ++ u32 duration; ++ u32 inj_mode; ++ u64 address; ++ bool verbose; ++} err_inj_hif_ecc_params; ++ ++#define ERR_INJ_HIF_ECC_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &err_inj_hif_ecc_params.verbose, "turn on debug") ++ ++#define ERR_INJ_HIF_ECC_OPTIONS() \ ++OPT_UINTEGER('c', "ch_id", &err_inj_hif_ecc_params.ch_id, "HIF channel"), \ ++OPT_UINTEGER('d', "duration", &err_inj_hif_ecc_params.duration, "Duration"), \ ++OPT_UINTEGER('i', "inj_mode", &err_inj_hif_ecc_params.inj_mode, "Injection mode"), \ ++OPT_U64('a', "address", &err_inj_hif_ecc_params.address, "Address") ++ ++static const struct option cmd_err_inj_hif_ecc_options[] = { ++ ERR_INJ_HIF_ECC_BASE_OPTIONS(), ++ ERR_INJ_HIF_ECC_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_link_dbg_cfg_params { ++ u32 port_id; ++ u32 op_mode; ++ u32 cap_type; ++ u32 lane_mask; ++ u32 rate_mask; ++ u32 timer_us; ++ u32 cap_delay_us; ++ u32 max_cap; ++ bool verbose; ++} eh_link_dbg_cfg_params; ++ ++#define EH_LINK_DBG_CFG_BASE_OPTIONS() \ ++OPT_BOOLEAN('v', "verbose", &eh_link_dbg_cfg_params.verbose, "turn on debug") ++ ++#define EH_LINK_DBG_CFG_OPTIONS() \ ++OPT_UINTEGER('p', "port_id", &eh_link_dbg_cfg_params.port_id, "Target Port"), \ ++OPT_UINTEGER('o', "op_mode", &eh_link_dbg_cfg_params.op_mode, "Operation Mode"), \ ++OPT_UINTEGER('c', "cap_type", &eh_link_dbg_cfg_params.cap_type, "Capture Type"), \ ++OPT_UINTEGER('l', "lane_mask", &eh_link_dbg_cfg_params.lane_mask, "Lane Mask"), \ ++OPT_UINTEGER('r', "rate_mask", &eh_link_dbg_cfg_params.rate_mask, "Rate Mask"), \ ++OPT_UINTEGER('t', "timer_us", &eh_link_dbg_cfg_params.timer_us, "Timer interval"), \ ++OPT_UINTEGER('d', "cap_delay_us", &eh_link_dbg_cfg_params.cap_delay_us, "Capture Timer delay"), \ ++OPT_UINTEGER('m', "max_cap", &eh_link_dbg_cfg_params.max_cap, "Max Capture") ++ ++static const struct option cmd_eh_link_dbg_cfg_options[] = { ++ EH_LINK_DBG_CFG_BASE_OPTIONS(), ++ EH_LINK_DBG_CFG_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_link_dbg_entry_dump_params { ++ u32 entry_idx; ++ bool verbose; ++} eh_link_dbg_entry_dump_params; ++ ++#define EH_LINK_DBG_ENTRY_DUMP_BASE_OPTIONS() \ ++OPT_BOOLEAN('v', "verbose", &eh_link_dbg_entry_dump_params.verbose, "turn on debug") ++ ++#define EH_LINK_DBG_ENTRY_DUMP_OPTIONS() \ ++OPT_UINTEGER('e', "entry_idx", &eh_link_dbg_entry_dump_params.entry_idx, "Entry Index") ++ ++static const struct option 
cmd_eh_link_dbg_entry_dump_options[] = { ++ EH_LINK_DBG_ENTRY_DUMP_BASE_OPTIONS(), ++ EH_LINK_DBG_ENTRY_DUMP_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_link_dbg_lane_dump_params { ++ u32 entry_idx; ++ u32 lane_idx; ++ bool verbose; ++} eh_link_dbg_lane_dump_params; ++ ++#define EH_LINK_DBG_LANE_DUMP_BASE_OPTIONS() \ ++OPT_BOOLEAN('v', "verbose", &eh_link_dbg_lane_dump_params.verbose, "turn on debug") ++ ++#define EH_LINK_DBG_LANE_DUMP_OPTIONS() \ ++OPT_UINTEGER('e', "entry_idx", &eh_link_dbg_lane_dump_params.entry_idx, "Capture Entry Index"), \ ++OPT_UINTEGER('l', "lane_idx", &eh_link_dbg_lane_dump_params.lane_idx, "Capture Lane") ++ ++static const struct option cmd_eh_link_dbg_lane_dump_options[] = { ++ EH_LINK_DBG_LANE_DUMP_BASE_OPTIONS(), ++ EH_LINK_DBG_LANE_DUMP_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _eh_link_dbg_reset_params { ++ bool verbose; ++} eh_link_dbg_reset_params; ++ ++#define EH_LINK_DBG_RESET_BASE_OPTIONS() \ ++OPT_BOOLEAN('v', "verbose", &eh_link_dbg_reset_params.verbose, "turn on debug") ++ ++static const struct option cmd_eh_link_dbg_reset_options[] = { ++ EH_LINK_DBG_RESET_BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _conf_read_params { ++ u32 offset; ++ u32 length; ++ bool verbose; ++} conf_read_params; ++ ++#define CONF_READ_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &conf_read_params.verbose, "turn on debug") ++ ++#define CONF_READ_OPTIONS() \ ++OPT_UINTEGER('o', "offset", &conf_read_params.offset, "Starting Offset"), \ ++OPT_UINTEGER('l', "length", &conf_read_params.length, "Requested Length") ++ ++static const struct option cmd_conf_read_options[] = { ++ CONF_READ_BASE_OPTIONS(), ++ CONF_READ_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _hct_get_config_params { ++ u32 hct_inst; ++ bool verbose; ++} hct_get_config_params; ++ ++#define HCT_GET_CONFIG_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &hct_get_config_params.verbose, "turn on debug") ++ ++#define HCT_GET_CONFIG_OPTIONS() \ ++OPT_UINTEGER('i', "hct_inst", &hct_get_config_params.hct_inst, "HCT Instance") ++ ++static const struct option cmd_hct_get_config_options[] = { ++ HCT_GET_CONFIG_BASE_OPTIONS(), ++ HCT_GET_CONFIG_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _hct_read_buffer_params { ++ u32 hct_inst; ++ u32 num_entries_to_read; ++ bool verbose; ++} hct_read_buffer_params; ++ ++#define HCT_READ_BUFFER_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &hct_read_buffer_params.verbose, "turn on debug") ++ ++#define HCT_READ_BUFFER_OPTIONS() \ ++OPT_UINTEGER('i', "hct_inst", &hct_read_buffer_params.hct_inst, "HCT Instance"), \ ++OPT_UINTEGER('n', "num_entries_to_read", &hct_read_buffer_params.num_entries_to_read, "Number of buffer entries to read") ++ ++static const struct option cmd_hct_read_buffer_options[] = { ++ HCT_READ_BUFFER_BASE_OPTIONS(), ++ HCT_READ_BUFFER_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _hct_set_config_params { ++ u32 hct_inst; ++ u32 config_flags; ++ u32 post_trig_depth; ++ u32 ignore_valid; ++ const char *trig_config_file; ++ bool verbose; ++} hct_set_config_params; ++ ++#define HCT_SET_CONFIG_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &hct_set_config_params.verbose, "turn on debug") ++ ++#define HCT_SET_CONFIG_OPTIONS() \ ++OPT_UINTEGER('i', "hct_inst", &hct_set_config_params.hct_inst, "HCT Instance"), \ ++OPT_UINTEGER('c', "config_flags", &hct_set_config_params.config_flags, "Config Flags"), \ ++OPT_UINTEGER('p', "post_trig_depth", &hct_set_config_params.post_trig_depth, "Post Trigger Depth"), \ ++OPT_UINTEGER('n', "ignore_valid", 
&hct_set_config_params.ignore_valid, "Ignore Valid"), \ ++OPT_FILENAME('t', "trig_config_file", &hct_set_config_params.trig_config_file, "Trigger Config filepath", \ ++ "Filepath containing trigger config") ++ ++static const struct option cmd_hct_set_config_options[] = { ++ HCT_SET_CONFIG_BASE_OPTIONS(), ++ HCT_SET_CONFIG_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _osa_os_patt_trig_cfg_params { ++ u32 cxl_mem_id; ++ u32 lane_mask; ++ u32 lane_dir_mask; ++ u32 rate_mask; ++ unsigned patt_val; ++ unsigned patt_mask; ++ bool verbose; ++} osa_os_patt_trig_cfg_params; ++ ++#define OSA_OS_PATT_TRIG_CFG_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &osa_os_patt_trig_cfg_params.verbose, "turn on debug") ++ ++#define OSA_OS_PATT_TRIG_CFG_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &osa_os_patt_trig_cfg_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('l', "lane_mask", &osa_os_patt_trig_cfg_params.lane_mask, "Lane Mask"), \ ++OPT_UINTEGER('m', "lane_dir_mask", &osa_os_patt_trig_cfg_params.lane_dir_mask, "Lane Direction mask (see OSA_LANE_DIR_BITMSK_*)"), \ ++OPT_UINTEGER('r', "rate_mask", &osa_os_patt_trig_cfg_params.rate_mask, "Link Rate mask (see OSA_LINK_RATE_BITMSK_*)"), \ ++OPT_UINTEGER('p', "patt_val", &osa_os_patt_trig_cfg_params.patt_val, "Pattern Match Value [CXL_MEM_OSA_DATA_LEN_DW]"), \ ++OPT_UINTEGER('q', "patt_mask", &osa_os_patt_trig_cfg_params.patt_mask, "Pattern Match mask [CXL_MEM_OSA_DATA_LEN_DW]") ++ ++static const struct option cmd_osa_os_patt_trig_cfg_options[] = { ++ OSA_OS_PATT_TRIG_CFG_BASE_OPTIONS(), ++ OSA_OS_PATT_TRIG_CFG_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _osa_misc_trig_cfg_params { ++ u32 cxl_mem_id; ++ u32 trig_en_mask; ++ bool verbose; ++} osa_misc_trig_cfg_params; ++ ++#define OSA_MISC_TRIG_CFG_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &osa_misc_trig_cfg_params.verbose, "turn on debug") ++ ++#define OSA_MISC_TRIG_CFG_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &osa_misc_trig_cfg_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('t', "trig_en_mask", &osa_misc_trig_cfg_params.trig_en_mask, "Trigger Enable Mask.") ++ ++static const struct option cmd_osa_misc_trig_cfg_options[] = { ++ OSA_MISC_TRIG_CFG_BASE_OPTIONS(), ++ OSA_MISC_TRIG_CFG_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _osa_data_read_params { ++ u32 cxl_mem_id; ++ u32 lane_id; ++ u32 lane_dir; ++ u32 start_entry; ++ u32 num_entries; ++ bool verbose; ++} osa_data_read_params; ++ ++#define OSA_DATA_READ_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &osa_data_read_params.verbose, "turn on debug") ++ ++#define OSA_DATA_READ_OPTIONS() \ ++OPT_UINTEGER('c', "cxl_mem_id", &osa_data_read_params.cxl_mem_id, "CXL.MEM ID"), \ ++OPT_UINTEGER('l', "lane_id", &osa_data_read_params.lane_id, "Lane ID"), \ ++OPT_UINTEGER('m', "lane_dir", &osa_data_read_params.lane_dir, "lane direction (see osa_lane_dir_enum)"), \ ++OPT_UINTEGER('s', "start_entry", &osa_data_read_params.start_entry, "index of the first entry to read"), \ ++OPT_UINTEGER('n', "num_entries", &osa_data_read_params.num_entries, "maximum number of entries to read") ++ ++static const struct option cmd_osa_data_read_options[] = { ++ OSA_DATA_READ_BASE_OPTIONS(), ++ OSA_DATA_READ_OPTIONS(), ++ OPT_END(), ++}; ++ ++static struct _dimm_spd_read_params { ++ u32 spd_id; ++ u32 offset; ++ u32 num_bytes; ++ bool verbose; ++} dimm_spd_read_params; ++ ++#define DIMM_SPD_READ_BASE_OPTIONS() \ ++OPT_BOOLEAN('v',"verbose", &dimm_spd_read_params.verbose, "turn on debug") ++ ++#define DIMM_SPD_READ_OPTIONS() \ ++OPT_UINTEGER('s', "spd_id", 
&dimm_spd_read_params.spd_id, "SPD ID"), \ ++OPT_UINTEGER('o', "offset", &dimm_spd_read_params.offset, "Offset"), \ ++OPT_UINTEGER('n', "num_bytes", &dimm_spd_read_params.num_bytes, "Num bytes") ++ ++static const struct option cmd_dimm_spd_read_options[] = { ++ DIMM_SPD_READ_BASE_OPTIONS(), ++ DIMM_SPD_READ_OPTIONS(), ++ OPT_END(), ++}; ++ ++static const struct option cmd_ddr_training_status_options[] = { ++ BASE_OPTIONS(), ++ OPT_END(), ++}; ++ ++static int action_cmd_clear_event_records(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ u16 record_handle; ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort clear_event_records\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ if (clear_event_records_params.clear_event_flags) { ++ record_handle = 0; ++ return cxl_memdev_clear_event_records(memdev, clear_event_records_params.event_log_type, ++ clear_event_records_params.clear_event_flags, 0, &record_handle); ++ } ++ else { ++ record_handle = (u16) clear_event_records_params.event_record_handle; ++ return cxl_memdev_clear_event_records(memdev, clear_event_records_params.event_log_type, ++ clear_event_records_params.clear_event_flags, 1, &record_handle); ++ } ++} ++ ++static int action_cmd_get_event_records(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort get_event_records\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++#if 0 ++ if (get_event_records_params.event_log_type < 0 || get_event_records_params.event_log_type > 3) { ++ fprintf(stderr, "%s: Invalid Event Log type: %d, Allowed values Event log type " ++ "(00 - information (default), 01 - warning, 02 - failure, 03 - fatal)\n", ++ cxl_memdev_get_devname(memdev), get_event_records_params.event_log_type); ++ return -EINVAL; ++ } ++#endif ++ ++ return cxl_memdev_get_event_records(memdev, get_event_records_params.event_log_type); ++} ++ ++static int action_cmd_get_ld_info(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort get_ld_info\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_ld_info(memdev); ++} ++ ++static int action_cmd_ddr_info(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ddr_info\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ fprintf(stdout, "memdev id: %d", cxl_memdev_get_id(memdev)); ++ return cxl_memdev_ddr_info(memdev, ddr_info_params.ddr_id); ++} ++ ++static int action_cmd_get_health_info(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort get_health_info\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_health_info(memdev); ++} ++ ++static int action_cmd_get_alert_config(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort get_alert_config\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_alert_config(memdev); ++} ++ ++static int action_cmd_set_alert_config(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort set_alert_config\n", ++ 
cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_set_alert_config(memdev, alert_config_params.alert_prog_threshold, ++ alert_config_params.device_temp_threshold, alert_config_params.mem_error_threshold); ++} ++ ++static int action_cmd_get_timestamp(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, get_timestamp\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_timestamp(memdev); ++} ++ ++static int action_cmd_set_timestamp(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, set_timestamp\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ printf("timestamp: 0x%lx (%ld)\n", ts_params.timestamp, ts_params.timestamp); ++ return cxl_memdev_set_timestamp(memdev, ts_params.timestamp); ++} ++ ++#define INITIATE_TRANSFER 1 ++#define CONTINUE_TRANSFER 2 ++#define END_TRANSFER 3 ++#define ABORT_TRANSFER 4 ++const char *TRANSFER_FW_ERRORS[15] = { ++ "Success", ++ "Background Command Started", ++ "Invalid Parameter", ++ "Unsupported", ++ "Internal Error", ++ "Retry Required", ++ "Busy", ++ "Media Disabled", ++ "FW Transfer in Progress", ++ "FW Transfer Out of Order", ++ "FW Authentication Failed", ++ "Invalid Slot", ++ "Aborted", ++ "Invalid Security State", ++ "Invalid Payload Length" ++}; ++ ++/* ++ * Performs inband FW update through a series of successive calls to transfer-fw. The rom ++ * is loaded into memory and transfered in 128*n byte chunks. transfer-fw supports several ++ * actions that are specified as part of the input payload. The first call sets the action ++ * to initiate_transfer and includes the first chunk. The remaining chunks are then sent ++ * with the continue_transfer action. Finally, the end_transfer action will cause the ++ * device to validate the binary and transfer it to the indicated slot. ++ * ++ * User must provide available FW slot as indicated from get-fw-info. This slot is provided ++ * for every call to transfer-fw, but will only be read during the end_transfer call. 
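++ *
++ * A rough sketch of the call sequence this produces (illustrative only; the
++ * loop below is authoritative). With N = num_blocks chunks of FW_BLOCK_SIZE
++ * bytes, and offsets expressed in FW_BYTE_ALIGN units:
++ *
++ *   transfer_fw(INITIATE_TRANSFER, slot, offset 0,                              chunk 0)
++ *   transfer_fw(CONTINUE_TRANSFER, slot, offset i*FW_BLOCK_SIZE/FW_BYTE_ALIGN,  chunk i)    [i = 1 .. N-2]
++ *   transfer_fw(END_TRANSFER,      slot, offset (N-1)*FW_BLOCK_SIZE/FW_BYTE_ALIGN, chunk N-1)
++ *
++ * Each step is retried on a non-zero mailbox status and followed by an
++ * hbo-status poll; ABORT_TRANSFER is issued if the retries are exhausted.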
++*/ ++ ++struct cxl_ctx { ++ /* log_ctx must be first member for cxl_set_log_fn compat */ ++ struct log_ctx ctx; ++ int refcount; ++ void *userdata; ++ int memdevs_init; ++ struct list_head memdevs; ++ struct kmod_ctx *kmod_ctx; ++ void *private_data; ++}; ++static int action_cmd_update_fw(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ struct stat fileStat; ++ int filesize; ++ FILE *rom; ++ int rc; ++ int fd; ++ int num_blocks; ++ int num_read; ++ int size; ++ const int max_retries = 10; ++ int retry_count; ++ u32 offset; ++ fwblock *rom_buffer; ++ u32 opcode; ++ u8 action; ++ int sleep_time = 1; ++ int percent_to_print = 0; ++ struct cxl_ctx *ctx = cxl_memdev_get_ctx(memdev); ++ ++ rom = fopen(update_fw_params.filepath, "rb"); ++ if (rom == NULL) { ++ fprintf(stderr, "Error: File open returned %s\nCould not open file %s\n", ++ strerror(errno), update_fw_params.filepath); ++ return -ENOENT; ++ } ++ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, set_timestamp\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ dbg(ctx, "Rom filepath: %s\n", update_fw_params.filepath); ++ fd = fileno(rom); ++ rc = fstat(fd, &fileStat); ++ if (rc != 0) { ++ dbg(ctx, "Could not read filesize"); ++ fclose(rom); ++ return 1; ++ } ++ ++ filesize = fileStat.st_size; ++ dbg(ctx, "ROM size: %d bytes\n", filesize); ++ ++ num_blocks = filesize / FW_BLOCK_SIZE; ++ if (filesize % FW_BLOCK_SIZE != 0) ++ { ++ num_blocks++; ++ } ++ ++ rom_buffer = (fwblock*) malloc(filesize); ++ num_read = fread(rom_buffer, 1, filesize, rom); ++ if (filesize != num_read) ++ { ++ fprintf(stderr, "Number of bytes read: %d\nNumber of bytes expected: %d\n", num_read, num_blocks); ++ free(rom_buffer); ++ fclose(rom); ++ return -ENOENT; ++ } ++ ++ offset = 0; ++ if (update_fw_params.hbo) { ++ opcode = 0xCD01; // Pioneer vendor opcode for hbo-transfer-fw ++ } else { ++ opcode = 0x0201; // Spec defined transfer-fw ++ } ++ ++ for (int i = 0; i < num_blocks; i++) ++ { ++ offset = i * (FW_BLOCK_SIZE / FW_BYTE_ALIGN); ++ ++ if ( (i * 100) / num_blocks >= percent_to_print) ++ { ++ printf("%d percent complete. 
Transfering block %d of %d at offset 0x%x\n", percent_to_print, i, num_blocks, offset); ++ percent_to_print = percent_to_print + 10; ++ } ++ ++ ++ if (i == 0) ++ action = INITIATE_TRANSFER; ++ else if (i == num_blocks - 1) ++ action = END_TRANSFER; ++ else ++ action = CONTINUE_TRANSFER; ++ ++ size = FW_BLOCK_SIZE; ++ if (i == num_blocks - 1 && filesize % FW_BLOCK_SIZE != 0) { ++ size = filesize % FW_BLOCK_SIZE; ++ } ++ ++ fflush(stdout); ++ rc = cxl_memdev_transfer_fw(memdev, action, update_fw_params.slot, offset, size, rom_buffer[i], opcode); ++ ++ retry_count = 0; ++ sleep_time = 10; ++ while (rc != 0) ++ { ++ if (retry_count > max_retries) ++ { ++ fprintf(stderr, "Maximum %d retries exceeded while transferring block %d\n", max_retries, i); ++ goto abort; ++ } ++ dbg(ctx, "Mailbox returned %d: %s\nretrying in %d seconds...\n", rc, TRANSFER_FW_ERRORS[rc], sleep_time); ++ sleep(sleep_time); ++ rc = cxl_memdev_transfer_fw(memdev, action, update_fw_params.slot, offset, size, rom_buffer[i], opcode); ++ retry_count++; ++ } ++ ++ if (rc != 0) ++ { ++ fprintf(stderr, "transfer_fw failed on %d of %d\n", i, num_blocks); ++ goto abort; ++ } ++ ++ rc = cxl_memdev_hbo_status(memdev, 0); ++ retry_count = 0; ++ sleep_time = 10; ++ while (rc != 0) ++ { ++ if (retry_count > max_retries) ++ { ++ dbg(ctx, "Maximum %d retries exceeded for hbo_status of block %d\n", max_retries, i); ++ goto abort; ++ } ++ dbg(ctx, "HBO Status Mailbox returned %d: %s\nretrying in %d seconds...\n", rc, TRANSFER_FW_ERRORS[rc], sleep_time); ++ sleep(sleep_time); ++ rc = cxl_memdev_hbo_status(memdev, 0); ++ retry_count++; ++ } ++ ++ if (rc != 0) ++ { ++ fprintf(stderr, "transfer_fw failed on %d of %d\n", i, num_blocks); ++ goto abort; ++ } ++ ++ if (update_fw_params.mock) ++ { ++ goto abort; ++ } ++ } ++ ++ dbg(ctx, "Transfer completed successfully and fw was transferred to slot %d\n", update_fw_params.slot); ++ goto out; ++abort: ++ sleep(2.0); ++ rc = cxl_memdev_transfer_fw(memdev, ABORT_TRANSFER, update_fw_params.slot, FW_BLOCK_SIZE, FW_BLOCK_SIZE, rom_buffer[0], opcode); ++ dbg(ctx, "Abort return status %d\n", rc); ++out: ++ free(rom_buffer); ++ fclose(rom); ++ return 0; ++} ++ ++static int action_cmd_get_event_interrupt_policy(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, get_event_interrupt_policy\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_event_interrupt_policy(memdev); ++} ++ ++static int action_cmd_set_event_interrupt_policy(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, set_event_interrupt_policy\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_set_event_interrupt_policy(memdev, interrupt_policy_params.policy); ++} ++ ++static int action_cmd_get_cel_log(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, get_cel_log\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_cel_log(memdev); ++} ++ ++static int action_cmd_get_supported_logs(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, get_supported_logs\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_supported_logs(memdev); ++} ++ ++static int 
action_cmd_identify(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, cmd_identify\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_cmd_identify(memdev); ++} ++ ++static int action_cmd_hct_start_stop_trigger(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hct_start_stop_trigger\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_hct_start_stop_trigger(memdev, hct_start_stop_trigger_params.hct_inst, ++ hct_start_stop_trigger_params.buf_control); ++} ++ ++static int action_cmd_hct_get_buffer_status(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hct_get_buffer_status\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_hct_get_buffer_status(memdev, hct_get_buffer_status_params.hct_inst); ++} ++ ++static int action_cmd_hct_enable(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hct_enable\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_hct_enable(memdev, hct_enable_params.hct_inst); ++} ++ ++static int action_cmd_ltmon_capture_clear(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_capture_clear\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_capture_clear(memdev, ltmon_capture_clear_params.cxl_mem_id); ++} ++ ++static int action_cmd_ltmon_capture(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_capture\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_capture(memdev, ltmon_capture_params.cxl_mem_id, ++ ltmon_capture_params.capt_mode, ltmon_capture_params.ignore_sub_chg, ++ ltmon_capture_params.ignore_rxl0_chg, ltmon_capture_params.trig_src_sel); ++} ++ ++static int action_cmd_device_info_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort device_info_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_device_info_get(memdev); ++} ++ ++static int action_cmd_get_fw_info(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort get_fw_info", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_get_fw_info(memdev); ++} ++ ++static int action_cmd_activate_fw(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ int rc; ++ const int max_retries = 300; ++ int retry_count; ++ int sleep_time = 60; ++ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort activate_fw", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ rc = cxl_memdev_activate_fw(memdev, activate_fw_params.action, activate_fw_params.slot); ++ retry_count = 0; ++ while (rc != 0) { ++ if (retry_count > max_retries) { ++ printf("Maximum %d retries exceeded while activating fw for slot %d\n", max_retries, activate_fw_params.slot); ++ return rc; ++ 
} ++ printf("Mailbox returned %d: %s\nretrying in %d seconds...\n", rc, TRANSFER_FW_ERRORS[rc], sleep_time); ++ sleep(sleep_time); ++ rc = cxl_memdev_activate_fw(memdev, activate_fw_params.action, activate_fw_params.slot); ++ retry_count++; ++ } ++ ++ if (rc != 0) { ++ fprintf(stderr, "activate_fw failed for slot %d, error %d: %s\n", activate_fw_params.slot, rc, TRANSFER_FW_ERRORS[rc]); ++ return rc; ++ } ++ ++ rc = cxl_memdev_hbo_status(memdev, 0); ++ retry_count = 0; ++ while (rc != 0) { ++ if (retry_count > max_retries) { ++ printf("Maximum %d retries exceeded for hbo_status\n", max_retries); ++ return rc; ++ } ++ printf("HBO Status Mailbox returned %d: %s\nretrying in %d seconds...\n", rc, TRANSFER_FW_ERRORS[rc], sleep_time); ++ sleep(sleep_time); ++ rc = cxl_memdev_hbo_status(memdev, 0); ++ retry_count++; ++ } ++ ++ if (rc != 0) { ++ fprintf(stderr, "activate_fw failed for slot %d\n", activate_fw_params.slot); ++ return rc; ++ } ++ ++ return rc; ++} ++ ++static int action_cmd_ltmon_capture_freeze_and_restore(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_capture_freeze_and_restore\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_capture_freeze_and_restore(memdev, ltmon_capture_freeze_and_restore_params.cxl_mem_id, ++ ltmon_capture_freeze_and_restore_params.freeze_restore); ++} ++ ++static int action_cmd_ltmon_l2r_count_dump(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_l2r_count_dump\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_l2r_count_dump(memdev, ltmon_l2r_count_dump_params.cxl_mem_id); ++} ++ ++static int action_cmd_ltmon_l2r_count_clear(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_l2r_count_clear\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_l2r_count_clear(memdev, ltmon_l2r_count_clear_params.cxl_mem_id); ++} ++ ++static int action_cmd_ltmon_basic_cfg(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_basic_cfg\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_basic_cfg(memdev, ltmon_basic_cfg_params.cxl_mem_id, ++ ltmon_basic_cfg_params.tick_cnt, ltmon_basic_cfg_params.global_ts); ++} ++ ++static int action_cmd_ltmon_watch(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_watch\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_watch(memdev, ltmon_watch_params.cxl_mem_id, ++ ltmon_watch_params.watch_id, ltmon_watch_params.watch_mode, ltmon_watch_params.src_maj_st, ++ ltmon_watch_params.src_min_st, ltmon_watch_params.src_l0_st, ltmon_watch_params.dst_maj_st, ++ ltmon_watch_params.dst_min_st, ltmon_watch_params.dst_l0_st); ++} ++ ++static int action_cmd_ltmon_capture_stat(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_capture_stat\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_capture_stat(memdev, 
ltmon_capture_stat_params.cxl_mem_id); ++} ++ ++static int action_cmd_ltmon_capture_log_dmp(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_capture_log_dmp\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_capture_log_dmp(memdev, ltmon_capture_log_dmp_params.cxl_mem_id, ++ ltmon_capture_log_dmp_params.dump_idx, ltmon_capture_log_dmp_params.dump_cnt); ++} ++ ++static int action_cmd_ltmon_capture_trigger(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_capture_trigger\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_capture_trigger(memdev, ltmon_capture_trigger_params.cxl_mem_id, ++ ltmon_capture_trigger_params.trig_src); ++} ++ ++static int action_cmd_ltmon_enable(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort ltmon_enable\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ltmon_enable(memdev, ltmon_enable_params.cxl_mem_id, ++ ltmon_enable_params.enable); ++} ++ ++static int action_cmd_osa_os_type_trig_cfg(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort osa_os_type_trig_cfg\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_osa_os_type_trig_cfg(memdev, osa_os_type_trig_cfg_params.cxl_mem_id, ++ osa_os_type_trig_cfg_params.lane_mask, osa_os_type_trig_cfg_params.lane_dir_mask, ++ osa_os_type_trig_cfg_params.rate_mask, osa_os_type_trig_cfg_params.os_type_mask); ++} ++ ++static int action_cmd_osa_cap_ctrl(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort osa_cap_ctrl\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_osa_cap_ctrl(memdev, osa_cap_ctrl_params.cxl_mem_id, ++ osa_cap_ctrl_params.lane_mask, osa_cap_ctrl_params.lane_dir_mask, ++ osa_cap_ctrl_params.drop_single_os, osa_cap_ctrl_params.stop_mode, ++ osa_cap_ctrl_params.snapshot_mode, osa_cap_ctrl_params.post_trig_num, ++ osa_cap_ctrl_params.os_type_mask); ++} ++ ++static int action_cmd_osa_cfg_dump(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort osa_cfg_dump\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_osa_cfg_dump(memdev, osa_cfg_dump_params.cxl_mem_id); ++} ++ ++static int action_cmd_osa_ana_op(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort osa_ana_op\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_osa_ana_op(memdev, osa_ana_op_params.cxl_mem_id, ++ osa_ana_op_params.op); ++} ++ ++static int action_cmd_osa_status_query(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort osa_status_query\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_osa_status_query(memdev, osa_status_query_params.cxl_mem_id); ++} ++ ++static int action_cmd_osa_access_rel(struct cxl_memdev 
*memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort osa_access_rel\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_osa_access_rel(memdev, osa_access_rel_params.cxl_mem_id); ++} ++ ++static int action_cmd_perfcnt_mta_ltif_set(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_ltif_set\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_ltif_set(memdev, perfcnt_mta_ltif_set_params.counter, ++ perfcnt_mta_ltif_set_params.match_value, perfcnt_mta_ltif_set_params.opcode, ++ perfcnt_mta_ltif_set_params.meta_field, perfcnt_mta_ltif_set_params.meta_value); ++} ++ ++static int action_cmd_perfcnt_mta_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_get(memdev, perfcnt_mta_get_params.type, ++ perfcnt_mta_get_params.counter); ++} ++ ++static int action_cmd_perfcnt_mta_latch_val_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_latch_val_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_latch_val_get(memdev, perfcnt_mta_latch_val_get_params.type, ++ perfcnt_mta_latch_val_get_params.counter); ++} ++ ++static int action_cmd_perfcnt_mta_counter_clear(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_counter_clear\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_counter_clear(memdev, perfcnt_mta_counter_clear_params.type, ++ perfcnt_mta_counter_clear_params.counter); ++} ++ ++static int action_cmd_perfcnt_mta_cnt_val_latch(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_cnt_val_latch\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_cnt_val_latch(memdev, perfcnt_mta_cnt_val_latch_params.type, ++ perfcnt_mta_cnt_val_latch_params.counter); ++} ++ ++static int action_cmd_perfcnt_mta_hif_set(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_hif_set\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_hif_set(memdev, perfcnt_mta_hif_set_params.counter, ++ perfcnt_mta_hif_set_params.match_value, perfcnt_mta_hif_set_params.addr, ++ perfcnt_mta_hif_set_params.req_ty, perfcnt_mta_hif_set_params.sc_ty); ++} ++ ++static int action_cmd_perfcnt_mta_hif_cfg_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_hif_cfg_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_hif_cfg_get(memdev, perfcnt_mta_hif_cfg_get_params.counter); ++} ++ ++static int action_cmd_perfcnt_mta_hif_latch_val_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if 
(cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_hif_latch_val_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_hif_latch_val_get(memdev, perfcnt_mta_hif_latch_val_get_params.counter); ++} ++ ++static int action_cmd_perfcnt_mta_hif_counter_clear(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_hif_counter_clear\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_hif_counter_clear(memdev, perfcnt_mta_hif_counter_clear_params.counter); ++} ++ ++static int action_cmd_perfcnt_mta_hif_cnt_val_latch(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_mta_hif_cnt_val_latch\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_mta_hif_cnt_val_latch(memdev, perfcnt_mta_hif_cnt_val_latch_params.counter); ++} ++ ++static int action_cmd_perfcnt_ddr_generic_select(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_ddr_generic_select\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_ddr_generic_select(memdev, perfcnt_ddr_generic_select_params.ddr_id, ++ perfcnt_ddr_generic_select_params.cid, perfcnt_ddr_generic_select_params.rank, ++ perfcnt_ddr_generic_select_params.bank, perfcnt_ddr_generic_select_params.bankgroup, ++ perfcnt_ddr_generic_select_params.event); ++} ++ ++static int action_cmd_perfcnt_ddr_generic_capture(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_ddr_generic_capture\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_ddr_generic_capture(memdev, perfcnt_ddr_generic_capture_params.ddr_id, ++ perfcnt_ddr_generic_capture_params.poll_period_ms ++ ); ++} ++ ++static int action_cmd_perfcnt_ddr_dfi_capture(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort perfcnt_ddr_dfi_capture\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_perfcnt_ddr_dfi_capture(memdev, perfcnt_ddr_dfi_capture_params.ddr_id, ++ perfcnt_ddr_dfi_capture_params.poll_period_ms ++ ); ++} ++ ++static int action_cmd_err_inj_drs_poison(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort err_inj_drs_poison\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_err_inj_drs_poison(memdev, err_inj_drs_poison_params.ch_id, ++ err_inj_drs_poison_params.duration, err_inj_drs_poison_params.inj_mode, ++ err_inj_drs_poison_params.tag); ++} ++ ++static int action_cmd_err_inj_drs_ecc(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort err_inj_drs_ecc\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_err_inj_drs_ecc(memdev, err_inj_drs_ecc_params.ch_id, ++ err_inj_drs_ecc_params.duration, err_inj_drs_ecc_params.inj_mode, ++ err_inj_drs_ecc_params.tag); ++} ++ ++static int 
action_cmd_err_inj_rxflit_crc(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort err_inj_rxflit_crc\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_err_inj_rxflit_crc(memdev, err_inj_rxflit_crc_params.cxl_mem_id); ++} ++ ++static int action_cmd_err_inj_txflit_crc(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort err_inj_txflit_crc\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_err_inj_txflit_crc(memdev, err_inj_txflit_crc_params.cxl_mem_id); ++} ++ ++static int action_cmd_err_inj_viral(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort err_inj_viral\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_err_inj_viral(memdev, err_inj_viral_params.ld_id); ++} ++ ++static int action_cmd_eh_eye_cap_run(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_eye_cap_run\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_eye_cap_run(memdev, eh_eye_cap_run_params.depth, ++ eh_eye_cap_run_params.lane_mask); ++} ++ ++static int action_cmd_eh_eye_cap_read(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_eye_cap_read\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_eye_cap_read(memdev, eh_eye_cap_read_params.lane_id, ++ eh_eye_cap_read_params.bin_num); ++} ++ ++static int action_cmd_eh_eye_cap_timeout_enable(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_eye_cap_timeout_enable\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_eye_cap_timeout_enable(memdev, eh_eye_cap_timeout_enable_params.enable); ++} ++ ++static int action_cmd_eh_eye_cap_status(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_eye_cap_status\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_eye_cap_status(memdev); ++} ++ ++static int action_cmd_eh_adapt_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_adapt_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_adapt_get(memdev, eh_adapt_get_params.lane_id); ++} ++ ++static int action_cmd_eh_adapt_oneoff(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_adapt_oneoff\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_adapt_oneoff(memdev, eh_adapt_oneoff_params.lane_id, ++ eh_adapt_oneoff_params.preload, eh_adapt_oneoff_params.loops, eh_adapt_oneoff_params.objects); ++} ++ ++static int action_cmd_eh_adapt_force(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_adapt_force\n", ++ 
cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_adapt_force(memdev, eh_adapt_force_params.lane_id, ++ eh_adapt_force_params.rate, eh_adapt_force_params.vdd_bias, eh_adapt_force_params.ssc, ++ eh_adapt_force_params.pga_gain, eh_adapt_force_params.pga_a0, eh_adapt_force_params.pga_off, ++ eh_adapt_force_params.cdfe_a2, eh_adapt_force_params.cdfe_a3, eh_adapt_force_params.cdfe_a4, ++ eh_adapt_force_params.cdfe_a5, eh_adapt_force_params.cdfe_a6, eh_adapt_force_params.cdfe_a7, ++ eh_adapt_force_params.cdfe_a8, eh_adapt_force_params.cdfe_a9, eh_adapt_force_params.cdfe_a10, ++ eh_adapt_force_params.dc_offset, eh_adapt_force_params.zobel_dc_offset, ++ eh_adapt_force_params.udfe_thr_0, eh_adapt_force_params.udfe_thr_1, ++ eh_adapt_force_params.median_amp, eh_adapt_force_params.zobel_a_gain, ++ eh_adapt_force_params.ph_ofs_t); ++} ++ ++static int action_cmd_hbo_status(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hbo_status\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_hbo_status(memdev, 1); ++} ++ ++static int action_cmd_hbo_transfer_fw(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hbo_transfer_fw\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_hbo_transfer_fw(memdev); ++} ++ ++static int action_cmd_hbo_activate_fw(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hbo_activate_fw\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_hbo_activate_fw(memdev); ++} ++ ++static int action_cmd_health_counters_clear(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort health_counters_clear\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_health_counters_clear(memdev, health_counters_clear_params.bitmask); ++} ++ ++static int action_cmd_health_counters_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort health_counters_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_health_counters_get(memdev); ++} ++ ++static int action_cmd_hct_get_plat_param(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hct_get_plat_param\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_hct_get_plat_param(memdev); ++} ++ ++static int action_cmd_err_inj_hif_poison(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort err_inj_hif_poison\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_err_inj_hif_poison(memdev, err_inj_hif_poison_params.ch_id, ++ err_inj_hif_poison_params.duration, err_inj_hif_poison_params.inj_mode, ++ err_inj_hif_poison_params.address); ++} ++ ++static int action_cmd_err_inj_hif_ecc(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort err_inj_hif_ecc\n", ++ 
cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_err_inj_hif_ecc(memdev, err_inj_hif_ecc_params.ch_id, ++ err_inj_hif_ecc_params.duration, err_inj_hif_ecc_params.inj_mode, ++ err_inj_hif_ecc_params.address); ++} ++ ++static int action_cmd_eh_link_dbg_cfg(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_link_dbg_cfg\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_link_dbg_cfg(memdev, eh_link_dbg_cfg_params.port_id, ++ eh_link_dbg_cfg_params.op_mode, eh_link_dbg_cfg_params.cap_type, ++ eh_link_dbg_cfg_params.lane_mask, eh_link_dbg_cfg_params.rate_mask, ++ eh_link_dbg_cfg_params.timer_us, eh_link_dbg_cfg_params.cap_delay_us, ++ eh_link_dbg_cfg_params.max_cap); ++} ++ ++static int action_cmd_eh_link_dbg_entry_dump(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_link_dbg_entry_dump\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_link_dbg_entry_dump(memdev, eh_link_dbg_entry_dump_params.entry_idx); ++} ++ ++static int action_cmd_eh_link_dbg_lane_dump(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_link_dbg_lane_dump\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_link_dbg_lane_dump(memdev, eh_link_dbg_lane_dump_params.entry_idx, ++ eh_link_dbg_lane_dump_params.lane_idx); ++} ++ ++static int action_cmd_eh_link_dbg_reset(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort eh_link_dbg_reset\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_eh_link_dbg_reset(memdev); ++} ++ ++static int action_cmd_fbist_stopconfig_set(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_stopconfig_set\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_stopconfig_set(memdev, fbist_stopconfig_set_params.fbist_id, ++ fbist_stopconfig_set_params.stop_on_wresp, fbist_stopconfig_set_params.stop_on_rresp, ++ fbist_stopconfig_set_params.stop_on_rdataerr); ++} ++ ++static int action_cmd_fbist_cyclecount_set(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_cyclecount_set\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_cyclecount_set(memdev, fbist_cyclecount_set_params.fbist_id, ++ fbist_cyclecount_set_params.txg_nr, fbist_cyclecount_set_params.cyclecount); ++} ++ ++static int action_cmd_fbist_reset_set(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_reset_set\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_reset_set(memdev, fbist_reset_set_params.fbist_id, ++ fbist_reset_set_params.txg0_reset, fbist_reset_set_params.txg1_reset); ++} ++ ++static int action_cmd_fbist_run_set(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_run_set\n", 
++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_run_set(memdev, fbist_run_set_params.fbist_id, ++ fbist_run_set_params.txg0_run, fbist_run_set_params.txg1_run); ++} ++ ++static int action_cmd_fbist_run_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_run_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_run_get(memdev, fbist_run_get_params.fbist_id); ++} ++ ++static int action_cmd_fbist_xfer_rem_cnt_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_xfer_rem_cnt_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_xfer_rem_cnt_get(memdev, fbist_xfer_rem_cnt_get_params.fbist_id, ++ fbist_xfer_rem_cnt_get_params.thread_nr); ++} ++ ++static int action_cmd_fbist_last_exp_read_data_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_last_exp_read_data_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_last_exp_read_data_get(memdev, fbist_last_exp_read_data_get_params.fbist_id); ++} ++ ++static int action_cmd_fbist_curr_cycle_cnt_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_curr_cycle_cnt_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_curr_cycle_cnt_get(memdev, fbist_curr_cycle_cnt_get_params.fbist_id, ++ fbist_curr_cycle_cnt_get_params.txg_nr); ++} ++ ++static int action_cmd_fbist_thread_status_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_thread_status_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_thread_status_get(memdev, fbist_thread_status_get_params.fbist_id, ++ fbist_thread_status_get_params.txg_nr, fbist_thread_status_get_params.thread_nr); ++} ++ ++static int action_cmd_fbist_thread_trans_cnt_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_thread_trans_cnt_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_thread_trans_cnt_get(memdev, fbist_thread_trans_cnt_get_params.fbist_id, ++ fbist_thread_trans_cnt_get_params.txg_nr, fbist_thread_trans_cnt_get_params.thread_nr); ++} ++ ++static int action_cmd_fbist_thread_bandwidth_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_thread_bandwidth_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_thread_bandwidth_get(memdev, fbist_thread_bandwidth_get_params.fbist_id, ++ fbist_thread_bandwidth_get_params.txg_nr, fbist_thread_bandwidth_get_params.thread_nr); ++} ++ ++static int action_cmd_fbist_thread_latency_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_thread_latency_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return 
cxl_memdev_fbist_thread_latency_get(memdev, fbist_thread_latency_get_params.fbist_id, ++ fbist_thread_latency_get_params.txg_nr, fbist_thread_latency_get_params.thread_nr); ++} ++ ++static int action_cmd_fbist_thread_perf_mon_set(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_thread_perf_mon_set\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_thread_perf_mon_set(memdev, fbist_thread_perf_mon_set_params.fbist_id, ++ fbist_thread_perf_mon_set_params.txg_nr, fbist_thread_perf_mon_set_params.thread_nr, ++ fbist_thread_perf_mon_set_params.pmon_preset_en, fbist_thread_perf_mon_set_params.pmon_clear_en, ++ fbist_thread_perf_mon_set_params.pmon_rollover, fbist_thread_perf_mon_set_params.pmon_thread_lclk); ++} ++ ++static int action_cmd_fbist_top_read_status0_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_top_read_status0_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_top_read_status0_get(memdev, fbist_top_read_status0_get_params.fbist_id); ++} ++ ++static int action_cmd_fbist_top_err_cnt_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_top_err_cnt_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_top_err_cnt_get(memdev, fbist_top_err_cnt_get_params.fbist_id); ++} ++ ++static int action_cmd_fbist_last_read_addr_get(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_last_read_addr_get\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_last_read_addr_get(memdev, fbist_last_read_addr_get_params.fbist_id); ++} ++ ++static int action_cmd_fbist_test_simpledata(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_test_simpledata\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_test_simpledata(memdev, fbist_test_simpledata_params.fbist_id, ++ fbist_test_simpledata_params.test_nr, fbist_test_simpledata_params.start_address, ++ fbist_test_simpledata_params.num_bytes); ++} ++ ++static int action_cmd_fbist_test_addresstest(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_test_addresstest\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_test_addresstest(memdev, fbist_test_addresstest_params.fbist_id, ++ fbist_test_addresstest_params.test_nr, fbist_test_addresstest_params.start_address, ++ fbist_test_addresstest_params.num_bytes, fbist_test_addresstest_params.seed); ++} ++ ++static int action_cmd_fbist_test_movinginversion(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_test_movinginversion\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_test_movinginversion(memdev, fbist_test_movinginversion_params.fbist_id, ++ fbist_test_movinginversion_params.test_nr, fbist_test_movinginversion_params.phase_nr, ++ 
fbist_test_movinginversion_params.start_address, fbist_test_movinginversion_params.num_bytes, ++ fbist_test_movinginversion_params.ddrpage_size); ++} ++ ++static int action_cmd_fbist_test_randomsequence(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort fbist_test_randomsequence\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_fbist_test_randomsequence(memdev, fbist_test_randomsequence_params.fbist_id, ++ fbist_test_randomsequence_params.phase_nr, fbist_test_randomsequence_params.start_address, ++ fbist_test_randomsequence_params.num_bytes, fbist_test_randomsequence_params.ddrpage_size, ++ fbist_test_randomsequence_params.seed_dr0, fbist_test_randomsequence_params.seed_dr1); ++} ++ ++ ++static int action_cmd_conf_read(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort conf_read\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_conf_read(memdev, conf_read_params.offset, conf_read_params.length); ++} ++ ++static int action_zero(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ int rc; ++ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort label write\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ rc = cxl_memdev_zero_lsa(memdev); ++ if (rc < 0) ++ fprintf(stderr, "%s: label zeroing failed: %s\n", ++ cxl_memdev_get_devname(memdev), strerror(-rc)); ++ ++ return rc; ++} ++ ++static int action_cmd_hct_get_config(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hct_get_config\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_hct_get_config(memdev, hct_get_config_params.hct_inst); ++} ++ ++static int action_cmd_hct_read_buffer(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hct_read_buffer\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_hct_read_buffer(memdev, hct_read_buffer_params.hct_inst, ++ hct_read_buffer_params.num_entries_to_read); ++} ++ ++static int action_cmd_hct_set_config(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ struct stat filestat; ++ int filesize; ++ FILE *trig_config; ++ int fd; ++ int rc; ++ u8 *trig_config_buffer; ++ int conf_read; ++ ++ trig_config = fopen(hct_set_config_params.trig_config_file, "rb"); ++ if (trig_config == NULL) { ++ fprintf(stderr, "Error: File open returned %s\nCould not open file %s\n", ++ strerror(errno), hct_set_config_params.trig_config_file); ++ return -ENOENT; ++ } ++ ++ printf("Trigger Config filepath: %s\n", hct_set_config_params.trig_config_file); ++ fd = fileno(trig_config); ++ rc = fstat(fd, &filestat); ++ ++ if (rc != 0) { ++ fprintf(stderr, "Could not read filesize"); ++ fclose(trig_config); ++ return 1; ++ } ++ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort hct_set_config\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ filesize = filestat.st_size; ++ ++ trig_config_buffer = (u8*) malloc(filesize); ++ conf_read = fread(trig_config_buffer, 1, filesize, trig_config); ++ if (conf_read != filesize){ ++ fprintf(stderr, "Expected size: %d\nRead size: %d\n", filesize, conf_read); ++ 
free(trig_config_buffer); ++ fclose(trig_config); ++ return -ENOENT; ++ } ++ printf("Expected size: %d\nRead size: %d\n", filesize, conf_read); ++ ++ return cxl_memdev_hct_set_config(memdev, hct_set_config_params.hct_inst, ++ hct_set_config_params.config_flags, hct_set_config_params.post_trig_depth, ++ hct_set_config_params.ignore_valid, filesize, trig_config_buffer); ++} ++ ++static int action_cmd_osa_os_patt_trig_cfg(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ u32 pattern_val; ++ u32 pattern_mask; ++ pattern_val = (u32) osa_os_patt_trig_cfg_params.patt_val; ++ pattern_mask = (u32) osa_os_patt_trig_cfg_params.patt_mask; ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort osa_os_patt_trig_cfg\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_osa_os_patt_trig_cfg(memdev, osa_os_patt_trig_cfg_params.cxl_mem_id, ++ osa_os_patt_trig_cfg_params.lane_mask, osa_os_patt_trig_cfg_params.lane_dir_mask, ++ osa_os_patt_trig_cfg_params.rate_mask, &pattern_val, &pattern_mask); ++} ++ ++static int action_cmd_osa_misc_trig_cfg(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort osa_misc_trig_cfg\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_osa_misc_trig_cfg(memdev, osa_misc_trig_cfg_params.cxl_mem_id, ++ osa_misc_trig_cfg_params.trig_en_mask); ++} ++ ++static int action_cmd_osa_data_read(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort osa_data_read\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_osa_data_read(memdev, osa_data_read_params.cxl_mem_id, ++ osa_data_read_params.lane_id, osa_data_read_params.lane_dir, osa_data_read_params.start_entry, ++ osa_data_read_params.num_entries); ++} ++ ++static int action_cmd_dimm_spd_read(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, abort dimm_spd_read\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_dimm_spd_read(memdev, dimm_spd_read_params.spd_id, ++ dimm_spd_read_params.offset, dimm_spd_read_params.num_bytes); ++} ++ ++static int action_cmd_ddr_training_status(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s: memdev active, ddr_training_status\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ return cxl_memdev_ddr_training_status(memdev); ++} ++ ++static int action_write(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ size_t size = param.len, read_len; ++ unsigned char *buf; ++ int rc; ++ ++ if (cxl_memdev_is_active(memdev)) { ++ fprintf(stderr, "%s is active, abort label write\n", ++ cxl_memdev_get_devname(memdev)); ++ return -EBUSY; ++ } ++ ++ if (!size) { ++ size_t lsa_size = cxl_memdev_get_lsa_size(memdev); ++ ++ fseek(actx->f_in, 0L, SEEK_END); ++ size = ftell(actx->f_in); ++ fseek(actx->f_in, 0L, SEEK_SET); ++ ++ if (size > lsa_size) { ++ fprintf(stderr, ++ "File size (%zu) greater than LSA size (%zu), aborting\n", ++ size, lsa_size); ++ return -EINVAL; ++ } ++ } ++ ++ buf = calloc(1, size); ++ if (!buf) ++ return -ENOMEM; ++ ++ read_len = fread(buf, 1, size, actx->f_in); ++ if (read_len != size) { ++ rc = -ENXIO; ++ goto out; ++ } ++ ++ rc = cxl_memdev_set_lsa(memdev, buf, 
size, param.offset); ++ if (rc < 0) ++ fprintf(stderr, "%s: label write failed: %s\n", ++ cxl_memdev_get_devname(memdev), strerror(-rc)); ++ ++out: ++ free(buf); ++ return rc; ++} ++ ++static int action_read(struct cxl_memdev *memdev, struct action_context *actx) ++{ ++ size_t size = param.len, write_len; ++ char *buf; ++ int rc; ++ ++ if (!size) ++ size = cxl_memdev_get_lsa_size(memdev); ++ ++ buf = calloc(1, size); ++ if (!buf) ++ return -ENOMEM; ++ ++ rc = cxl_memdev_get_lsa(memdev, buf, size, param.offset); ++ if (rc < 0) { ++ fprintf(stderr, "%s: label read failed: %s\n", ++ cxl_memdev_get_devname(memdev), strerror(-rc)); ++ goto out; ++ } ++ ++ write_len = fwrite(buf, 1, size, actx->f_out); ++ if (write_len != size) { ++ rc = -ENXIO; ++ goto out; ++ } ++ fflush(actx->f_out); ++ ++out: ++ free(buf); ++ return rc; ++} ++ ++static int memdev_action(int argc, const char **argv, struct cxl_ctx *ctx, ++ int (*action)(struct cxl_memdev *memdev, struct action_context *actx), ++ const struct option *options, const char *usage) ++{ ++ struct cxl_memdev *memdev, *single = NULL; ++ struct action_context actx = { 0 }; ++ int i, rc = 0, count = 0, err = 0; ++ const char * const u[] = { ++ usage, ++ NULL ++ }; ++ unsigned long id; ++ ++ argc = parse_options(argc, argv, options, u, 0); ++ ++ if (argc == 0) ++ usage_with_options(u, options); ++ for (i = 0; i < argc; i++) { ++ if (strcmp(argv[i], "all") == 0) { ++ argv[0] = "all"; ++ argc = 1; ++ break; ++ } ++ ++ if (sscanf(argv[i], "mem%lu", &id) != 1) { ++ fprintf(stderr, "'%s' is not a valid memdev name\n", ++ argv[i]); ++ err++; ++ } ++ } ++ ++ if (err == argc) { ++ usage_with_options(u, options); ++ return -EINVAL; ++ } ++ ++ if (!param.outfile) ++ actx.f_out = stdout; ++ else { ++ actx.f_out = fopen(param.outfile, "w+"); ++ if (!actx.f_out) { ++ fprintf(stderr, "failed to open: %s: (%s)\n", ++ param.outfile, strerror(errno)); ++ rc = -errno; ++ goto out; ++ } ++ } ++ ++ if (!param.infile) { ++ actx.f_in = stdin; ++ } else { ++ actx.f_in = fopen(param.infile, "r"); ++ if (!actx.f_in) { ++ fprintf(stderr, "failed to open: %s: (%s)\n", ++ param.infile, strerror(errno)); ++ rc = -errno; ++ goto out_close_fout; ++ } ++ } ++ ++ if (param.verbose){ ++ cxl_set_log_priority(ctx, LOG_DEBUG); ++ } ++ rc = 0; ++ err = 0; ++ count = 0; ++ ++ for (i = 0; i < argc; i++) { ++ if (sscanf(argv[i], "mem%lu", &id) != 1 ++ && strcmp(argv[i], "all") != 0) ++ continue; ++ ++ cxl_memdev_foreach (ctx, memdev) { ++ if (!util_cxl_memdev_filter(memdev, argv[i])) ++ continue; ++ ++ if (action == action_write) { ++ single = memdev; ++ rc = 0; ++ } else ++ rc = action(memdev, &actx); ++ ++ if (rc == 0) ++ count++; ++ else if (rc && !err) ++ err = rc; ++ } ++ } ++ rc = err; ++ ++ if (action == action_write) { ++ if (count > 1) { ++ error("write-labels only supports writing a single memdev\n"); ++ usage_with_options(u, options); ++ return -EINVAL; ++ } else if (single) { ++ rc = action(single, &actx); ++ if (rc) ++ count = 0; ++ } ++ } ++ ++ if (actx.f_in != stdin) ++ fclose(actx.f_in); ++ ++ out_close_fout: ++ if (actx.f_out != stdout) ++ fclose(actx.f_out); ++ ++ out: ++ /* ++ * count if some actions succeeded, 0 if none were attempted, ++ * negative error code otherwise. ++ */ ++ if (count > 0) ++ return count; ++ return rc; ++} ++ ++int cmd_write_labels(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int count = memdev_action(argc, argv, ctx, action_write, write_options, ++ "cxl write-labels [-i ]"); ++ ++ fprintf(stderr, "wrote %d mem%s\n", count >= 0 ? 
count : 0, ++ count > 1 ? "s" : ""); ++ return count >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_read_labels(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int count = memdev_action(argc, argv, ctx, action_read, read_options, ++ "cxl read-labels [..] [-o ]"); ++ ++ fprintf(stderr, "read %d mem%s\n", count >= 0 ? count : 0, ++ count > 1 ? "s" : ""); ++ return count >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_zero_labels(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int count = memdev_action(argc, argv, ctx, action_zero, zero_options, ++ "cxl zero-labels [..] []"); ++ ++ fprintf(stderr, "zeroed %d mem%s\n", count >= 0 ? count : 0, ++ count > 1 ? "s" : ""); ++ return count >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_identify(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_identify, cmd_identify_options, ++ "cxl id-cmd [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_supported_logs(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_supported_logs, cmd_get_supported_logs_options, ++ "cxl get-supported-logs [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_cel_log(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_cel_log, cmd_get_cel_log_options, ++ "cxl get-cel-log [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_event_interrupt_policy(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_event_interrupt_policy, cmd_get_event_interrupt_policy_options, ++ "cxl get-event-interrupt-policy [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_set_event_interrupt_policy(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_set_event_interrupt_policy, cmd_set_event_interrupt_policy_options, ++ "cxl set-event-interrupt-policy [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_timestamp(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_timestamp, cmd_get_timestamp_options, ++ "cxl get-timestamp [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_device_info_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_device_info_get, cmd_device_info_get_options, ++ "cxl device_info_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_fw_info(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_fw_info, cmd_get_fw_info_options, ++ "cxl get_fw_info [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_activate_fw(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_activate_fw, cmd_activate_fw_options, ++ "cxl activate_fw [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_set_timestamp(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_set_timestamp, cmd_set_timestamp_options, ++ "cxl set-timestamp [..] []"); ++ ++ return rc >= 0 ? 
0 : EXIT_FAILURE; ++} ++ ++int cmd_get_alert_config(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_alert_config, cmd_get_alert_config_options, ++ "cxl get-alert-config [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_update_fw(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_update_fw, cmd_update_fw_options, ++ "cxl update-fw [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_set_alert_config(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_set_alert_config, cmd_set_alert_config_options, ++ "cxl set-alert-config [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_health_info(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_health_info, cmd_get_health_info_options, ++ "cxl get-health-info [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_event_records(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_event_records, cmd_get_event_records_options, ++ "cxl get-event-records [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_get_ld_info(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_get_ld_info, cmd_get_ld_info_options, ++ "cxl get-ld-info [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ddr_info(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ddr_info, cmd_ddr_info_options, ++ "cxl ddr-info [..] [-i ]"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_clear_event_records(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_clear_event_records, cmd_clear_event_records_options, ++ "cxl clear-event-records [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_hct_start_stop_trigger(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hct_start_stop_trigger, cmd_hct_start_stop_trigger_options, ++ "cxl hct_start_stop_trigger [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_hct_get_buffer_status(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hct_get_buffer_status, cmd_hct_get_buffer_status_options, ++ "cxl hct_get_buffer_status [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_hct_enable(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hct_enable, cmd_hct_enable_options, ++ "cxl hct_enable [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_capture_clear(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_capture_clear, cmd_ltmon_capture_clear_options, ++ "cxl ltmon_capture_clear [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_capture(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_capture, cmd_ltmon_capture_options, ++ "cxl ltmon_capture [..] []"); ++ ++ return rc >= 0 ? 
0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_capture_freeze_and_restore(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_capture_freeze_and_restore, cmd_ltmon_capture_freeze_and_restore_options, ++ "cxl ltmon_capture_freeze_and_restore [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_l2r_count_dump(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_l2r_count_dump, cmd_ltmon_l2r_count_dump_options, ++ "cxl ltmon_l2r_count_dump [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_l2r_count_clear(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_l2r_count_clear, cmd_ltmon_l2r_count_clear_options, ++ "cxl ltmon_l2r_count_clear [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_basic_cfg(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_basic_cfg, cmd_ltmon_basic_cfg_options, ++ "cxl ltmon_basic_cfg [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_watch(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_watch, cmd_ltmon_watch_options, ++ "cxl ltmon_watch [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_capture_stat(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_capture_stat, cmd_ltmon_capture_stat_options, ++ "cxl ltmon_capture_stat [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_capture_log_dmp(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_capture_log_dmp, cmd_ltmon_capture_log_dmp_options, ++ "cxl ltmon_capture_log_dmp [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_capture_trigger(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_capture_trigger, cmd_ltmon_capture_trigger_options, ++ "cxl ltmon_capture_trigger [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ltmon_enable(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ltmon_enable, cmd_ltmon_enable_options, ++ "cxl ltmon_enable [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_osa_os_type_trig_cfg(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_osa_os_type_trig_cfg, cmd_osa_os_type_trig_cfg_options, ++ "cxl osa_os_type_trig_cfg [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_osa_cap_ctrl(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_osa_cap_ctrl, cmd_osa_cap_ctrl_options, ++ "cxl osa_cap_ctrl [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_osa_cfg_dump(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_osa_cfg_dump, cmd_osa_cfg_dump_options, ++ "cxl osa_cfg_dump [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_osa_ana_op(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_osa_ana_op, cmd_osa_ana_op_options, ++ "cxl osa_ana_op [..] []"); ++ ++ return rc >= 0 ? 
0 : EXIT_FAILURE; ++} ++ ++int cmd_osa_status_query(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_osa_status_query, cmd_osa_status_query_options, ++ "cxl osa_status_query [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_osa_access_rel(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_osa_access_rel, cmd_osa_access_rel_options, ++ "cxl osa_access_rel [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_ltif_set(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_ltif_set, cmd_perfcnt_mta_ltif_set_options, ++ "cxl perfcnt_mta_ltif_set [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_get, cmd_perfcnt_mta_get_options, ++ "cxl perfcnt_mta_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_latch_val_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_latch_val_get, cmd_perfcnt_mta_latch_val_get_options, ++ "cxl perfcnt_mta_latch_val_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_counter_clear(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_counter_clear, cmd_perfcnt_mta_counter_clear_options, ++ "cxl perfcnt_mta_counter_clear [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_cnt_val_latch(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_cnt_val_latch, cmd_perfcnt_mta_cnt_val_latch_options, ++ "cxl perfcnt_mta_cnt_val_latch [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_hif_set(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_hif_set, cmd_perfcnt_mta_hif_set_options, ++ "cxl perfcnt_mta_hif_set [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_hif_cfg_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_hif_cfg_get, cmd_perfcnt_mta_hif_cfg_get_options, ++ "cxl perfcnt_mta_hif_cfg_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_hif_latch_val_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_hif_latch_val_get, cmd_perfcnt_mta_hif_latch_val_get_options, ++ "cxl perfcnt_mta_hif_latch_val_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_hif_counter_clear(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_hif_counter_clear, cmd_perfcnt_mta_hif_counter_clear_options, ++ "cxl perfcnt_mta_hif_counter_clear [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_mta_hif_cnt_val_latch(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_mta_hif_cnt_val_latch, cmd_perfcnt_mta_hif_cnt_val_latch_options, ++ "cxl perfcnt_mta_hif_cnt_val_latch [..] []"); ++ ++ return rc >= 0 ? 
0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_ddr_generic_select(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_ddr_generic_select, cmd_perfcnt_ddr_generic_select_options, ++ "cxl perfcnt_ddr_generic_select [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_ddr_generic_capture(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_ddr_generic_capture, cmd_perfcnt_ddr_generic_capture_options, ++ "cxl perfcnt_ddr_generic_capture [..] []"); ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_perfcnt_ddr_dfi_capture(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_perfcnt_ddr_dfi_capture, cmd_perfcnt_ddr_dfi_capture_options, ++ "cxl perfcnt_ddr_dfi_capture [..] []"); ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_err_inj_drs_poison(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_err_inj_drs_poison, cmd_err_inj_drs_poison_options, ++ "cxl err_inj_drs_poison [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_err_inj_drs_ecc(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_err_inj_drs_ecc, cmd_err_inj_drs_ecc_options, ++ "cxl err_inj_drs_ecc [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_err_inj_rxflit_crc(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_err_inj_rxflit_crc, cmd_err_inj_rxflit_crc_options, ++ "cxl err_inj_rxflit_crc [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_err_inj_txflit_crc(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_err_inj_txflit_crc, cmd_err_inj_txflit_crc_options, ++ "cxl err_inj_txflit_crc [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_err_inj_viral(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_err_inj_viral, cmd_err_inj_viral_options, ++ "cxl err_inj_viral [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_eye_cap_run(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_eye_cap_run, cmd_eh_eye_cap_run_options, ++ "cxl eh_eye_cap_run [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_eye_cap_read(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_eye_cap_read, cmd_eh_eye_cap_read_options, ++ "cxl eh_eye_cap_read [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_eye_cap_timeout_enable(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_eye_cap_timeout_enable, cmd_eh_eye_cap_timeout_enable_options, ++ "cxl eh-eye-cap-timeout-enable [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_eye_cap_status(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_eye_cap_status, cmd_eh_eye_cap_status_options, ++ "cxl eh-eye-cap-status [..] []"); ++ ++ return rc >= 0 ? 
0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_adapt_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_adapt_get, cmd_eh_adapt_get_options, ++ "cxl eh_adapt_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_adapt_oneoff(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_adapt_oneoff, cmd_eh_adapt_oneoff_options, ++ "cxl eh_adapt_oneoff [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_adapt_force(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_adapt_force, cmd_eh_adapt_force_options, ++ "cxl eh_adapt_force [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_hbo_status(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hbo_status, cmd_hbo_status_options, ++ "cxl hbo_status [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_hbo_transfer_fw(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hbo_transfer_fw, cmd_hbo_transfer_fw_options, ++ "cxl hbo_transfer_fw [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_hbo_activate_fw(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hbo_activate_fw, cmd_hbo_activate_fw_options, ++ "cxl hbo_activate_fw [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_health_counters_clear(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_health_counters_clear, cmd_health_counters_clear_options, ++ "cxl health_counters_clear [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_health_counters_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_health_counters_get, cmd_health_counters_get_options, ++ "cxl health_counters_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_hct_get_plat_param(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hct_get_plat_param, cmd_hct_get_plat_param_options, ++ "cxl hct-get-plat-params [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_err_inj_hif_poison(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_err_inj_hif_poison, cmd_err_inj_hif_poison_options, ++ "cxl err_inj_hif_poison [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_err_inj_hif_ecc(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_err_inj_hif_ecc, cmd_err_inj_hif_ecc_options, ++ "cxl err_inj_hif_ecc [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_link_dbg_cfg(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_link_dbg_cfg, cmd_eh_link_dbg_cfg_options, ++ "cxl eh-link-dbg-cfg [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_link_dbg_entry_dump(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_link_dbg_entry_dump, cmd_eh_link_dbg_entry_dump_options, ++ "cxl eh-link-dbg-entry-dump [..] []"); ++ ++ return rc >= 0 ? 
0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_link_dbg_lane_dump(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_link_dbg_lane_dump, cmd_eh_link_dbg_lane_dump_options, ++ "cxl eh-link-dbg-lane-dump [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_eh_link_dbg_reset(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_eh_link_dbg_reset, cmd_eh_link_dbg_reset_options, ++ "cxl eh-link-dbg-reset [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_stopconfig_set(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_stopconfig_set, cmd_fbist_stopconfig_set_options, ++ "cxl fbist_stopconfig_set [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_cyclecount_set(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_cyclecount_set, cmd_fbist_cyclecount_set_options, ++ "cxl fbist_cyclecount_set [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_reset_set(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_reset_set, cmd_fbist_reset_set_options, ++ "cxl fbist_reset_set [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_run_set(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_run_set, cmd_fbist_run_set_options, ++ "cxl fbist_run_set [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_run_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_run_get, cmd_fbist_run_get_options, ++ "cxl fbist_run_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_xfer_rem_cnt_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_xfer_rem_cnt_get, cmd_fbist_xfer_rem_cnt_get_options, ++ "cxl fbist_xfer_rem_cnt_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_last_exp_read_data_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_last_exp_read_data_get, cmd_fbist_last_exp_read_data_get_options, ++ "cxl fbist_last_exp_read_data_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_curr_cycle_cnt_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_curr_cycle_cnt_get, cmd_fbist_curr_cycle_cnt_get_options, ++ "cxl fbist_curr_cycle_cnt_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_thread_status_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_thread_status_get, cmd_fbist_thread_status_get_options, ++ "cxl fbist_thread_status_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_thread_trans_cnt_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_thread_trans_cnt_get, cmd_fbist_thread_trans_cnt_get_options, ++ "cxl fbist_thread_trans_cnt_get [..] []"); ++ ++ return rc >= 0 ? 
0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_thread_bandwidth_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_thread_bandwidth_get, cmd_fbist_thread_bandwidth_get_options, ++ "cxl fbist_thread_bandwidth_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_thread_latency_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_thread_latency_get, cmd_fbist_thread_latency_get_options, ++ "cxl fbist_thread_latency_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_thread_perf_mon_set(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_thread_perf_mon_set, cmd_fbist_thread_perf_mon_set_options, ++ "cxl fbist_thread_perf_mon_set [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_top_read_status0_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_top_read_status0_get, cmd_fbist_top_read_status0_get_options, ++ "cxl fbist_top_read_status0_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_top_err_cnt_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_top_err_cnt_get, cmd_fbist_top_err_cnt_get_options, ++ "cxl fbist_top_err_cnt_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_last_read_addr_get(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_last_read_addr_get, cmd_fbist_last_read_addr_get_options, ++ "cxl fbist_last_read_addr_get [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_test_simpledata(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_test_simpledata, cmd_fbist_test_simpledata_options, ++ "cxl fbist_test_simpledata [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_test_addresstest(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_test_addresstest, cmd_fbist_test_addresstest_options, ++ "cxl fbist_test_addresstest [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_test_movinginversion(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_test_movinginversion, cmd_fbist_test_movinginversion_options, ++ "cxl fbist_test_movinginversion [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_fbist_test_randomsequence(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_fbist_test_randomsequence, cmd_fbist_test_randomsequence_options, ++ "cxl fbist_test_randomsequence [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_conf_read(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_conf_read, cmd_conf_read_options, ++ "cxl conf_read [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_hct_get_config(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hct_get_config, cmd_hct_get_config_options, ++ "cxl hct_get_config [..] []"); ++ ++ return rc >= 0 ? 
0 : EXIT_FAILURE; ++} ++ ++int cmd_hct_read_buffer(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hct_read_buffer, cmd_hct_read_buffer_options, ++ "cxl hct_read_buffer [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_hct_set_config(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_hct_set_config, cmd_hct_set_config_options, ++ "cxl hct_set_config [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_osa_os_patt_trig_cfg(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_osa_os_patt_trig_cfg, cmd_osa_os_patt_trig_cfg_options, ++ "cxl osa_os_patt_trig_cfg [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_osa_misc_trig_cfg(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_osa_misc_trig_cfg, cmd_osa_misc_trig_cfg_options, ++ "cxl osa_misc_trig_cfg [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_osa_data_read(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_osa_data_read, cmd_osa_data_read_options, ++ "cxl osa_data_read [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_dimm_spd_read(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_dimm_spd_read, cmd_dimm_spd_read_options, ++ "cxl dimm_spd_read [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} ++ ++int cmd_ddr_training_status(int argc, const char **argv, struct cxl_ctx *ctx) ++{ ++ int rc = memdev_action(argc, argv, ctx, action_cmd_ddr_training_status, cmd_ddr_training_status_options, ++ "cxl ddr-training-status [..] []"); ++ ++ return rc >= 0 ? 0 : EXIT_FAILURE; ++} +diff --git a/daxctl/device.c b/daxctl/device.c +index 0721a57..a427b7d 100644 +--- a/daxctl/device.c ++++ b/daxctl/device.c +@@ -541,8 +541,18 @@ static int disable_devdax_device(struct daxctl_dev *dev) + + static int reconfig_mode_system_ram(struct daxctl_dev *dev) + { ++ const char *devname = daxctl_dev_get_devname(dev); + int rc, skip_enable = 0; + ++ if (param.no_online || !param.no_movable) { ++ if (!param.force && daxctl_dev_will_auto_online_memory(dev)) { ++ fprintf(stderr, ++ "%s: error: kernel policy will auto-online memory, aborting\n", ++ devname); ++ return -EBUSY; ++ } ++ } ++ + if (daxctl_dev_is_enabled(dev)) { + rc = disable_devdax_device(dev); + if (rc < 0) +diff --git a/daxctl/lib/libdaxctl-private.h b/daxctl/lib/libdaxctl-private.h +index af257fd..ae45311 100644 +--- a/daxctl/lib/libdaxctl-private.h ++++ b/daxctl/lib/libdaxctl-private.h +@@ -111,6 +111,7 @@ struct daxctl_memory { + char *node_path; + unsigned long block_size; + enum memory_zones zone; ++ bool auto_online; + }; + + +diff --git a/daxctl/lib/libdaxctl.c b/daxctl/lib/libdaxctl.c +index 479e8f6..860bd9c 100644 +--- a/daxctl/lib/libdaxctl.c ++++ b/daxctl/lib/libdaxctl.c +@@ -1644,3 +1644,34 @@ DAXCTL_EXPORT int daxctl_memory_is_movable(struct daxctl_memory *mem) + return rc; + return (mem->zone == MEM_ZONE_MOVABLE) ? 
1 : 0; + } ++ ++DAXCTL_EXPORT int daxctl_dev_will_auto_online_memory(struct daxctl_dev *dev) ++{ ++ const char *auto_path = "/sys/devices/system/memory/auto_online_blocks"; ++ const char *devname = daxctl_dev_get_devname(dev); ++ struct daxctl_ctx *ctx = daxctl_dev_get_ctx(dev); ++ char buf[SYSFS_ATTR_SIZE]; ++ ++ /* ++ * If we can't read the policy for some reason, don't fail yet. Assume ++ * the auto-onlining policy is absent, and carry on. If onlining blocks ++ * does result in the memory being in an inconsistent state, we have a ++ * check and warning for it after the fact ++ */ ++ if (sysfs_read_attr(ctx, auto_path, buf) != 0) ++ err(ctx, "%s: Unable to determine auto-online policy: %s\n", ++ devname, strerror(errno)); ++ ++ /* match both "online" and "online_movable" */ ++ return !strncmp(buf, "online", 6); ++} ++ ++DAXCTL_EXPORT int daxctl_dev_has_online_memory(struct daxctl_dev *dev) ++{ ++ struct daxctl_memory *mem = daxctl_dev_get_memory(dev); ++ ++ if (mem) ++ return daxctl_memory_is_online(mem); ++ else ++ return 0; ++} +diff --git a/daxctl/lib/libdaxctl.sym b/daxctl/lib/libdaxctl.sym +index a4e1684..a13e93d 100644 +--- a/daxctl/lib/libdaxctl.sym ++++ b/daxctl/lib/libdaxctl.sym +@@ -91,3 +91,9 @@ global: + daxctl_mapping_get_size; + daxctl_dev_set_mapping; + } LIBDAXCTL_7; ++ ++LIBDAXCTL_9 { ++global: ++ daxctl_dev_will_auto_online_memory; ++ daxctl_dev_has_online_memory; ++} LIBDAXCTL_8; +diff --git a/daxctl/libdaxctl.h b/daxctl/libdaxctl.h +index e82b274..683ae9c 100644 +--- a/daxctl/libdaxctl.h ++++ b/daxctl/libdaxctl.h +@@ -71,6 +71,8 @@ int daxctl_dev_disable(struct daxctl_dev *dev); + int daxctl_dev_enable_devdax(struct daxctl_dev *dev); + int daxctl_dev_enable_ram(struct daxctl_dev *dev); + int daxctl_dev_get_target_node(struct daxctl_dev *dev); ++int daxctl_dev_will_auto_online_memory(struct daxctl_dev *dev); ++int daxctl_dev_has_online_memory(struct daxctl_dev *dev); + + struct daxctl_memory; + struct daxctl_memory *daxctl_dev_get_memory(struct daxctl_dev *dev); +diff --git a/ndctl.spec.in b/ndctl.spec.in +index 0563b2d..4b08c05 100644 +--- a/ndctl.spec.in ++++ b/ndctl.spec.in +@@ -8,6 +8,7 @@ Source0: https://github.com/pmem/%{name}/archive/v%{version}.tar.gz#/%{name}-%{v + + Requires: LNAME%{?_isa} = %{version}-%{release} + Requires: DAX_LNAME%{?_isa} = %{version}-%{release} ++Requires: CXL_LNAME%{?_isa} = %{version}-%{release} + BuildRequires: autoconf + %if 0%{?rhel} < 9 + BuildRequires: asciidoc +@@ -54,6 +55,24 @@ the Linux kernel Device-DAX facility. This facility enables DAX mappings + of performance / feature differentiated memory without need of a + filesystem. + ++%package -n cxl-cli ++Summary: Manage CXL devices ++License: GPLv2 ++Requires: CXL_LNAME%{?_isa} = %{version}-%{release} ++ ++%description -n cxl-cli ++The cxl utility provides enumeration and provisioning commands for ++the Linux kernel CXL devices. ++ ++%package -n CXL_DNAME ++Summary: Development files for libcxl ++License: LGPLv2 ++Requires: CXL_LNAME%{?_isa} = %{version}-%{release} ++ ++%description -n CXL_DNAME ++This package contains libraries and header files for developing applications ++that use libcxl, a library for enumerating and communicating with CXL devices. ++ + %package -n DAX_DNAME + Summary: Development files for libdaxctl + License: LGPLv2 +@@ -84,6 +103,13 @@ Device DAX is a facility for establishing DAX mappings of performance / + feature-differentiated memory. DAX_LNAME provides an enumeration / + control API for these devices. 
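For reference, the two libdaxctl exports introduced in the libdaxctl.c hunk above, daxctl_dev_will_auto_online_memory() and daxctl_dev_has_online_memory(), can be exercised from a small standalone program. The sketch below is illustrative only, not part of the patch: it assumes a libdaxctl built with this change (the LIBDAXCTL_9 symbol version above) and uses only existing enumeration helpers (daxctl_new, daxctl_region_foreach, daxctl_dev_foreach, daxctl_dev_get_devname, daxctl_unref) alongside the two new calls.

	/* Illustrative sketch, not part of the diff: report the kernel
	 * auto-online policy and current online state for every dax device. */
	#include <stdio.h>
	#include <daxctl/libdaxctl.h>

	int main(void)
	{
		struct daxctl_ctx *ctx;
		struct daxctl_region *region;
		struct daxctl_dev *dev;

		if (daxctl_new(&ctx) < 0)
			return 1;

		daxctl_region_foreach(ctx, region)
			daxctl_dev_foreach(region, dev)
				/* will_auto_online: kernel policy would auto-online
				 * hotplugged blocks (what reconfig_mode_system_ram
				 * checks before proceeding); has_online_memory: some
				 * blocks of this device are already online. */
				printf("%s: auto-online=%d online=%d\n",
				       daxctl_dev_get_devname(dev),
				       daxctl_dev_will_auto_online_memory(dev),
				       daxctl_dev_has_online_memory(dev));

		daxctl_unref(ctx);
		return 0;
	}

Building it with something like `cc example.c $(pkg-config --cflags --libs libdaxctl)` should work once the patched library and its libdaxctl.pc are installed; this mirrors how daxctl/device.c uses the same checks to refuse a system-ram reconfiguration that the kernel would immediately auto-online.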
+ ++%package -n CXL_LNAME ++Summary: Management library for CXL devices ++License: LGPLv2 ++ ++%description -n CXL_LNAME ++libcxl is a library for enumerating and communicating with CXL devices. ++ + + %prep + %setup -q ndctl-%{version} +@@ -105,6 +131,8 @@ make check + + %ldconfig_scriptlets -n DAX_LNAME + ++%ldconfig_scriptlets -n CXL_LNAME ++ + %define bashcompdir %(pkg-config --variable=completionsdir bash-completion) + + %files +@@ -126,6 +154,12 @@ make check + %{_mandir}/man1/daxctl* + %{_datadir}/daxctl/daxctl.conf + ++%files -n cxl-cli ++%defattr(-,root,root) ++%license LICENSES/preferred/GPL-2.0 LICENSES/other/MIT LICENSES/other/CC0-1.0 ++%{_bindir}/cxl ++%{_mandir}/man1/cxl* ++ + %files -n LNAME + %defattr(-,root,root) + %doc README.md +@@ -138,6 +172,12 @@ make check + %license LICENSES/preferred/LGPL-2.1 LICENSES/other/MIT LICENSES/other/CC0-1.0 + %{_libdir}/libdaxctl.so.* + ++%files -n CXL_LNAME ++%defattr(-,root,root) ++%doc README.md ++%license LICENSES/preferred/LGPL-2.1 LICENSES/other/MIT LICENSES/other/CC0-1.0 ++%{_libdir}/libcxl.so.* ++ + %files -n DNAME + %defattr(-,root,root) + %license LICENSES/preferred/LGPL-2.1 +@@ -152,6 +192,15 @@ make check + %{_libdir}/libdaxctl.so + %{_libdir}/pkgconfig/libdaxctl.pc + ++%files -n CXL_DNAME ++%defattr(-,root,root) ++%license LICENSES/preferred/LGPL-2.1 ++%{_includedir}/cxl/ ++%{_libdir}/libcxl.so ++%{_libdir}/pkgconfig/libcxl.pc ++%{_mandir}/man3/cxl* ++%{_mandir}/man3/libcxl.3.gz ++ + + %changelog + * Fri May 27 2016 Dan Williams - 53-1 +diff --git a/ndctl/bat.c b/ndctl/bat.c +index ef00a3b..a3452fa 100644 +--- a/ndctl/bat.c ++++ b/ndctl/bat.c +@@ -9,7 +9,7 @@ + int cmd_bat(int argc, const char **argv, struct ndctl_ctx *ctx) + { + int loglevel = LOG_DEBUG, i, rc; +- struct ndctl_test *test; ++ struct test_ctx *test; + bool force = false; + const char * const u[] = { + "ndctl bat []", +@@ -32,9 +32,9 @@ int cmd_bat(int argc, const char **argv, struct ndctl_ctx *ctx) + usage_with_options(u, options); + + if (force) +- test = ndctl_test_new(UINT_MAX); ++ test = test_new(UINT_MAX); + else +- test = ndctl_test_new(0); ++ test = test_new(0); + + if (!test) { + fprintf(stderr, "failed to initialize test\n"); +@@ -48,5 +48,5 @@ int cmd_bat(int argc, const char **argv, struct ndctl_ctx *ctx) + + rc = test_pmem_namespaces(loglevel, test, ctx); + fprintf(stderr, "test_pmem_namespaces: %s\n", rc ? 
"FAIL" : "PASS"); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/ndctl/lib/libndctl.c b/ndctl/lib/libndctl.c +index 36fb6fe..536e142 100644 +--- a/ndctl/lib/libndctl.c ++++ b/ndctl/lib/libndctl.c +@@ -323,12 +323,9 @@ NDCTL_EXPORT int ndctl_new(struct ndctl_ctx **ctx) + dbg(c, "timeout = %ld\n", tmo); + } + +- if (udev) { +- c->udev = udev; +- c->udev_queue = udev_queue_new(udev); +- if (!c->udev_queue) +- err(c, "failed to retrieve udev queue\n"); +- } ++ c->udev_queue = udev_queue_new(udev); ++ if (!c->udev_queue) ++ err(c, "failed to retrieve udev queue\n"); + + c->kmod_ctx = kmod_ctx; + c->daxctl_ctx = daxctl_ctx; +@@ -805,6 +802,8 @@ static void parse_papr_flags(struct ndctl_dimm *dimm, char *flags) + dimm->flags.f_restore = 1; + else if (strcmp(start, "smart_notify") == 0) + dimm->flags.f_smart = 1; ++ else if (strcmp(start, "save_fail") == 0) ++ dimm->flags.f_save = 1; + start = end + 1; + } + if (end != start) +@@ -1035,7 +1034,8 @@ NDCTL_EXPORT int ndctl_bus_is_papr_scm(struct ndctl_bus *bus) + if (sysfs_read_attr(bus->ctx, bus->bus_buf, buf) < 0) + return 0; + +- return (strcmp(buf, "ibm,pmemory") == 0); ++ return (strcmp(buf, "ibm,pmemory") == 0 || ++ strcmp(buf, "nvdimm_test") == 0); + } + + /** +@@ -1357,11 +1357,15 @@ NDCTL_EXPORT int ndctl_bus_start_scrub(struct ndctl_bus *bus) + int rc; + + rc = sysfs_write_attr(ctx, bus->scrub_path, "1\n"); +- if (rc == -EBUSY) +- return rc; +- else if (rc < 0) +- return -EOPNOTSUPP; +- return 0; ++ ++ /* ++ * Try at least 1 poll cycle before reporting busy in case this ++ * request hits the kernel's exponential backoff while the ++ * hardware/platform scrub state is idle. ++ */ ++ if (rc == -EBUSY && ndctl_bus_poll_scrub_completion(bus, 1, 1) == 0) ++ return sysfs_write_attr(ctx, bus->scrub_path, "1\n"); ++ return rc; + } + + NDCTL_EXPORT int ndctl_bus_get_scrub_state(struct ndctl_bus *bus) +@@ -1646,41 +1650,9 @@ static int ndctl_bind(struct ndctl_ctx *ctx, struct kmod_module *module, + static int ndctl_unbind(struct ndctl_ctx *ctx, const char *devpath); + static struct kmod_module *to_module(struct ndctl_ctx *ctx, const char *alias); + +-static int add_papr_dimm(struct ndctl_dimm *dimm, const char *dimm_base) +-{ +- int rc = -ENODEV; +- char buf[SYSFS_ATTR_SIZE]; +- struct ndctl_ctx *ctx = dimm->bus->ctx; +- char *path = calloc(1, strlen(dimm_base) + 100); +- const char * const devname = ndctl_dimm_get_devname(dimm); +- +- dbg(ctx, "%s: Probing of_pmem dimm at %s\n", devname, dimm_base); +- +- if (!path) +- return -ENOMEM; +- +- /* construct path to the papr compatible dimm flags file */ +- sprintf(path, "%s/papr/flags", dimm_base); +- +- if (ndctl_bus_is_papr_scm(dimm->bus) && +- sysfs_read_attr(ctx, path, buf) == 0) { +- +- dbg(ctx, "%s: Adding papr-scm dimm flags:\"%s\"\n", devname, buf); +- dimm->cmd_family = NVDIMM_FAMILY_PAPR; +- +- /* Parse dimm flags */ +- parse_papr_flags(dimm, buf); +- +- /* Allocate monitor mode fd */ +- dimm->health_eventfd = open(path, O_RDONLY|O_CLOEXEC); +- rc = 0; +- } +- +- free(path); +- return rc; +-} +- +-static int add_nfit_dimm(struct ndctl_dimm *dimm, const char *dimm_base) ++static int populate_dimm_attributes(struct ndctl_dimm *dimm, ++ const char *dimm_base, ++ const char *bus_prefix) + { + int i, rc = -1; + char buf[SYSFS_ATTR_SIZE]; +@@ -1694,7 +1666,7 @@ static int add_nfit_dimm(struct ndctl_dimm *dimm, const char *dimm_base) + * 'unique_id' may not be available on older kernels, so don't + * fail if the read fails. 
+ */ +- sprintf(path, "%s/nfit/id", dimm_base); ++ sprintf(path, "%s/%s/id", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) { + unsigned int b[9]; + +@@ -1709,68 +1681,74 @@ static int add_nfit_dimm(struct ndctl_dimm *dimm, const char *dimm_base) + } + } + +- sprintf(path, "%s/nfit/handle", dimm_base); ++ sprintf(path, "%s/%s/handle", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) < 0) + goto err_read; + dimm->handle = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/phys_id", dimm_base); ++ sprintf(path, "%s/%s/phys_id", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) < 0) + goto err_read; + dimm->phys_id = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/serial", dimm_base); ++ sprintf(path, "%s/%s/serial", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->serial = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/vendor", dimm_base); ++ sprintf(path, "%s/%s/vendor", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->vendor_id = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/device", dimm_base); ++ sprintf(path, "%s/%s/device", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->device_id = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/rev_id", dimm_base); ++ sprintf(path, "%s/%s/rev_id", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->revision_id = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/dirty_shutdown", dimm_base); ++ sprintf(path, "%s/%s/dirty_shutdown", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->dirty_shutdown = strtoll(buf, NULL, 0); + +- sprintf(path, "%s/nfit/subsystem_vendor", dimm_base); ++ sprintf(path, "%s/%s/subsystem_vendor", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->subsystem_vendor_id = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/subsystem_device", dimm_base); ++ sprintf(path, "%s/%s/subsystem_device", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->subsystem_device_id = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/subsystem_rev_id", dimm_base); ++ sprintf(path, "%s/%s/subsystem_rev_id", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->subsystem_revision_id = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/family", dimm_base); ++ sprintf(path, "%s/%s/family", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->cmd_family = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/dsm_mask", dimm_base); ++ sprintf(path, "%s/%s/dsm_mask", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->nfit_dsm_mask = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/format", dimm_base); ++ sprintf(path, "%s/%s/format", dimm_base, bus_prefix); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->format[0] = strtoul(buf, NULL, 0); + for (i = 1; i < dimm->formats; i++) { +- sprintf(path, "%s/nfit/format%d", dimm_base, i); ++ sprintf(path, "%s/%s/format%d", dimm_base, bus_prefix, i); + if (sysfs_read_attr(ctx, path, buf) == 0) + dimm->format[i] = strtoul(buf, NULL, 0); + } + +- sprintf(path, "%s/nfit/flags", dimm_base); +- if (sysfs_read_attr(ctx, path, buf) == 0) +- parse_nfit_mem_flags(dimm, buf); ++ sprintf(path, "%s/%s/flags", dimm_base, bus_prefix); ++ if (sysfs_read_attr(ctx, path, buf) == 0) { ++ if (ndctl_bus_has_nfit(dimm->bus)) ++ parse_nfit_mem_flags(dimm, buf); ++ else if (ndctl_bus_is_papr_scm(dimm->bus)) { ++ dimm->cmd_family 
= NVDIMM_FAMILY_PAPR; ++ parse_papr_flags(dimm, buf); ++ } ++ } + + dimm->health_eventfd = open(path, O_RDONLY|O_CLOEXEC); + rc = 0; +@@ -1780,6 +1758,58 @@ static int add_nfit_dimm(struct ndctl_dimm *dimm, const char *dimm_base) + return rc; + } + ++static int add_papr_dimm(struct ndctl_dimm *dimm, const char *dimm_base) ++{ ++ int rc = -ENODEV; ++ char buf[SYSFS_ATTR_SIZE]; ++ struct ndctl_ctx *ctx = dimm->bus->ctx; ++ char *path = calloc(1, strlen(dimm_base) + 100); ++ const char * const devname = ndctl_dimm_get_devname(dimm); ++ ++ dbg(ctx, "%s: Probing of_pmem dimm at %s\n", devname, dimm_base); ++ ++ if (!path) ++ return -ENOMEM; ++ ++ /* Check the compatibility of the probed nvdimm */ ++ sprintf(path, "%s/../of_node/compatible", dimm_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) { ++ dbg(ctx, "%s: Unable to read compatible field\n", devname); ++ rc = -ENODEV; ++ goto out; ++ } ++ ++ dbg(ctx, "%s:Compatible of_pmem = '%s'\n", devname, buf); ++ ++ /* Probe for papr-scm memory */ ++ if (strcmp(buf, "ibm,pmemory") == 0) { ++ /* Read the dimm flags file */ ++ sprintf(path, "%s/papr/flags", dimm_base); ++ if (sysfs_read_attr(ctx, path, buf) < 0) { ++ rc = -errno; ++ err(ctx, "%s: Unable to read dimm-flags\n", devname); ++ goto out; ++ } ++ ++ dbg(ctx, "%s: Adding papr-scm dimm flags:\"%s\"\n", devname, buf); ++ dimm->cmd_family = NVDIMM_FAMILY_PAPR; ++ ++ /* Parse dimm flags */ ++ parse_papr_flags(dimm, buf); ++ ++ /* Allocate monitor mode fd */ ++ dimm->health_eventfd = open(path, O_RDONLY|O_CLOEXEC); ++ rc = 0; ++ ++ } else if (strcmp(buf, "nvdimm_test") == 0) { ++ /* probe via common populate_dimm_attributes() */ ++ rc = populate_dimm_attributes(dimm, dimm_base, "papr"); ++ } ++out: ++ free(path); ++ return rc; ++} ++ + static void *add_dimm(void *parent, int id, const char *dimm_base) + { + int formats, i, rc = -ENODEV; +@@ -1792,7 +1822,8 @@ static void *add_dimm(void *parent, int id, const char *dimm_base) + if (!path) + return NULL; + +- sprintf(path, "%s/nfit/formats", dimm_base); ++ sprintf(path, "%s/%s/formats", dimm_base, ++ ndctl_bus_has_nfit(bus) ? "nfit" : "papr"); + if (sysfs_read_attr(ctx, path, buf) < 0) + formats = 1; + else +@@ -1866,10 +1897,10 @@ static void *add_dimm(void *parent, int id, const char *dimm_base) + else + dimm->fwa_result = fwa_result_to_result(buf); + ++ dimm->formats = formats; + /* Check if the given dimm supports nfit */ + if (ndctl_bus_has_nfit(bus)) { +- dimm->formats = formats; +- rc = add_nfit_dimm(dimm, dimm_base); ++ rc = populate_dimm_attributes(dimm, dimm_base, "nfit"); + } else if (ndctl_bus_has_of_node(bus)) { + rc = add_papr_dimm(dimm, dimm_base); + } +@@ -2531,13 +2562,12 @@ static void *add_region(void *parent, int id, const char *region_base) + goto err_read; + region->num_mappings = strtoul(buf, NULL, 0); + +- sprintf(path, "%s/nfit/range_index", region_base); +- if (ndctl_bus_has_nfit(bus)) { +- if (sysfs_read_attr(ctx, path, buf) < 0) +- goto err_read; +- region->range_index = strtoul(buf, NULL, 0); +- } else ++ sprintf(path, "%s/%s/range_index", region_base, ++ ndctl_bus_has_nfit(bus) ? 
"nfit": "papr"); ++ if (sysfs_read_attr(ctx, path, buf) < 0) + region->range_index = -1; ++ else ++ region->range_index = strtoul(buf, NULL, 0); + + sprintf(path, "%s/read_only", region_base); + if (sysfs_read_attr(ctx, path, buf) < 0) +@@ -4593,20 +4623,40 @@ NDCTL_EXPORT int ndctl_namespace_disable_invalidate(struct ndctl_namespace *ndns + return ndctl_namespace_disable(ndns); + } + ++static int ndctl_dax_has_active_memory(struct ndctl_dax *dax) ++{ ++ struct daxctl_region *dax_region; ++ struct daxctl_dev *dax_dev; ++ ++ dax_region = ndctl_dax_get_daxctl_region(dax); ++ if (!dax_region) ++ return 0; ++ ++ daxctl_dev_foreach(dax_region, dax_dev) ++ if (daxctl_dev_has_online_memory(dax_dev)) ++ return 1; ++ ++ return 0; ++} ++ + NDCTL_EXPORT int ndctl_namespace_disable_safe(struct ndctl_namespace *ndns) + { + const char *devname = ndctl_namespace_get_devname(ndns); + struct ndctl_ctx *ctx = ndctl_namespace_get_ctx(ndns); + struct ndctl_pfn *pfn = ndctl_namespace_get_pfn(ndns); + struct ndctl_btt *btt = ndctl_namespace_get_btt(ndns); ++ struct ndctl_dax *dax = ndctl_namespace_get_dax(ndns); + const char *bdev = NULL; ++ int fd, active = 0; + char path[50]; +- int fd; ++ unsigned long long size = ndctl_namespace_get_size(ndns); + + if (pfn && ndctl_pfn_is_enabled(pfn)) + bdev = ndctl_pfn_get_block_device(pfn); + else if (btt && ndctl_btt_is_enabled(btt)) + bdev = ndctl_btt_get_block_device(btt); ++ else if (dax && ndctl_dax_is_enabled(dax)) ++ active = ndctl_dax_has_active_memory(dax); + else if (ndctl_namespace_is_enabled(ndns)) + bdev = ndctl_namespace_get_block_device(ndns); + +@@ -4631,8 +4681,17 @@ NDCTL_EXPORT int ndctl_namespace_disable_safe(struct ndctl_namespace *ndns) + devname, bdev, strerror(errno)); + return -errno; + } +- } else +- ndctl_namespace_disable_invalidate(ndns); ++ } else if (active) { ++ dbg(ctx, "%s: active as system-ram, refusing to disable\n", ++ devname); ++ return -EBUSY; ++ } else { ++ if (size == 0) ++ /* No disable necessary due to no capacity allocated */ ++ return 1; ++ else ++ ndctl_namespace_disable_invalidate(ndns); ++ } + + return 0; + } +diff --git a/ndctl/lib/libndctl.sym b/ndctl/lib/libndctl.sym +index 0a82616..58afb74 100644 +--- a/ndctl/lib/libndctl.sym ++++ b/ndctl/lib/libndctl.sym +@@ -451,3 +451,7 @@ LIBNDCTL_25 { + ndctl_bus_clear_fw_activate_nosuspend; + ndctl_bus_activate_firmware; + } LIBNDCTL_24; ++ ++LIBNDCTL_26 { ++ ndctl_bus_nfit_translate_spa; ++} LIBNDCTL_25; +diff --git a/ndctl/lib/msft.c b/ndctl/lib/msft.c +index 145872c..3112799 100644 +--- a/ndctl/lib/msft.c ++++ b/ndctl/lib/msft.c +@@ -149,10 +149,32 @@ static unsigned int msft_cmd_smart_get_life_used(struct ndctl_cmd *cmd) + return 100 - CMD_MSFT_SMART(cmd)->nvm_lifetime; + } + ++static int msft_cmd_xlat_firmware_status(struct ndctl_cmd *cmd) ++{ ++ unsigned int status; ++ ++ status = cmd->get_firmware_status(cmd) & NDN_MSFT_STATUS_MASK; ++ ++ /* Common statuses */ ++ switch (status) { ++ case NDN_MSFT_STATUS_SUCCESS: ++ return 0; ++ case NDN_MSFT_STATUS_NOTSUPP: ++ return -EOPNOTSUPP; ++ case NDN_MSFT_STATUS_INVALPARM: ++ return -EINVAL; ++ case NDN_MSFT_STATUS_I2CERR: ++ return -EIO; ++ } ++ ++ return -ENOMSG; ++} ++ + struct ndctl_dimm_ops * const msft_dimm_ops = &(struct ndctl_dimm_ops) { + .new_smart = msft_dimm_cmd_new_smart, + .smart_get_flags = msft_cmd_smart_get_flags, + .smart_get_health = msft_cmd_smart_get_health, + .smart_get_media_temperature = msft_cmd_smart_get_media_temperature, + .smart_get_life_used = msft_cmd_smart_get_life_used, ++ .xlat_firmware_status = 
msft_cmd_xlat_firmware_status, + }; +diff --git a/ndctl/lib/msft.h b/ndctl/lib/msft.h +index 7cfd26f..978cc11 100644 +--- a/ndctl/lib/msft.h ++++ b/ndctl/lib/msft.h +@@ -50,4 +50,10 @@ struct ndn_pkg_msft { + union ndn_msft_cmd u; + } __attribute__((packed)); + ++#define NDN_MSFT_STATUS_MASK 0xffff ++#define NDN_MSFT_STATUS_SUCCESS 0 ++#define NDN_MSFT_STATUS_NOTSUPP 1 ++#define NDN_MSFT_STATUS_INVALPARM 2 ++#define NDN_MSFT_STATUS_I2CERR 3 ++ + #endif /* __NDCTL_MSFT_H__ */ +diff --git a/ndctl/lib/nfit.c b/ndctl/lib/nfit.c +index 6f68fcf..d85682f 100644 +--- a/ndctl/lib/nfit.c ++++ b/ndctl/lib/nfit.c +@@ -114,7 +114,7 @@ static int is_valid_spa(struct ndctl_bus *bus, unsigned long long spa) + * + * If success, returns zero, store dimm's @handle, and @dpa. + */ +-int ndctl_bus_nfit_translate_spa(struct ndctl_bus *bus, ++NDCTL_EXPORT int ndctl_bus_nfit_translate_spa(struct ndctl_bus *bus, + unsigned long long address, unsigned int *handle, unsigned long long *dpa) + { + +diff --git a/ndctl/lib/private.h b/ndctl/lib/private.h +index ede1300..8f4510e 100644 +--- a/ndctl/lib/private.h ++++ b/ndctl/lib/private.h +@@ -370,8 +370,6 @@ static inline int check_kmod(struct kmod_ctx *kmod_ctx) + return kmod_ctx ? 0 : -ENXIO; + } + +-int ndctl_bus_nfit_translate_spa(struct ndctl_bus *bus, unsigned long long addr, +- unsigned int *handle, unsigned long long *dpa); + struct ndctl_cmd *ndctl_bus_cmd_new_err_inj(struct ndctl_bus *bus); + struct ndctl_cmd *ndctl_bus_cmd_new_err_inj_clr(struct ndctl_bus *bus); + struct ndctl_cmd *ndctl_bus_cmd_new_err_inj_stat(struct ndctl_bus *bus, +diff --git a/ndctl/libndctl.h b/ndctl/libndctl.h +index 60e1288..87d07b7 100644 +--- a/ndctl/libndctl.h ++++ b/ndctl/libndctl.h +@@ -152,6 +152,8 @@ int ndctl_bus_clear_fw_activate_noidle(struct ndctl_bus *bus); + int ndctl_bus_set_fw_activate_nosuspend(struct ndctl_bus *bus); + int ndctl_bus_clear_fw_activate_nosuspend(struct ndctl_bus *bus); + int ndctl_bus_activate_firmware(struct ndctl_bus *bus, enum ndctl_fwa_method method); ++int ndctl_bus_nfit_translate_spa(struct ndctl_bus *bus, unsigned long long addr, ++ unsigned int *handle, unsigned long long *dpa); + + struct ndctl_dimm; + struct ndctl_dimm *ndctl_dimm_get_first(struct ndctl_bus *bus); +diff --git a/ndctl/namespace.c b/ndctl/namespace.c +index 0c8df9f..1e8a2cd 100644 +--- a/ndctl/namespace.c ++++ b/ndctl/namespace.c +@@ -1052,6 +1052,9 @@ static int zero_info_block(struct ndctl_namespace *ndns) + void *buf = NULL, *read_buf = NULL; + char path[50]; + ++ if (ndctl_namespace_get_size(ndns) == 0) ++ return 1; ++ + ndctl_namespace_set_raw_mode(ndns, 1); + rc = ndctl_namespace_enable(ndns); + if (rc < 0) { +@@ -1125,7 +1128,7 @@ static int namespace_prep_reconfig(struct ndctl_region *region, + } + + rc = ndctl_namespace_disable_safe(ndns); +- if (rc) ++ if (rc < 0) + return rc; + + ndctl_namespace_set_enforce_mode(ndns, NDCTL_NS_MODE_RAW); +@@ -1431,7 +1434,7 @@ static int dax_clear_badblocks(struct ndctl_dax *dax) + return -ENXIO; + + rc = ndctl_namespace_disable_safe(ndns); +- if (rc) { ++ if (rc < 0) { + error("%s: unable to disable namespace: %s\n", devname, + strerror(-rc)); + return rc; +@@ -1455,7 +1458,7 @@ static int pfn_clear_badblocks(struct ndctl_pfn *pfn) + return -ENXIO; + + rc = ndctl_namespace_disable_safe(ndns); +- if (rc) { ++ if (rc < 0) { + error("%s: unable to disable namespace: %s\n", devname, + strerror(-rc)); + return rc; +@@ -1478,7 +1481,7 @@ static int raw_clear_badblocks(struct ndctl_namespace *ndns) + return -ENXIO; + + rc = 
ndctl_namespace_disable_safe(ndns); +- if (rc) { ++ if (rc < 0) { + error("%s: unable to disable namespace: %s\n", devname, + strerror(-rc)); + return rc; +diff --git a/ndctl/region.c b/ndctl/region.c +index 3edb9b3..4552c4a 100644 +--- a/ndctl/region.c ++++ b/ndctl/region.c +@@ -70,7 +70,7 @@ static int region_action(struct ndctl_region *region, enum device_action mode) + case ACTION_DISABLE: + ndctl_namespace_foreach(region, ndns) { + rc = ndctl_namespace_disable_safe(ndns); +- if (rc) ++ if (rc < 0) + return rc; + } + rc = ndctl_region_disable_invalidate(region); +diff --git a/ndctl/test.c b/ndctl/test.c +index 6a05d8d..92713df 100644 +--- a/ndctl/test.c ++++ b/ndctl/test.c +@@ -18,7 +18,7 @@ static char *result(int rc) + + int cmd_test(int argc, const char **argv, struct ndctl_ctx *ctx) + { +- struct ndctl_test *test; ++ struct test_ctx *test; + int loglevel = LOG_DEBUG, i, rc; + const char * const u[] = { + "ndctl test []", +@@ -42,9 +42,9 @@ int cmd_test(int argc, const char **argv, struct ndctl_ctx *ctx) + usage_with_options(u, options); + + if (force) +- test = ndctl_test_new(UINT_MAX); ++ test = test_new(UINT_MAX); + else +- test = ndctl_test_new(0); ++ test = test_new(0); + if (!test) + return EXIT_FAILURE; + +@@ -69,5 +69,5 @@ int cmd_test(int argc, const char **argv, struct ndctl_ctx *ctx) + rc = test_multi_pmem(loglevel, test, ctx); + fprintf(stderr, "test-multi-pmem: %s\n", result(rc)); + +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test.h b/test.h +index cba8d41..a25f6c9 100644 +--- a/test.h ++++ b/test.h +@@ -4,45 +4,45 @@ + #define __TEST_H__ + #include + +-struct ndctl_test; ++struct test_ctx; + struct ndctl_ctx; +-struct ndctl_test *ndctl_test_new(unsigned int kver); +-int ndctl_test_result(struct ndctl_test *test, int rc); +-int ndctl_test_get_skipped(struct ndctl_test *test); +-int ndctl_test_get_attempted(struct ndctl_test *test); +-int __ndctl_test_attempt(struct ndctl_test *test, unsigned int kver, ++struct test_ctx *test_new(unsigned int kver); ++int test_result(struct test_ctx *test, int rc); ++int test_get_skipped(struct test_ctx *test); ++int test_get_attempted(struct test_ctx *test); ++int __test_attempt(struct test_ctx *test, unsigned int kver, + const char *caller, int line); +-#define ndctl_test_attempt(t, v) __ndctl_test_attempt(t, v, __func__, __LINE__) +-void __ndctl_test_skip(struct ndctl_test *test, const char *caller, int line); +-#define ndctl_test_skip(t) __ndctl_test_skip(t, __func__, __LINE__) ++#define test_attempt(t, v) __test_attempt(t, v, __func__, __LINE__) ++void __test_skip(struct test_ctx *test, const char *caller, int line); ++#define test_skip(t) __test_skip(t, __func__, __LINE__) + struct ndctl_namespace *ndctl_get_test_dev(struct ndctl_ctx *ctx); + void builtin_xaction_namespace_reset(void); + + struct kmod_ctx; + struct kmod_module; +-int nfit_test_init(struct kmod_ctx **ctx, struct kmod_module **mod, ++int ndctl_test_init(struct kmod_ctx **ctx, struct kmod_module **mod, + struct ndctl_ctx *nd_ctx, int log_level, +- struct ndctl_test *test); ++ struct test_ctx *test); + + struct ndctl_ctx; +-int test_parent_uuid(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx); +-int test_multi_pmem(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx); ++int test_parent_uuid(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx); ++int test_multi_pmem(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx); + int test_dax_directio(int dax_fd, unsigned long align, void 
*dax_addr, off_t offset); +-int test_dax_remap(struct ndctl_test *test, int dax_fd, unsigned long align, void *dax_addr, ++int test_dax_remap(struct test_ctx *test, int dax_fd, unsigned long align, void *dax_addr, + off_t offset, bool fsdax); + #ifdef ENABLE_POISON +-int test_dax_poison(struct ndctl_test *test, int dax_fd, unsigned long align, ++int test_dax_poison(struct test_ctx *test, int dax_fd, unsigned long align, + void *dax_addr, off_t offset, bool fsdax); + #else +-static inline int test_dax_poison(struct ndctl_test *test, int dax_fd, ++static inline int test_dax_poison(struct test_ctx *test, int dax_fd, + unsigned long align, void *dax_addr, off_t offset, bool fsdax) + { + return 0; + } + #endif +-int test_dpa_alloc(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx); +-int test_dsm_fail(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx); +-int test_libndctl(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx); +-int test_blk_namespaces(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx); +-int test_pmem_namespaces(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx); ++int test_dpa_alloc(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx); ++int test_dsm_fail(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx); ++int test_libndctl(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx); ++int test_blk_namespaces(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx); ++int test_pmem_namespaces(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx); + #endif /* __TEST_H__ */ +diff --git a/test/Makefile.am b/test/Makefile.am +index c5b8764..23f4860 100644 +--- a/test/Makefile.am ++++ b/test/Makefile.am +@@ -44,7 +44,8 @@ check_PROGRAMS =\ + hugetlb \ + daxdev-errors \ + ack-shutdown-count-set \ +- list-smart-dimm ++ list-smart-dimm \ ++ libcxl + + if ENABLE_DESTRUCTIVE + TESTS +=\ +@@ -61,7 +62,8 @@ TESTS +=\ + daxctl-devices.sh \ + daxctl-create.sh \ + dm.sh \ +- mmap.sh ++ mmap.sh \ ++ libcxl + + if ENABLE_KEYUTILS + TESTS += security.sh +@@ -84,7 +86,8 @@ LIBNDCTL_LIB =\ + testcore =\ + core.c \ + ../util/log.c \ +- ../util/sysfs.c ++ ../util/sysfs.c \ ++ ../util/hexdump.c + + libndctl_SOURCES = libndctl.c $(testcore) + libndctl_LDADD = $(LIBNDCTL_LIB) $(UUID_LIBS) $(KMOD_LIBS) +@@ -190,3 +193,9 @@ list_smart_dimm_LDADD = \ + $(JSON_LIBS) \ + $(UUID_LIBS) \ + ../libutil.a ++ ++LIBCXL_LIB =\ ++ ../cxl/lib/libcxl.la ++ ++libcxl_SOURCES = libcxl.c $(testcore) ++libcxl_LDADD = $(LIBCXL_LIB) $(UUID_LIBS) $(KMOD_LIBS) +diff --git a/test/ack-shutdown-count-set.c b/test/ack-shutdown-count-set.c +index fb1d82b..92690f4 100644 +--- a/test/ack-shutdown-count-set.c ++++ b/test/ack-shutdown-count-set.c +@@ -54,7 +54,7 @@ static void reset_bus(struct ndctl_bus *bus) + ndctl_dimm_zero_labels(dimm); + } + +-static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) ++static int do_test(struct ndctl_ctx *ctx, struct test_ctx *test) + { + struct ndctl_bus *bus = ndctl_bus_get_by_provider(ctx, "nfit_test.0"); + struct ndctl_dimm *dimm; +@@ -62,7 +62,7 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) + struct log_ctx log_ctx; + int rc = 0; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 15, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 15, 0))) + return 77; + + if (!bus) +@@ -91,18 +91,18 @@ out: + return rc; + } + +-static int test_ack_shutdown_count_set(int loglevel, struct ndctl_test *test, +- struct ndctl_ctx *ctx) ++static int test_ack_shutdown_count_set(int loglevel, struct 
test_ctx *test, ++ struct ndctl_ctx *ctx) + { + struct kmod_module *mod; + struct kmod_ctx *kmod_ctx; + int result = EXIT_FAILURE, err; + + ndctl_set_log_priority(ctx, loglevel); +- err = nfit_test_init(&kmod_ctx, &mod, NULL, loglevel, test); ++ err = ndctl_test_init(&kmod_ctx, &mod, NULL, loglevel, test); + if (err < 0) { + result = 77; +- ndctl_test_skip(test); ++ test_skip(test); + fprintf(stderr, "%s unavailable skipping tests\n", + "nfit_test"); + return result; +@@ -117,7 +117,7 @@ static int test_ack_shutdown_count_set(int loglevel, struct ndctl_test *test, + + int main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -128,9 +128,9 @@ int main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + rc = test_ack_shutdown_count_set(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); + +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/blk_namespaces.c b/test/blk_namespaces.c +index d7f00cb..04eb600 100644 +--- a/test/blk_namespaces.c ++++ b/test/blk_namespaces.c +@@ -198,8 +198,8 @@ static int ns_do_io(const char *bdev) + + static const char *comm = "test-blk-namespaces"; + +-int test_blk_namespaces(int log_level, struct ndctl_test *test, +- struct ndctl_ctx *ctx) ++int test_blk_namespaces(int log_level, struct test_ctx *test, ++ struct ndctl_ctx *ctx) + { + char bdev[50]; + int rc = -ENXIO; +@@ -210,7 +210,7 @@ int test_blk_namespaces(int log_level, struct ndctl_test *test, + struct ndctl_namespace *ndns[2]; + struct ndctl_region *region, *blk_region = NULL; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 2, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 2, 0))) + return 77; + + ndctl_set_log_priority(ctx, log_level); +@@ -228,11 +228,11 @@ int test_blk_namespaces(int log_level, struct ndctl_test *test, + + if (!bus) { + fprintf(stderr, "ACPI.NFIT unavailable falling back to nfit_test\n"); +- rc = nfit_test_init(&kmod_ctx, &mod, NULL, log_level, test); ++ rc = ndctl_test_init(&kmod_ctx, &mod, NULL, log_level, test); + ndctl_invalidate(ctx); + bus = ndctl_bus_get_by_provider(ctx, "nfit_test.0"); + if (rc < 0 || !bus) { +- ndctl_test_skip(test); ++ test_skip(test); + fprintf(stderr, "nfit_test unavailable skipping tests\n"); + return 77; + } +@@ -337,7 +337,7 @@ int test_blk_namespaces(int log_level, struct ndctl_test *test, + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -349,9 +349,9 @@ int __attribute__((weak)) main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + + rc = test_blk_namespaces(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/core.c b/test/core.c +index cc7d8d9..d143e89 100644 +--- a/test/core.c ++++ b/test/core.c +@@ -11,11 +11,12 @@ + #include + #include + #include ++#include + #include + + #define KVER_STRLEN 20 + +-struct ndctl_test { ++struct test_ctx { + unsigned int kver; + int attempt; + int skip; +@@ -38,9 +39,9 @@ static unsigned int get_system_kver(void) + return KERNEL_VERSION(a,b,c); + } + +-struct ndctl_test *ndctl_test_new(unsigned int kver) ++struct test_ctx *test_new(unsigned int kver) + { +- struct ndctl_test *test 
= calloc(1, sizeof(*test));
++ struct test_ctx *test = calloc(1, sizeof(*test));
+
+ if (!test)
+ return NULL;
+@@ -53,15 +54,15 @@ struct ndctl_test *ndctl_test_new(unsigned int kver)
+ return test;
+ }
+
+-int ndctl_test_result(struct ndctl_test *test, int rc)
++int test_result(struct test_ctx *test, int rc)
+ {
+- if (ndctl_test_get_skipped(test))
++ if (test_get_skipped(test))
+ fprintf(stderr, "attempted: %d skipped: %d\n",
+- ndctl_test_get_attempted(test),
+- ndctl_test_get_skipped(test));
++ test_get_attempted(test),
++ test_get_skipped(test));
+ if (rc && rc != 77)
+ return rc;
+- if (ndctl_test_get_skipped(test) >= ndctl_test_get_attempted(test))
++ if (test_get_skipped(test) >= test_get_attempted(test))
+ return 77;
+ /* return success if no failures and at least one test not skipped */
+ return 0;
+@@ -74,8 +75,8 @@ static char *kver_str(char *buf, unsigned int kver)
+ return buf;
+ }
+
+-int __ndctl_test_attempt(struct ndctl_test *test, unsigned int kver,
+- const char *caller, int line)
++int __test_attempt(struct test_ctx *test, unsigned int kver,
++ const char *caller, int line)
+ {
+ char requires[KVER_STRLEN], current[KVER_STRLEN];
+
+@@ -89,28 +90,28 @@ int __ndctl_test_attempt(struct ndctl_test *test, unsigned int kver,
+ return 0;
+ }
+
+-void __ndctl_test_skip(struct ndctl_test *test, const char *caller, int line)
++void __test_skip(struct test_ctx *test, const char *caller, int line)
+ {
+ test->skip++;
+ test->attempt = test->skip;
+ fprintf(stderr, "%s: explicit skip %s:%d\n", __func__, caller, line);
+ }
+
+-int ndctl_test_get_attempted(struct ndctl_test *test)
++int test_get_attempted(struct test_ctx *test)
+ {
+ return test->attempt;
+ }
+
+-int ndctl_test_get_skipped(struct ndctl_test *test)
++int test_get_skipped(struct test_ctx *test)
+ {
+ return test->skip;
+ }
+
+-int nfit_test_init(struct kmod_ctx **ctx, struct kmod_module **mod,
++int ndctl_test_init(struct kmod_ctx **ctx, struct kmod_module **mod,
+ struct ndctl_ctx *nd_ctx, int log_level,
+- struct ndctl_test *test)
++ struct test_ctx *test)
+ {
+- int rc;
++ int rc, family = -1;
+ unsigned int i;
+ const char *name;
+ struct ndctl_bus *bus;
+@@ -127,10 +128,28 @@ int nfit_test_init(struct kmod_ctx **ctx, struct kmod_module **mod,
+ "nd_e820",
+ "nd_pmem",
+ };
++ char *test_env;
+
+ log_init(&log_ctx, "test/init", "NDCTL_TEST");
+ log_ctx.log_priority = log_level;
+
++ /*
++ * The following two checks determine the platform family. For
++ * Intel/platforms which support ACPI, check sysfs; for other platforms
++ * determine from the environment variable NDCTL_TEST_FAMILY
++ */
++ if (access("/sys/bus/acpi", F_OK) == 0)
++ family = NVDIMM_FAMILY_INTEL;
++
++ test_env = getenv("NDCTL_TEST_FAMILY");
++ if (test_env && strcmp(test_env, "PAPR") == 0)
++ family = NVDIMM_FAMILY_PAPR;
++
++ if (family == -1) {
++ log_err(&log_ctx, "Cannot determine NVDIMM family\n");
++ return -ENOTSUP;
++ }
++
+ *ctx = kmod_new(NULL, NULL);
+ if (!*ctx)
+ return -ENXIO;
+@@ -155,7 +174,7 @@ int nfit_test_init(struct kmod_ctx **ctx, struct kmod_module **mod,
+ * than 4.7.
+ */ + if (strcmp(name, "dax") == 0 +- && !ndctl_test_attempt(test, ++ && !test_attempt(test, + KERNEL_VERSION(4, 7, 0))) + continue; + +@@ -164,7 +183,7 @@ int nfit_test_init(struct kmod_ctx **ctx, struct kmod_module **mod, + */ + if ((strcmp(name, "dax_pmem_core") == 0 + || strcmp(name, "dax_pmem_compat") == 0) +- && !ndctl_test_attempt(test, ++ && !test_attempt(test, + KERNEL_VERSION(5, 1, 0))) + continue; + +@@ -185,6 +204,11 @@ retry: + + path = kmod_module_get_path(*mod); + if (!path) { ++ if (family != NVDIMM_FAMILY_INTEL && ++ (strcmp(name, "nfit") == 0 || ++ strcmp(name, "nd_e820") == 0)) ++ continue; ++ + log_err(&log_ctx, "%s.ko: failed to get path\n", name); + break; + } +diff --git a/test/dax-dev.c b/test/dax-dev.c +index 6a1b76d..d61104f 100644 +--- a/test/dax-dev.c ++++ b/test/dax-dev.c +@@ -87,13 +87,13 @@ struct ndctl_namespace *ndctl_get_test_dev(struct ndctl_ctx *ctx) + return rc ? NULL : ndns; + } + +-static int emit_e820_device(int loglevel, struct ndctl_test *test) ++static int emit_e820_device(int loglevel, struct test_ctx *test) + { + int err; + struct ndctl_ctx *ctx; + struct ndctl_namespace *ndns; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 3, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 3, 0))) + return 77; + + err = ndctl_new(&ctx); +@@ -106,7 +106,7 @@ static int emit_e820_device(int loglevel, struct ndctl_test *test) + if (!ndns) { + fprintf(stderr, "%s: failed to find usable victim device\n", + __func__); +- ndctl_test_skip(test); ++ test_skip(test); + err = 77; + } else { + fprintf(stdout, "%s\n", ndctl_namespace_get_devname(ndns)); +@@ -118,7 +118,7 @@ static int emit_e820_device(int loglevel, struct ndctl_test *test) + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + int rc; + + if (!test) { +@@ -127,5 +127,5 @@ int __attribute__((weak)) main(int argc, char *argv[]) + } + + rc = emit_e820_device(LOG_DEBUG, test); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/dax-ext4.sh b/test/dax-ext4.sh +deleted file mode 120000 +index da4ec43..0000000 +--- a/test/dax-ext4.sh ++++ /dev/null +@@ -1 +0,0 @@ +-dax.sh +\ No newline at end of file +diff --git a/test/dax-pmd.c b/test/dax-pmd.c +index 401826d..190a0fd 100644 +--- a/test/dax-pmd.c ++++ b/test/dax-pmd.c +@@ -37,14 +37,15 @@ static void sigbus(int sig, siginfo_t *siginfo, void *d) + siglongjmp(sj_env, 1); + } + +-int test_dax_remap(struct ndctl_test *test, int dax_fd, unsigned long align, void *dax_addr, +- off_t offset, bool fsdax) ++int test_dax_remap(struct test_ctx *test, int dax_fd, unsigned long align, ++ void *dax_addr, ++ off_t offset, bool fsdax) + { + void *anon, *remap, *addr; + struct sigaction act; + int rc, val; + +- if ((fsdax || align == SZ_2M) && !ndctl_test_attempt(test, KERNEL_VERSION(5, 8, 0))) { ++ if ((fsdax || align == SZ_2M) && !test_attempt(test, KERNEL_VERSION(5, 8, 0))) { + /* kernel's prior to 5.8 may crash on this test */ + fprintf(stderr, "%s: SKIP mremap() test\n", __func__); + return 0; +@@ -69,6 +70,11 @@ int test_dax_remap(struct ndctl_test *test, int dax_fd, unsigned long align, voi + + remap = mremap(addr, REMAP_SIZE, REMAP_SIZE, MREMAP_MAYMOVE|MREMAP_FIXED, anon); + ++ if (remap == MAP_FAILED) { ++ fprintf(stderr, "%s: mremap failed, that's ok too\n", __func__); ++ return 0; ++ } ++ + if (remap != anon) { + rc = -ENXIO; + perror("mremap"); +@@ -83,20 +89,18 @@ int test_dax_remap(struct ndctl_test *test, int dax_fd, 
unsigned long align, voi + act.sa_flags = SA_SIGINFO; + if (sigaction(SIGBUS, &act, 0)) { + perror("sigaction"); +- rc = EXIT_FAILURE; +- goto out; ++ return EXIT_FAILURE; + } + + /* test fault after device-dax instance disabled */ + if (sigsetjmp(sj_env, 1)) { + if (!fsdax && align > SZ_4K) { + fprintf(stderr, "got expected SIGBUS after mremap() of device-dax\n"); +- rc = 0; ++ return 0; + } else { + fprintf(stderr, "unpexpected SIGBUS after mremap()\n"); +- rc = -EIO; ++ return -EIO; + } +- goto out; + } + + *(int *) anon = 0xAA; +@@ -107,9 +111,7 @@ int test_dax_remap(struct ndctl_test *test, int dax_fd, unsigned long align, voi + return -ENXIO; + } + +- rc = 0; +-out: +- return rc; ++ return 0; + } + + int test_dax_directio(int dax_fd, unsigned long align, void *dax_addr, off_t offset) +@@ -271,7 +273,7 @@ int test_dax_directio(int dax_fd, unsigned long align, void *dax_addr, off_t off + } + + /* test_pmd assumes that fd references a pre-allocated + dax-capable file */ +-static int test_pmd(struct ndctl_test *test, int fd) ++static int test_pmd(struct test_ctx *test, int fd) + { + unsigned long long m_align, p_align, pmd_off; + static const bool fsdax = true; +@@ -350,7 +352,7 @@ err_mmap: + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + int fd, rc; + + if (!test) { +@@ -365,5 +367,5 @@ int __attribute__((weak)) main(int argc, char *argv[]) + rc = test_pmd(test, fd); + if (fd >= 0) + close(fd); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/dax-poison.c b/test/dax-poison.c +index a4ef12e..e50ff8f 100644 +--- a/test/dax-poison.c ++++ b/test/dax-poison.c +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -43,16 +44,17 @@ static void sigbus_hdl(int sig, siginfo_t *si, void *ptr) + siglongjmp(sj_env, 1); + } + +-int test_dax_poison(struct ndctl_test *test, int dax_fd, unsigned long align, +- void *dax_addr, off_t offset, bool fsdax) ++int test_dax_poison(struct test_ctx *test, int dax_fd, unsigned long align, ++ void *dax_addr, off_t offset, bool fsdax) + { + unsigned char *addr = MAP_FAILED; + struct sigaction act; + unsigned x = x; ++ FILE *smaps; + void *buf; + int rc; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 19, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 19, 0))) + return 77; + + /* +@@ -94,6 +96,9 @@ int test_dax_poison(struct ndctl_test *test, int dax_fd, unsigned long align, + goto out; + } + ++ fprintf(stderr, "%s: mmap got %p align: %ld offset: %zd\n", ++ __func__, addr, align, offset); ++ + if (sigsetjmp(sj_env, 1)) { + if (sig_mcerr_ar) { + fprintf(stderr, "madvise triggered 'action required' sigbus\n"); +@@ -104,6 +109,20 @@ int test_dax_poison(struct ndctl_test *test, int dax_fd, unsigned long align, + } + } + ++ rc = madvise(addr + align / 2, 4096, MADV_SOFT_OFFLINE); ++ if (rc == 0) { ++ fprintf(stderr, "softoffline should always fail for dax\n"); ++ smaps = fopen("/proc/self/smaps", "r"); ++ do { ++ rc = fread(buf, 1, 4096, smaps); ++ fwrite(buf, 1, rc, stderr); ++ } while (rc); ++ fclose(smaps); ++ fail(); ++ rc = -ENXIO; ++ goto out; ++ } ++ + rc = madvise(addr + align / 2, 4096, MADV_HWPOISON); + if (rc) { + fail(); +diff --git a/test/dax-xfs.sh b/test/dax-xfs.sh +deleted file mode 120000 +index da4ec43..0000000 +--- a/test/dax-xfs.sh ++++ /dev/null +@@ -1 +0,0 @@ +-dax.sh +\ No newline at end of file +diff --git a/test/daxctl-devices.sh 
b/test/daxctl-devices.sh
+index 496e4f2..56c9691 100755
+--- a/test/daxctl-devices.sh
++++ b/test/daxctl-devices.sh
+@@ -64,6 +64,26 @@ daxctl_get_mode()
+ "$DAXCTL" list -d "$1" | jq -er '.[].mode'
+ }
+
++set_online_policy()
++{
++ echo "online" > /sys/devices/system/memory/auto_online_blocks
++}
++
++unset_online_policy()
++{
++ echo "offline" > /sys/devices/system/memory/auto_online_blocks
++}
++
++save_online_policy()
++{
++ saved_policy="$(cat /sys/devices/system/memory/auto_online_blocks)"
++}
++
++restore_online_policy()
++{
++ echo "$saved_policy" > /sys/devices/system/memory/auto_online_blocks
++}
++
+ daxctl_test()
+ {
+ local daxdev
+@@ -71,6 +91,9 @@ daxctl_test()
+ daxdev=$(daxctl_get_dev "$testdev")
+ test -n "$daxdev"
+
++ # these tests need to run with kernel onlining policy turned off
++ save_online_policy
++ unset_online_policy
+ "$DAXCTL" reconfigure-device -N -m system-ram "$daxdev"
+ [[ $(daxctl_get_mode "$daxdev") == "system-ram" ]]
+ "$DAXCTL" online-memory "$daxdev"
+@@ -81,6 +104,35 @@ daxctl_test()
+ [[ $(daxctl_get_mode "$daxdev") == "system-ram" ]]
+ "$DAXCTL" reconfigure-device -f -m devdax "$daxdev"
+ [[ $(daxctl_get_mode "$daxdev") == "devdax" ]]
++
++ # fail 'ndctl-disable-namespace' while the devdax namespace is active
++ # as system-ram. If this test fails, a reboot will be required to
++ # recover from the resulting state.
++ test -n "$testdev"
++ "$DAXCTL" reconfigure-device -m system-ram "$daxdev"
++ [[ $(daxctl_get_mode "$daxdev") == "system-ram" ]]
++ if ! "$NDCTL" disable-namespace "$testdev"; then
++ echo "disable-namespace failed as expected"
++ else
++ echo "disable-namespace succeeded, expected failure"
++ echo "reboot required to recover from this state"
++ return 1
++ fi
++ "$DAXCTL" reconfigure-device -f -m devdax "$daxdev"
++ [[ $(daxctl_get_mode "$daxdev") == "devdax" ]]
++
++ # this tests for reconfiguration failure if an online-policy is set
++ set_online_policy
++ : "This command is expected to fail:"
++ if ! "$DAXCTL" reconfigure-device -N -m system-ram "$daxdev"; then
++ echo "reconfigure failed as expected"
++ else
++ echo "reconfigure succeeded, expected failure"
++ restore_online_policy
++ return 1
++ fi
++
++ restore_online_policy
+ }
+
+ find_testdev
+diff --git a/test/daxdev-errors.c b/test/daxdev-errors.c
+index fbbea21..4cb6b4d 100644
+--- a/test/daxdev-errors.c
++++ b/test/daxdev-errors.c
+@@ -29,7 +29,7 @@
+
+ struct check_cmd {
+ struct ndctl_cmd *cmd;
+- struct ndctl_test *test;
++ struct test_ctx *test;
+ };
+
+ static sigjmp_buf sj_env;
+diff --git a/test/daxdev-errors.sh b/test/daxdev-errors.sh
+index 6281f32..9547d78 100755
+--- a/test/daxdev-errors.sh
++++ b/test/daxdev-errors.sh
+@@ -9,6 +9,7 @@ rc=77
+ . 
$(dirname $0)/common + + check_min_kver "4.12" || do_skip "lacks dax dev error handling" ++check_prereq "jq" + + trap 'err $LINENO' ERR + +diff --git a/test/device-dax.c b/test/device-dax.c +index 5f0da29..80f0ef7 100644 +--- a/test/device-dax.c ++++ b/test/device-dax.c +@@ -90,12 +90,12 @@ static void sigbus(int sig, siginfo_t *siginfo, void *d) + #define VERIFY_TIME(x) (suseconds_t) ((ALIGN(x, SZ_2M) / SZ_4K) * 60) + + static int verify_data(struct daxctl_dev *dev, char *dax_buf, +- unsigned long align, int salt, struct ndctl_test *test) ++ unsigned long align, int salt, struct test_ctx *test) + { + struct timeval tv1, tv2, tv_diff; + unsigned long i; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 9, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 9, 0))) + return 0; + + /* verify data and cache mode */ +@@ -128,8 +128,46 @@ static int verify_data(struct daxctl_dev *dev, char *dax_buf, + return 0; + } + ++static int test_dax_soft_offline(struct test_ctx *test, struct ndctl_namespace *ndns) ++{ ++ unsigned long long resource = ndctl_namespace_get_resource(ndns); ++ int fd, rc; ++ char *buf; ++ ++ if (resource == ULLONG_MAX) { ++ fprintf(stderr, "failed to get resource: %s\n", ++ ndctl_namespace_get_devname(ndns)); ++ return -ENXIO; ++ } ++ ++ fd = open("/sys/devices/system/memory/soft_offline_page", O_WRONLY); ++ if (fd < 0) { ++ fprintf(stderr, "failed to open soft_offline_page\n"); ++ return -ENOENT; ++ } ++ ++ rc = asprintf(&buf, "%#llx\n", resource); ++ if (rc < 0) { ++ fprintf(stderr, "failed to alloc resource\n"); ++ close(fd); ++ return -ENOMEM; ++ } ++ ++ fprintf(stderr, "%s: try to offline page @%#llx\n", __func__, resource); ++ rc = write(fd, buf, rc); ++ free(buf); ++ close(fd); ++ ++ if (rc >= 0) { ++ fprintf(stderr, "%s: should have failed\n", __func__); ++ return -ENXIO; ++ } ++ ++ return 0; ++} ++ + static int __test_device_dax(unsigned long align, int loglevel, +- struct ndctl_test *test, struct ndctl_ctx *ctx) ++ struct test_ctx *test, struct ndctl_ctx *ctx) + { + unsigned long i; + struct sigaction act; +@@ -150,10 +188,10 @@ static int __test_device_dax(unsigned long align, int loglevel, + return 77; + } + +- if (align > SZ_2M && !ndctl_test_attempt(test, KERNEL_VERSION(4, 11, 0))) ++ if (align > SZ_2M && !test_attempt(test, KERNEL_VERSION(4, 11, 0))) + return 77; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 7, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 7, 0))) + return 77; + + /* setup up fsdax mode pmem device and seed with verification data */ +@@ -241,7 +279,7 @@ static int __test_device_dax(unsigned long align, int loglevel, + * Prior to 4.8-final these tests cause crashes, or are + * otherwise not supported. 
+ */ +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 9, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 9, 0))) { + static const bool devdax = false; + int fd2; + +@@ -278,6 +316,13 @@ static int __test_device_dax(unsigned long align, int loglevel, + goto out; + } + ++ rc = test_dax_soft_offline(test, ndns); ++ if (rc) { ++ fprintf(stderr, "%s: failed dax soft offline\n", ++ ndctl_namespace_get_devname(ndns)); ++ goto out; ++ } ++ + rc = test_dax_poison(test, fd, align, NULL, 0, devdax); + if (rc) { + fprintf(stderr, "%s: failed dax poison\n", +@@ -349,7 +394,7 @@ static int __test_device_dax(unsigned long align, int loglevel, + rc = EXIT_SUCCESS; + p = (int *) (buf + align); + *p = 0xff; +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 9, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 9, 0))) { + /* after 4.9 this test will properly get sigbus above */ + rc = EXIT_FAILURE; + fprintf(stderr, "%s: failed to unmap after reset\n", +@@ -361,8 +406,8 @@ static int __test_device_dax(unsigned long align, int loglevel, + return rc; + } + +-static int test_device_dax(int loglevel, struct ndctl_test *test, +- struct ndctl_ctx *ctx) ++static int test_device_dax(int loglevel, struct test_ctx *test, ++ struct ndctl_ctx *ctx) + { + unsigned long i, aligns[] = { SZ_4K, SZ_2M, SZ_1G }; + int rc; +@@ -378,7 +423,7 @@ static int test_device_dax(int loglevel, struct ndctl_test *test, + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -389,9 +434,9 @@ int __attribute__((weak)) main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc < 0) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + + rc = test_device_dax(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/dpa-alloc.c b/test/dpa-alloc.c +index e922009..e530ed4 100644 +--- a/test/dpa-alloc.c ++++ b/test/dpa-alloc.c +@@ -32,18 +32,19 @@ struct test_dpa_namespace { + + #define MIN_SIZE SZ_4M + +-static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) ++static int do_test(struct ndctl_ctx *ctx, struct test_ctx *test) + { + unsigned int default_available_slots, available_slots, i; + struct ndctl_region *region, *blk_region = NULL; + struct ndctl_namespace *ndns; + struct ndctl_dimm *dimm; +- unsigned long size; ++ unsigned long size, page_size; + struct ndctl_bus *bus; + char uuid_str[40]; + int round; + int rc; + ++ page_size = sysconf(_SC_PAGESIZE); + /* disable nfit_test.1, not used in this test */ + bus = ndctl_bus_get_by_provider(ctx, NFIT_PROVIDER1); + if (!bus) +@@ -124,11 +125,11 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) + return rc; + } + ndctl_namespace_disable_invalidate(ndns); +- rc = ndctl_namespace_set_size(ndns, SZ_4K); ++ rc = ndctl_namespace_set_size(ndns, page_size); + if (rc) { +- fprintf(stderr, "failed to init %s to size: %d\n", ++ fprintf(stderr, "failed to init %s to size: %lu\n", + ndctl_namespace_get_devname(ndns), +- SZ_4K); ++ page_size); + return rc; + } + namespaces[i].ndns = ndns; +@@ -150,7 +151,7 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) + ndns = namespaces[i % ARRAY_SIZE(namespaces)].ndns; + if (i % ARRAY_SIZE(namespaces) == 0) + round++; +- size = SZ_4K * round; ++ size = page_size * round; + rc = ndctl_namespace_set_size(ndns, size); + if (rc) { + fprintf(stderr, "%s: set_size: %lx failed: %d\n", 
+@@ -166,7 +167,7 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) + i--; + round++; + ndns = namespaces[i % ARRAY_SIZE(namespaces)].ndns; +- size = SZ_4K * round; ++ size = page_size * round; + rc = ndctl_namespace_set_size(ndns, size); + if (rc) { + fprintf(stderr, "%s failed to update while labels full\n", +@@ -175,7 +176,7 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) + } + + round--; +- size = SZ_4K * round; ++ size = page_size * round; + rc = ndctl_namespace_set_size(ndns, size); + if (rc) { + fprintf(stderr, "%s failed to reduce size while labels full\n", +@@ -279,19 +280,19 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) + return 0; + } + +-int test_dpa_alloc(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx) ++int test_dpa_alloc(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx) + { + struct kmod_module *mod; + struct kmod_ctx *kmod_ctx; + int err, result = EXIT_FAILURE; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 2, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 2, 0))) + return 77; + + ndctl_set_log_priority(ctx, loglevel); +- err = nfit_test_init(&kmod_ctx, &mod, NULL, loglevel, test); ++ err = ndctl_test_init(&kmod_ctx, &mod, NULL, loglevel, test); + if (err < 0) { +- ndctl_test_skip(test); ++ test_skip(test); + fprintf(stderr, "nfit_test unavailable skipping tests\n"); + return 77; + } +@@ -306,7 +307,7 @@ int test_dpa_alloc(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx) + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -317,9 +318,9 @@ int __attribute__((weak)) main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + + rc = test_dpa_alloc(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/dsm-fail.c b/test/dsm-fail.c +index 9dfd8b0..5559da2 100644 +--- a/test/dsm-fail.c ++++ b/test/dsm-fail.c +@@ -174,7 +174,7 @@ static int test_regions_enable(struct ndctl_bus *bus, + return 0; + } + +-static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) ++static int do_test(struct ndctl_ctx *ctx, struct test_ctx *test) + { + struct ndctl_bus *bus = ndctl_bus_get_by_provider(ctx, "nfit_test.0"); + struct ndctl_region *region, *victim_region = NULL; +@@ -184,7 +184,7 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) + unsigned int handle; + int rc, err = 0; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 9, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 9, 0))) + return 77; + + if (!bus) +@@ -339,17 +339,17 @@ static int do_test(struct ndctl_ctx *ctx, struct ndctl_test *test) + return err; + } + +-int test_dsm_fail(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx) ++int test_dsm_fail(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx) + { + struct kmod_module *mod; + struct kmod_ctx *kmod_ctx; + int result = EXIT_FAILURE, err; + + ndctl_set_log_priority(ctx, loglevel); +- err = nfit_test_init(&kmod_ctx, &mod, NULL, loglevel, test); ++ err = ndctl_test_init(&kmod_ctx, &mod, NULL, loglevel, test); + if (err < 0) { + result = 77; +- ndctl_test_skip(test); ++ test_skip(test); + fprintf(stderr, "%s unavailable skipping tests\n", + "nfit_test"); + return result; +@@ -364,7 +364,7 @@ int test_dsm_fail(int loglevel, struct 
ndctl_test *test, struct ndctl_ctx *ctx) + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -375,8 +375,8 @@ int __attribute__((weak)) main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + rc = test_dsm_fail(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/inject-error.sh b/test/inject-error.sh +index c636033..7d0b826 100755 +--- a/test/inject-error.sh ++++ b/test/inject-error.sh +@@ -11,6 +11,8 @@ err_count=8 + + . $(dirname $0)/common + ++check_prereq "jq" ++ + trap 'err $LINENO' ERR + + # sample json: +diff --git a/test/inject-smart.sh b/test/inject-smart.sh +index 94705df..4ca83b8 100755 +--- a/test/inject-smart.sh ++++ b/test/inject-smart.sh +@@ -166,6 +166,7 @@ do_tests() + } + + check_min_kver "4.19" || do_skip "kernel $KVER may not support smart (un)injection" ++check_prereq "jq" + modprobe nfit_test + rc=1 + +diff --git a/test/label-compat.sh b/test/label-compat.sh +index 340b93d..8ab2858 100755 +--- a/test/label-compat.sh ++++ b/test/label-compat.sh +@@ -10,6 +10,7 @@ BASE=$(dirname $0) + . $BASE/common + + check_min_kver "4.11" || do_skip "may not provide reliable isetcookie values" ++check_prereq "jq" + + trap 'err $LINENO' ERR + +diff --git a/test/libcxl-expect.h b/test/libcxl-expect.h +new file mode 100644 +index 0000000..acb8db9 +--- /dev/null ++++ b/test/libcxl-expect.h +@@ -0,0 +1,13 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2021 Intel Corporation. All rights reserved. */ ++#ifndef __LIBCXL_EXPECT_H__ ++#define __LIBCXL_EXPECT_H__ ++#include ++ ++#define EXPECT_FW_VER "BWFW VERSION 00" ++ ++/* Identify command fields */ ++#define EXPECT_CMD_IDENTIFY_PARTITION_ALIGN 0ULL ++#define EXPECT_CMD_IDENTIFY_LSA_SIZE 1024U ++ ++#endif /* __LIBCXL_EXPECT_H__ */ +diff --git a/test/libcxl.c b/test/libcxl.c +new file mode 100644 +index 0000000..e3da19c +--- /dev/null ++++ b/test/libcxl.c +@@ -0,0 +1,553 @@ ++// SPDX-License-Identifier: LGPL-2.1 ++/* Copyright (C) 2021, Intel Corporation. All rights reserved. */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "libcxl-expect.h" ++ ++#define TEST_SKIP 77 ++ ++const char *mod_list[] = { ++ "cxl_pci", ++ "cxl_acpi", ++ "cxl_core", ++}; ++ ++static int test_cxl_presence(struct cxl_ctx *ctx) ++{ ++ struct cxl_memdev *memdev; ++ int count = 0; ++ ++ cxl_memdev_foreach(ctx, memdev) ++ count++; ++ ++ if (count == 0) { ++ fprintf(stderr, "%s: no cxl memdevs found\n", __func__); ++ return TEST_SKIP; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Only continue with tests if all CXL devices in the system are qemu-emulated ++ * 'fake' devices. For now, use the firmware_version to check for this. Later, ++ * this might need to be changed to a vendor specific command. ++ * ++ * TODO: Change this to produce a list of devices that are safe to run tests ++ * against, and only run subsequent tests on this list. That will allow devices ++ * from other, non-emulated sources to be present in the system, and still run ++ * these unit tests safely. 
++ */ ++static int test_cxl_emulation_env(struct cxl_ctx *ctx) ++{ ++ struct cxl_memdev *memdev; ++ ++ cxl_memdev_foreach(ctx, memdev) { ++ const char *fw; ++ ++ fw = cxl_memdev_get_firmware_verison(memdev); ++ if (!fw) ++ return -ENXIO; ++ if (strcmp(fw, EXPECT_FW_VER) != 0) { ++ fprintf(stderr, ++ "%s: found non-emulation device, aborting\n", ++ __func__); ++ return TEST_SKIP; ++ } ++ } ++ return 0; ++} ++ ++static int test_cxl_modules(struct cxl_ctx *ctx) ++{ ++ int rc; ++ unsigned int i; ++ const char *name; ++ struct kmod_module *mod; ++ struct kmod_ctx *kmod_ctx; ++ ++ kmod_ctx = kmod_new(NULL, NULL); ++ if (!kmod_ctx) ++ return -ENXIO; ++ kmod_set_log_priority(kmod_ctx, LOG_DEBUG); ++ ++ /* test module removal */ ++ for (i = 0; i < ARRAY_SIZE(mod_list); i++) { ++ int state; ++ ++ name = mod_list[i]; ++ ++ rc = kmod_module_new_from_name(kmod_ctx, name, &mod); ++ if (rc) { ++ fprintf(stderr, "%s: %s.ko: missing\n", __func__, name); ++ break; ++ } ++ ++ state = kmod_module_get_initstate(mod); ++ if (state == KMOD_MODULE_LIVE) { ++ rc = kmod_module_remove_module(mod, 0); ++ if (rc) { ++ fprintf(stderr, ++ "%s: %s.ko: failed to remove: %d\n", ++ __func__, name, rc); ++ break; ++ } ++ } else if (state == KMOD_MODULE_BUILTIN) { ++ fprintf(stderr, ++ "%s: %s is builtin, skipping module removal test\n", ++ __func__, name); ++ } else { ++ fprintf(stderr, ++ "%s: warning: %s.ko: unexpected state (%d), trying to continue\n", ++ __func__, name, state); ++ } ++ } ++ ++ if (rc) ++ goto out; ++ ++ /* test module insertion */ ++ for (i = 0; i < ARRAY_SIZE(mod_list); i++) { ++ name = mod_list[i]; ++ rc = kmod_module_new_from_name(kmod_ctx, name, &mod); ++ if (rc) { ++ fprintf(stderr, "%s: %s.ko: missing\n", __func__, name); ++ break; ++ } ++ ++ rc = kmod_module_probe_insert_module(mod, ++ KMOD_PROBE_APPLY_BLACKLIST, ++ NULL, NULL, NULL, NULL); ++ } ++ ++out: ++ kmod_unref(kmod_ctx); ++ return rc; ++} ++ ++#define expect(c, name, field, expect) \ ++do { \ ++ if (cxl_cmd_##name##_get_##field(c) != expect) { \ ++ fprintf(stderr, \ ++ "%s: %s: " #field " mismatch\n", \ ++ __func__, cxl_cmd_get_devname(c)); \ ++ cxl_cmd_unref(cmd); \ ++ return -ENXIO; \ ++ } \ ++} while (0) ++ ++static int test_cxl_cmd_identify(struct cxl_ctx *ctx) ++{ ++ struct cxl_memdev *memdev; ++ struct cxl_cmd *cmd; ++ int rc; ++ ++ cxl_memdev_foreach(ctx, memdev) { ++ char fw_rev[0x10]; ++ ++ cmd = cxl_cmd_new_identify(memdev); ++ if (!cmd) ++ return -ENOMEM; ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: %s: cmd submission failed: %s\n", ++ __func__, cxl_memdev_get_devname(memdev), ++ strerror(-rc)); ++ goto out_fail; ++ } ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc) { ++ fprintf(stderr, ++ "%s: %s: cmd failed with firmware status: %d\n", ++ __func__, cxl_memdev_get_devname(memdev), rc); ++ rc = -ENXIO; ++ goto out_fail; ++ } ++ ++ rc = cxl_cmd_identify_get_fw_rev(cmd, fw_rev, 0x10); ++ if (rc) ++ goto out_fail; ++ if (strncmp(fw_rev, EXPECT_FW_VER, 0x10) != 0) { ++ fprintf(stderr, ++ "%s: fw_rev mismatch. 
Expected %s, got %s\n", ++ __func__, EXPECT_FW_VER, fw_rev); ++ rc = -ENXIO; ++ goto out_fail; ++ } ++ ++ expect(cmd, identify, lsa_size, EXPECT_CMD_IDENTIFY_LSA_SIZE); ++ expect(cmd, identify, partition_align, ++ EXPECT_CMD_IDENTIFY_PARTITION_ALIGN); ++ cxl_cmd_unref(cmd); ++ } ++ return 0; ++ ++out_fail: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++struct cmd_fuzzer { ++ struct cxl_cmd *(*new_fn)(struct cxl_memdev *memdev); ++ int in; /* in size to set in cmd (INT_MAX = don't change) */ ++ int out; /* out size to set in cmd (INT_MAX = don't change) */ ++ int e_out; /* expected out size returned (INT_MAX = don't check) */ ++ int e_rc; /* expected ioctl return (INT_MAX = don't check) */ ++ int e_hwrc; /* expected 'mbox_status' (INT_MAX = don't check) */ ++} fuzz_set[] = { ++ { cxl_cmd_new_identify, INT_MAX, INT_MAX, 67, 0, 0 }, ++ { cxl_cmd_new_identify, 64, INT_MAX, INT_MAX, -ENOMEM, INT_MAX }, ++ { cxl_cmd_new_identify, INT_MAX, 1024, 67, 0, INT_MAX }, ++ { cxl_cmd_new_identify, INT_MAX, 16, INT_MAX, -ENOMEM, INT_MAX }, ++}; ++ ++static int do_one_cmd_size_test(struct cxl_memdev *memdev, ++ struct cmd_fuzzer *test) ++{ ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct cxl_cmd *cmd; ++ int rc; ++ ++ cmd = test->new_fn(memdev); ++ if (!cmd) ++ return -ENOMEM; ++ ++ if (test->in != INT_MAX) { ++ rc = cxl_cmd_set_input_payload(cmd, NULL, test->in); ++ if (rc) { ++ fprintf(stderr, ++ "%s: %s: failed to set in.size (%d): %s\n", ++ __func__, devname, test->in, strerror(-rc)); ++ goto out_fail; ++ } ++ } ++ if (test->out != INT_MAX) { ++ rc = cxl_cmd_set_output_payload(cmd, NULL, test->out); ++ if (rc) { ++ fprintf(stderr, ++ "%s: %s: failed to set out.size (%d): %s\n", ++ __func__, devname, test->out, strerror(-rc)); ++ goto out_fail; ++ } ++ } ++ ++ rc = cxl_cmd_submit(cmd); ++ if (test->e_rc != INT_MAX && rc != test->e_rc) { ++ fprintf(stderr, "%s: %s: expected cmd rc %d, got %d\n", ++ __func__, devname, test->e_rc, rc); ++ rc = -ENXIO; ++ goto out_fail; ++ } ++ ++ rc = cxl_cmd_get_out_size(cmd); ++ if (test->e_out != INT_MAX && rc != test->e_out) { ++ fprintf(stderr, "%s: %s: expected response out.size %d, got %d\n", ++ __func__, devname, test->e_out, rc); ++ rc = -ENXIO; ++ goto out_fail; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (test->e_hwrc != INT_MAX && rc != test->e_hwrc) { ++ fprintf(stderr, "%s: %s: expected firmware status %d, got %d\n", ++ __func__, devname, test->e_hwrc, rc); ++ rc = -ENXIO; ++ goto out_fail; ++ } ++ return 0; ++ ++out_fail: ++ cxl_cmd_unref(cmd); ++ return rc; ++ ++} ++ ++static void print_fuzz_test_status(struct cmd_fuzzer *t, const char *devname, ++ unsigned long idx, const char *msg) ++{ ++ fprintf(stderr, ++ "%s: fuzz_set[%lu]: in: %d, out %d, e_out: %d, e_rc: %d, e_hwrc: %d, result: %s\n", ++ devname, idx, ++ (t->in == INT_MAX) ? -1 : t->in, ++ (t->out == INT_MAX) ? -1 : t->out, ++ (t->e_out == INT_MAX) ? -1 : t->e_out, ++ (t->e_rc == INT_MAX) ? -1 : t->e_rc, ++ (t->e_hwrc == INT_MAX) ? 
-1 : t->e_hwrc, ++ msg); ++} ++ ++static int test_cxl_cmd_fuzz_sizes(struct cxl_ctx *ctx) ++{ ++ struct cxl_memdev *memdev; ++ unsigned long i; ++ int rc; ++ ++ cxl_memdev_foreach(ctx, memdev) { ++ const char *devname = cxl_memdev_get_devname(memdev); ++ ++ for (i = 0; i < ARRAY_SIZE(fuzz_set); i++) { ++ rc = do_one_cmd_size_test(memdev, &fuzz_set[i]); ++ if (rc) { ++ print_fuzz_test_status(&fuzz_set[i], devname, ++ i, "FAIL"); ++ return rc; ++ } ++ print_fuzz_test_status(&fuzz_set[i], devname, i, "OK"); ++ } ++ } ++ return 0; ++} ++ ++static int debugfs_write_raw_flag(char *str) ++{ ++ char *path = "/sys/kernel/debug/cxl/mbox/raw_allow_all"; ++ int fd = open(path, O_WRONLY|O_CLOEXEC); ++ int n, len = strlen(str) + 1, rc; ++ ++ if (fd < 0) ++ return -errno; ++ ++ n = write(fd, str, len); ++ rc = -errno; ++ close(fd); ++ if (n < len) { ++ fprintf(stderr, "failed to write %s to %s: %s\n", str, path, ++ strerror(errno)); ++ return rc; ++ } ++ return 0; ++} ++ ++static char *test_lsa_data = "LIBCXL_TEST LSA DATA 01"; ++static int lsa_size = EXPECT_CMD_IDENTIFY_LSA_SIZE; ++ ++static int test_set_lsa(struct cxl_memdev *memdev) ++{ ++ int data_size = strlen(test_lsa_data) + 1; ++ struct cxl_cmd *cmd; ++ struct { ++ le32 offset; ++ le32 rsvd; ++ unsigned char data[lsa_size]; ++ } __attribute__((packed)) set_lsa; ++ int rc; ++ ++ set_lsa.offset = cpu_to_le32(0); ++ set_lsa.rsvd = cpu_to_le32(0); ++ memcpy(set_lsa.data, test_lsa_data, data_size); ++ ++ cmd = cxl_cmd_new_raw(memdev, 0x4103); ++ if (!cmd) ++ return -ENOMEM; ++ ++ rc = cxl_cmd_set_input_payload(cmd, &set_lsa, sizeof(set_lsa)); ++ if (rc) { ++ fprintf(stderr, "%s: %s: cmd setup failed: %s\n", ++ __func__, cxl_memdev_get_devname(memdev), ++ strerror(-rc)); ++ goto out_fail; ++ } ++ ++ rc = debugfs_write_raw_flag("Y"); ++ if (rc < 0) ++ return rc; ++ ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) ++ fprintf(stderr, "%s: %s: cmd submission failed: %s\n", ++ __func__, cxl_memdev_get_devname(memdev), ++ strerror(-rc)); ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: %s: firmware status: %d\n", ++ __func__, cxl_memdev_get_devname(memdev), rc); ++ return -ENXIO; ++ } ++ ++ if(debugfs_write_raw_flag("N") < 0) ++ fprintf(stderr, "%s: %s: failed to restore raw flag\n", ++ __func__, cxl_memdev_get_devname(memdev)); ++ ++out_fail: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++static int test_cxl_cmd_lsa(struct cxl_ctx *ctx) ++{ ++ int data_size = strlen(test_lsa_data) + 1; ++ struct cxl_memdev *memdev; ++ struct cxl_cmd *cmd; ++ unsigned char *buf; ++ int rc; ++ ++ cxl_memdev_foreach(ctx, memdev) { ++ rc = test_set_lsa(memdev); ++ if (rc) ++ return rc; ++ ++ cmd = cxl_cmd_new_get_lsa(memdev, 0, lsa_size); ++ if (!cmd) ++ return -ENOMEM; ++ rc = cxl_cmd_set_output_payload(cmd, NULL, lsa_size); ++ if (rc) { ++ fprintf(stderr, "%s: output buffer allocation: %s\n", ++ __func__, strerror(-rc)); ++ return rc; ++ } ++ rc = cxl_cmd_submit(cmd); ++ if (rc < 0) { ++ fprintf(stderr, "%s: %s: cmd submission failed: %s\n", ++ __func__, cxl_memdev_get_devname(memdev), ++ strerror(-rc)); ++ goto out_fail; ++ } ++ ++ rc = cxl_cmd_get_mbox_status(cmd); ++ if (rc != 0) { ++ fprintf(stderr, "%s: %s: firmware status: %d\n", ++ __func__, cxl_memdev_get_devname(memdev), rc); ++ return -ENXIO; ++ } ++ ++ buf = cxl_cmd_get_lsa_get_payload(cmd); ++ if (rc < 0) ++ goto out_fail; ++ ++ if (memcmp(buf, test_lsa_data, data_size) != 0) { ++ fprintf(stderr, "%s: LSA data mismatch.\n", __func__); ++ hex_dump_buf(buf, data_size); ++ rc = -EIO; ++ 
goto out_fail; ++ } ++ cxl_cmd_unref(cmd); ++ } ++ return 0; ++ ++out_fail: ++ cxl_cmd_unref(cmd); ++ return rc; ++} ++ ++static char *test_lsa_api_data = "LIBCXL_TEST READ/WRITE LSA DATA 2"; ++static int test_cxl_read_write_lsa(struct cxl_ctx *ctx) ++{ ++ int data_size = strlen(test_lsa_api_data) + 1; ++ struct cxl_memdev *memdev; ++ unsigned char *buf; ++ int rc = 0; ++ ++ buf = calloc(1, data_size); ++ if (!buf) ++ return -ENOMEM; ++ ++ cxl_memdev_foreach(ctx, memdev) { ++ rc = cxl_memdev_set_lsa(memdev, test_lsa_api_data, data_size, 0); ++ if (rc) ++ goto out_fail; ++ ++ rc = cxl_memdev_get_lsa(memdev, buf, data_size, 0); ++ if (rc < 0) ++ goto out_fail; ++ ++ if (memcmp(buf, test_lsa_api_data, data_size) != 0) { ++ fprintf(stderr, "%s: LSA data mismatch.\n", __func__); ++ fprintf(stderr, "%s: Get LSA returned:\n", __func__); ++ hex_dump_buf(buf, data_size); ++ fprintf(stderr, "%s: Set LSA had set:\n", __func__); ++ hex_dump_buf((unsigned char *)test_lsa_api_data, data_size); ++ rc = -EIO; ++ goto out_fail; ++ } ++ } ++ ++out_fail: ++ free(buf); ++ return rc; ++} ++ ++typedef int (*do_test_fn)(struct cxl_ctx *ctx); ++ ++static do_test_fn do_test[] = { ++ test_cxl_modules, ++ test_cxl_presence, ++ test_cxl_emulation_env, ++ test_cxl_cmd_identify, ++ test_cxl_cmd_lsa, ++ test_cxl_cmd_fuzz_sizes, ++ test_cxl_read_write_lsa, ++}; ++ ++static int test_libcxl(int loglevel, struct test_ctx *test, struct cxl_ctx *ctx) ++{ ++ unsigned int i; ++ int err, result = EXIT_FAILURE; ++ ++ if (!test_attempt(test, KERNEL_VERSION(5, 12, 0))) ++ return 77; ++ ++ cxl_set_log_priority(ctx, loglevel); ++ cxl_set_private_data(ctx, test); ++ ++ for (i = 0; i < ARRAY_SIZE(do_test); i++) { ++ err = do_test[i](ctx); ++ if (err < 0) { ++ fprintf(stderr, "test[%d] failed: %d\n", i, err); ++ break; ++ } else if (err == TEST_SKIP) { ++ fprintf(stderr, "test[%d]: SKIP\n", i); ++ test_skip(test); ++ result = TEST_SKIP; ++ break; ++ } ++ fprintf(stderr, "test[%d]: PASS\n", i); ++ } ++ ++ if (i >= ARRAY_SIZE(do_test)) ++ result = EXIT_SUCCESS; ++ return result; ++} ++ ++int __attribute__((weak)) main(int argc, char *argv[]) ++{ ++ struct test_ctx *test = test_new(0); ++ struct cxl_ctx *ctx; ++ int rc; ++ ++ if (!test) { ++ fprintf(stderr, "failed to initialize test\n"); ++ return EXIT_FAILURE; ++ } ++ ++ rc = cxl_new(&ctx); ++ if (rc) ++ return test_result(test, rc); ++ rc = test_libcxl(LOG_DEBUG, test, ctx); ++ cxl_unref(ctx); ++ return test_result(test, rc); ++} +diff --git a/test/libndctl.c b/test/libndctl.c +index 24d72b3..8aeaded 100644 +--- a/test/libndctl.c ++++ b/test/libndctl.c +@@ -613,7 +613,7 @@ static int validate_dax(struct ndctl_dax *dax) + const char *devname = ndctl_namespace_get_devname(ndns); + struct ndctl_region *region = ndctl_dax_get_region(dax); + struct ndctl_ctx *ctx = ndctl_dax_get_ctx(dax); +- struct ndctl_test *test = ndctl_get_private_data(ctx); ++ struct test_ctx *test = ndctl_get_private_data(ctx); + struct daxctl_region *dax_region = NULL, *found; + int rc = -ENXIO, fd, count, dax_expect; + struct daxctl_dev *dax_dev, *seed; +@@ -639,7 +639,7 @@ static int validate_dax(struct ndctl_dax *dax) + return -ENXIO; + } + +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 10, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 10, 0))) { + if (daxctl_region_get_size(dax_region) + != ndctl_dax_get_size(dax)) { + fprintf(stderr, "%s: expect size: %llu != %llu\n", +@@ -725,7 +725,7 @@ static int __check_dax_create(struct ndctl_region *region, + { + struct ndctl_dax *dax_seed = 
ndctl_region_get_dax_seed(region); + struct ndctl_ctx *ctx = ndctl_region_get_ctx(region); +- struct ndctl_test *test = ndctl_get_private_data(ctx); ++ struct test_ctx *test = ndctl_get_private_data(ctx); + enum ndctl_namespace_mode mode; + struct ndctl_dax *dax; + const char *devname; +@@ -745,7 +745,7 @@ static int __check_dax_create(struct ndctl_region *region, + ndctl_dax_set_align(dax, SZ_4K); + + rc = ndctl_namespace_set_enforce_mode(ndns, NDCTL_NS_MODE_DAX); +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 13, 0)) && rc < 0) { ++ if (test_attempt(test, KERNEL_VERSION(4, 13, 0)) && rc < 0) { + fprintf(stderr, "%s: failed to enforce dax mode\n", devname); + return rc; + } +@@ -835,7 +835,7 @@ static int __check_pfn_create(struct ndctl_region *region, + { + struct ndctl_pfn *pfn_seed = ndctl_region_get_pfn_seed(region); + struct ndctl_ctx *ctx = ndctl_region_get_ctx(region); +- struct ndctl_test *test = ndctl_get_private_data(ctx); ++ struct test_ctx *test = ndctl_get_private_data(ctx); + enum ndctl_namespace_mode mode; + struct ndctl_pfn *pfn; + const char *devname; +@@ -856,7 +856,7 @@ static int __check_pfn_create(struct ndctl_region *region, + */ + ndctl_pfn_set_align(pfn, SZ_4K); + rc = ndctl_namespace_set_enforce_mode(ndns, NDCTL_NS_MODE_MEMORY); +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 13, 0)) && rc < 0) { ++ if (test_attempt(test, KERNEL_VERSION(4, 13, 0)) && rc < 0) { + fprintf(stderr, "%s: failed to enforce pfn mode\n", devname); + return rc; + } +@@ -978,14 +978,8 @@ static int check_btt_size(struct ndctl_btt *btt) + unsigned long long actual, expect; + int size_select, sect_select; + struct ndctl_ctx *ctx = ndctl_btt_get_ctx(btt); +- struct ndctl_test *test = ndctl_get_private_data(ctx); ++ struct test_ctx *test = ndctl_get_private_data(ctx); + struct ndctl_namespace *ndns = ndctl_btt_get_namespace(btt); +- +- if (!ndns) +- return -ENXIO; +- +- ns_size = ndctl_namespace_get_size(ndns); +- sect_size = ndctl_btt_get_sector_size(btt); + unsigned long long expect_table[][2] = { + [0] = { + [0] = 0x11b5400, +@@ -1001,6 +995,12 @@ static int check_btt_size(struct ndctl_btt *btt) + }, + }; + ++ if (!ndns) ++ return -ENXIO; ++ ++ ns_size = ndctl_namespace_get_size(ndns); ++ sect_size = ndctl_btt_get_sector_size(btt); ++ + if (sect_size >= SZ_4K) + sect_select = 1; + else if (sect_size >= 512) +@@ -1030,7 +1030,7 @@ static int check_btt_size(struct ndctl_btt *btt) + } + + /* prior to 4.8 btt devices did not have a size attribute */ +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 8, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 8, 0))) + return 0; + + expect = expect_table[size_select][sect_select]; +@@ -1049,7 +1049,7 @@ static int check_btt_create(struct ndctl_region *region, struct ndctl_namespace + struct namespace *namespace) + { + struct ndctl_ctx *ctx = ndctl_region_get_ctx(region); +- struct ndctl_test *test = ndctl_get_private_data(ctx); ++ struct test_ctx *test = ndctl_get_private_data(ctx); + struct btt *btt_s = namespace->btt_settings; + int i, fd, retry = 10; + struct ndctl_btt *btt; +@@ -1077,7 +1077,7 @@ static int check_btt_create(struct ndctl_region *region, struct ndctl_namespace + ndctl_btt_set_uuid(btt, btt_s->uuid); + ndctl_btt_set_sector_size(btt, btt_s->sector_sizes[i]); + rc = ndctl_namespace_set_enforce_mode(ndns, NDCTL_NS_MODE_SECTOR); +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 13, 0)) && rc < 0) { ++ if (test_attempt(test, KERNEL_VERSION(4, 13, 0)) && rc < 0) { + fprintf(stderr, "%s: failed to enforce btt mode\n", devname); + goto 
err; + } +@@ -1094,7 +1094,7 @@ static int check_btt_create(struct ndctl_region *region, struct ndctl_namespace + } + + /* prior to v4.5 the mode attribute did not exist */ +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 5, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 5, 0))) { + mode = ndctl_namespace_get_mode(ndns); + if (mode >= 0 && mode != NDCTL_NS_MODE_SECTOR) + fprintf(stderr, "%s: expected safe mode got: %d\n", +@@ -1102,7 +1102,7 @@ static int check_btt_create(struct ndctl_region *region, struct ndctl_namespace + } + + /* prior to v4.13 the expected sizes were different due to BTT1.1 */ +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 13, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 13, 0))) { + rc = check_btt_size(btt); + if (rc) + goto err; +@@ -1257,7 +1257,7 @@ static int check_pfn_autodetect(struct ndctl_bus *bus, + struct ndctl_region *region = ndctl_namespace_get_region(ndns); + struct ndctl_ctx *ctx = ndctl_region_get_ctx(region); + const char *devname = ndctl_namespace_get_devname(ndns); +- struct ndctl_test *test = ndctl_get_private_data(ctx); ++ struct test_ctx *test = ndctl_get_private_data(ctx); + struct pfn *auto_pfn = namespace->pfn_settings; + struct ndctl_pfn *pfn, *found = NULL; + enum ndctl_namespace_mode mode; +@@ -1287,7 +1287,7 @@ static int check_pfn_autodetect(struct ndctl_bus *bus, + return -ENXIO; + + mode = ndctl_namespace_get_enforce_mode(ndns); +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 13, 0)) ++ if (test_attempt(test, KERNEL_VERSION(4, 13, 0)) + && mode != NDCTL_NS_MODE_MEMORY) { + fprintf(stderr, "%s expected enforce_mode pfn\n", devname); + return -ENXIO; +@@ -1354,7 +1354,7 @@ static int check_dax_autodetect(struct ndctl_bus *bus, + struct ndctl_region *region = ndctl_namespace_get_region(ndns); + struct ndctl_ctx *ctx = ndctl_region_get_ctx(region); + const char *devname = ndctl_namespace_get_devname(ndns); +- struct ndctl_test *test = ndctl_get_private_data(ctx); ++ struct test_ctx *test = ndctl_get_private_data(ctx); + struct dax *auto_dax = namespace->dax_settings; + struct ndctl_dax *dax, *found = NULL; + enum ndctl_namespace_mode mode; +@@ -1384,7 +1384,7 @@ static int check_dax_autodetect(struct ndctl_bus *bus, + return -ENXIO; + + mode = ndctl_namespace_get_enforce_mode(ndns); +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 13, 0)) ++ if (test_attempt(test, KERNEL_VERSION(4, 13, 0)) + && mode != NDCTL_NS_MODE_DAX) { + fprintf(stderr, "%s expected enforce_mode dax\n", devname); + return -ENXIO; +@@ -1439,7 +1439,7 @@ static int check_btt_autodetect(struct ndctl_bus *bus, + struct ndctl_region *region = ndctl_namespace_get_region(ndns); + struct ndctl_ctx *ctx = ndctl_region_get_ctx(region); + const char *devname = ndctl_namespace_get_devname(ndns); +- struct ndctl_test *test = ndctl_get_private_data(ctx); ++ struct test_ctx *test = ndctl_get_private_data(ctx); + struct btt *auto_btt = namespace->btt_settings; + struct ndctl_btt *btt, *found = NULL; + enum ndctl_namespace_mode mode; +@@ -1469,7 +1469,7 @@ static int check_btt_autodetect(struct ndctl_bus *bus, + return -ENXIO; + + mode = ndctl_namespace_get_enforce_mode(ndns); +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 13, 0)) ++ if (test_attempt(test, KERNEL_VERSION(4, 13, 0)) + && mode != NDCTL_NS_MODE_SECTOR) { + fprintf(stderr, "%s expected enforce_mode btt\n", devname); + return -ENXIO; +@@ -1541,6 +1541,7 @@ static int validate_bdev(const char *devname, struct ndctl_btt *btt, + struct ndctl_pfn *pfn, struct ndctl_namespace *ndns, + struct namespace 
*namespace, void *buf) + { ++ struct ndctl_region *region = ndctl_namespace_get_region(ndns); + char bdevpath[50]; + int fd, rc, ro; + +@@ -1578,6 +1579,13 @@ static int validate_bdev(const char *devname, struct ndctl_btt *btt, + } + + ro = 0; ++ rc = ndctl_region_set_ro(region, ro); ++ if (rc < 0) { ++ fprintf(stderr, "%s: ndctl_region_set_ro failed\n", devname); ++ rc = -errno; ++ goto out; ++ } ++ + rc = ioctl(fd, BLKROSET, &ro); + if (rc < 0) { + fprintf(stderr, "%s: BLKROSET failed\n", +@@ -1605,8 +1613,16 @@ static int validate_bdev(const char *devname, struct ndctl_btt *btt, + rc = -ENXIO; + goto out; + } ++ ++ rc = ndctl_region_set_ro(region, namespace->ro); ++ if (rc < 0) { ++ fprintf(stderr, "%s: ndctl_region_set_ro reset failed\n", devname); ++ rc = -errno; ++ goto out; ++ } ++ + rc = 0; +- out: ++out: + close(fd); + return rc; + } +@@ -1665,7 +1681,7 @@ static int check_namespaces(struct ndctl_region *region, + struct namespace **namespaces, enum ns_mode mode) + { + struct ndctl_ctx *ctx = ndctl_region_get_ctx(region); +- struct ndctl_test *test = ndctl_get_private_data(ctx); ++ struct test_ctx *test = ndctl_get_private_data(ctx); + struct ndctl_bus *bus = ndctl_region_get_bus(region); + struct ndctl_namespace **ndns_save; + struct namespace *namespace; +@@ -1698,7 +1714,7 @@ static int check_namespaces(struct ndctl_region *region, + } + + if (ndctl_region_get_type(region) == ND_DEVICE_REGION_PMEM +- && !ndctl_test_attempt(test, KERNEL_VERSION(4, 13, 0))) ++ && !test_attempt(test, KERNEL_VERSION(4, 13, 0))) + /* pass, no sector_size support for pmem prior to 4.13 */; + else { + num_sector_sizes = namespace->num_sector_sizes; +@@ -2028,7 +2044,7 @@ static int check_btts(struct ndctl_region *region, struct btt **btts) + struct check_cmd { + int (*check_fn)(struct ndctl_bus *bus, struct ndctl_dimm *dimm, struct check_cmd *check); + struct ndctl_cmd *cmd; +- struct ndctl_test *test; ++ struct test_ctx *test; + }; + + static struct check_cmd *check_cmds; +@@ -2305,7 +2321,7 @@ static int check_smart_threshold(struct ndctl_bus *bus, struct ndctl_dimm *dimm, + * Starting with v4.9 smart threshold requests trigger the file + * descriptor returned by ndctl_dimm_get_health_eventfd(). + */ +- if (ndctl_test_attempt(check->test, KERNEL_VERSION(4, 9, 0))) { ++ if (test_attempt(check->test, KERNEL_VERSION(4, 9, 0))) { + int pid = fork(); + + if (pid == 0) { +@@ -2380,7 +2396,7 @@ static int check_smart_threshold(struct ndctl_bus *bus, struct ndctl_dimm *dimm, + ndctl_cmd_unref(cmd_set); + } + +- if (ndctl_test_attempt(check->test, KERNEL_VERSION(4, 9, 0))) { ++ if (test_attempt(check->test, KERNEL_VERSION(4, 9, 0))) { + wait(&rc); + if (WEXITSTATUS(rc) == EXIT_FAILURE) { + fprintf(stderr, "%s: expect health event trigger\n", +@@ -2396,7 +2412,7 @@ static int check_smart_threshold(struct ndctl_bus *bus, struct ndctl_dimm *dimm, + #define BITS_PER_LONG 32 + static int check_commands(struct ndctl_bus *bus, struct ndctl_dimm *dimm, + unsigned long bus_commands, unsigned long dimm_commands, +- struct ndctl_test *test) ++ struct test_ctx *test) + { + /* + * For now, by coincidence, these are indexed in test execution +@@ -2423,7 +2439,7 @@ static int check_commands(struct ndctl_bus *bus, struct ndctl_dimm *dimm, + * The kernel did not start emulating v1.2 namespace spec smart data + * until 4.9. 
+ */ +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 9, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 9, 0))) + dimm_commands &= ~((1 << ND_CMD_SMART) + | (1 << ND_CMD_SMART_THRESHOLD)); + +@@ -2458,7 +2474,7 @@ static int check_commands(struct ndctl_bus *bus, struct ndctl_dimm *dimm, + if (rc) + goto out; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 6, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 6, 0))) + goto out; + + out: +@@ -2467,7 +2483,7 @@ static int check_commands(struct ndctl_bus *bus, struct ndctl_dimm *dimm, + + static int check_dimms(struct ndctl_bus *bus, struct dimm *dimms, int n, + unsigned long bus_commands, unsigned long dimm_commands, +- struct ndctl_test *test) ++ struct test_ctx *test) + { + long long dsc; + int i, j, rc; +@@ -2514,7 +2530,7 @@ static int check_dimms(struct ndctl_bus *bus, struct dimm *dimms, int n, + return -ENXIO; + } + +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 7, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 7, 0))) { + if (ndctl_dimm_get_formats(dimm) != dimms[i].formats) { + fprintf(stderr, "dimm%d expected formats: %d got: %d\n", + i, dimms[i].formats, +@@ -2532,7 +2548,7 @@ static int check_dimms(struct ndctl_bus *bus, struct dimm *dimms, int n, + } + } + +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 7, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 7, 0))) { + if (ndctl_dimm_get_subsystem_vendor(dimm) + != dimms[i].subsystem_vendor) { + fprintf(stderr, +@@ -2543,7 +2559,7 @@ static int check_dimms(struct ndctl_bus *bus, struct dimm *dimms, int n, + } + } + +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 8, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 8, 0))) { + if (ndctl_dimm_get_manufacturing_date(dimm) + != dimms[i].manufacturing_date) { + fprintf(stderr, +@@ -2588,7 +2604,7 @@ static void reset_bus(struct ndctl_bus *bus) + ndctl_region_enable(region); + } + +-static int do_test0(struct ndctl_ctx *ctx, struct ndctl_test *test) ++static int do_test0(struct ndctl_ctx *ctx, struct test_ctx *test) + { + struct ndctl_bus *bus = ndctl_bus_get_by_provider(ctx, NFIT_PROVIDER0); + struct ndctl_region *region; +@@ -2629,14 +2645,14 @@ static int do_test0(struct ndctl_ctx *ctx, struct ndctl_test *test) + } + + /* pfn and dax tests require vmalloc-enabled nfit_test */ +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 8, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 8, 0))) { + rc = check_regions(bus, regions0, ARRAY_SIZE(regions0), DAX); + if (rc) + return rc; + reset_bus(bus); + } + +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 8, 0))) { ++ if (test_attempt(test, KERNEL_VERSION(4, 8, 0))) { + rc = check_regions(bus, regions0, ARRAY_SIZE(regions0), PFN); + if (rc) + return rc; +@@ -2646,7 +2662,7 @@ static int do_test0(struct ndctl_ctx *ctx, struct ndctl_test *test) + return check_regions(bus, regions0, ARRAY_SIZE(regions0), BTT); + } + +-static int do_test1(struct ndctl_ctx *ctx, struct ndctl_test *test) ++static int do_test1(struct ndctl_ctx *ctx, struct test_ctx *test) + { + struct ndctl_bus *bus = ndctl_bus_get_by_provider(ctx, NFIT_PROVIDER1); + int rc; +@@ -2660,7 +2676,7 @@ static int do_test1(struct ndctl_ctx *ctx, struct ndctl_test *test) + * Starting with v4.10 the dimm on nfit_test.1 gets a unique + * handle. 
+ */ +- if (ndctl_test_attempt(test, KERNEL_VERSION(4, 10, 0))) ++ if (test_attempt(test, KERNEL_VERSION(4, 10, 0))) + dimms1[0].handle = DIMM_HANDLE(1, 0, 0, 0, 0); + + rc = check_dimms(bus, dimms1, ARRAY_SIZE(dimms1), 0, 0, test); +@@ -2670,13 +2686,13 @@ static int do_test1(struct ndctl_ctx *ctx, struct ndctl_test *test) + return check_regions(bus, regions1, ARRAY_SIZE(regions1), BTT); + } + +-typedef int (*do_test_fn)(struct ndctl_ctx *ctx, struct ndctl_test *test); ++typedef int (*do_test_fn)(struct ndctl_ctx *ctx, struct test_ctx *test); + static do_test_fn do_test[] = { + do_test0, + do_test1, + }; + +-int test_libndctl(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx) ++int test_libndctl(int loglevel, struct test_ctx *test, struct ndctl_ctx *ctx) + { + unsigned int i; + struct kmod_module *mod; +@@ -2684,7 +2700,7 @@ int test_libndctl(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx) + struct daxctl_ctx *daxctl_ctx; + int err, result = EXIT_FAILURE; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 2, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 2, 0))) + return 77; + + ndctl_set_log_priority(ctx, loglevel); +@@ -2692,9 +2708,9 @@ int test_libndctl(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx) + daxctl_set_log_priority(daxctl_ctx, loglevel); + ndctl_set_private_data(ctx, test); + +- err = nfit_test_init(&kmod_ctx, &mod, ctx, loglevel, test); ++ err = ndctl_test_init(&kmod_ctx, &mod, ctx, loglevel, test); + if (err < 0) { +- ndctl_test_skip(test); ++ test_skip(test); + fprintf(stderr, "nfit_test unavailable skipping tests\n"); + return 77; + } +@@ -2716,7 +2732,7 @@ int test_libndctl(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx) + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -2727,8 +2743,8 @@ int __attribute__((weak)) main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + rc = test_libndctl(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/max_available_extent_ns.sh b/test/max_available_extent_ns.sh +index 14d741d..343f3c9 100755 +--- a/test/max_available_extent_ns.sh ++++ b/test/max_available_extent_ns.sh +@@ -9,6 +9,7 @@ rc=77 + trap 'err $LINENO' ERR + + check_min_kver "4.19" || do_skip "kernel $KVER may not support max_available_size" ++check_prereq "jq" + + init() + { +diff --git a/test/monitor.sh b/test/monitor.sh +index cdab5e1..28c5541 100755 +--- a/test/monitor.sh ++++ b/test/monitor.sh +@@ -13,6 +13,8 @@ smart_supported_bus="" + + . $(dirname $0)/common + ++check_prereq "jq" ++ + trap 'err $LINENO' ERR + + check_min_kver "4.15" || do_skip "kernel $KVER may not support monitor service" +diff --git a/test/multi-dax.sh b/test/multi-dax.sh +index e932569..b343a38 100755 +--- a/test/multi-dax.sh ++++ b/test/multi-dax.sh +@@ -9,9 +9,12 @@ rc=77 + . $(dirname $0)/common + + check_min_kver "4.13" || do_skip "may lack multi-dax support" ++check_prereq "jq" + + trap 'err $LINENO' ERR + ++ALIGN_SIZE=`getconf PAGESIZE` ++ + # setup (reset nfit_test dimms) + modprobe nfit_test + $NDCTL disable-region -b $NFIT_TEST_BUS0 all +@@ -22,9 +25,9 @@ rc=1 + query=". 
| sort_by(.available_size) | reverse | .[0].dev" + region=$($NDCTL list -b $NFIT_TEST_BUS0 -t pmem -Ri | jq -r "$query") + +-json=$($NDCTL create-namespace -b $NFIT_TEST_BUS0 -r $region -t pmem -m devdax -a 4096 -s 16M) ++json=$($NDCTL create-namespace -b $NFIT_TEST_BUS0 -r $region -t pmem -m devdax -a $ALIGN_SIZE -s 16M) + chardev1=$(echo $json | jq ". | select(.mode == \"devdax\") | .daxregion.devices[0].chardev") +-json=$($NDCTL create-namespace -b $NFIT_TEST_BUS0 -r $region -t pmem -m devdax -a 4096 -s 16M) ++json=$($NDCTL create-namespace -b $NFIT_TEST_BUS0 -r $region -t pmem -m devdax -a $ALIGN_SIZE -s 16M) + chardev2=$(echo $json | jq ". | select(.mode == \"devdax\") | .daxregion.devices[0].chardev") + + _cleanup +diff --git a/test/multi-pmem.c b/test/multi-pmem.c +index 3d10952..f2eb381 100644 +--- a/test/multi-pmem.c ++++ b/test/multi-pmem.c +@@ -53,11 +53,11 @@ static void destroy_namespace(struct ndctl_namespace *ndns) + + /* Check that the namespace device is gone (if it wasn't the seed) */ + static int check_deleted(struct ndctl_region *region, const char *devname, +- struct ndctl_test *test) ++ struct test_ctx *test) + { + struct ndctl_namespace *ndns; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 10, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 10, 0))) + return 0; + + ndctl_namespace_foreach(region, ndns) { +@@ -73,7 +73,7 @@ static int check_deleted(struct ndctl_region *region, const char *devname, + return 0; + } + +-static int do_multi_pmem(struct ndctl_ctx *ctx, struct ndctl_test *test) ++static int do_multi_pmem(struct ndctl_ctx *ctx, struct test_ctx *test) + { + int i; + char devname[100]; +@@ -85,8 +85,8 @@ static int do_multi_pmem(struct ndctl_ctx *ctx, struct ndctl_test *test) + struct ndctl_namespace *namespaces[NUM_NAMESPACES]; + unsigned long long blk_avail, blk_avail_orig, expect; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 9, 0))) { +- ndctl_test_skip(test); ++ if (!test_attempt(test, KERNEL_VERSION(4, 9, 0))) { ++ test_skip(test); + return 77; + } + +@@ -238,21 +238,22 @@ static int do_multi_pmem(struct ndctl_ctx *ctx, struct ndctl_test *test) + return 0; + } + +-int test_multi_pmem(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx) ++int test_multi_pmem(int loglevel, struct test_ctx *test, ++ struct ndctl_ctx *ctx) + { + struct kmod_module *mod; + struct kmod_ctx *kmod_ctx; + int err, result = EXIT_FAILURE; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 2, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 2, 0))) + return 77; + + ndctl_set_log_priority(ctx, loglevel); + +- err = nfit_test_init(&kmod_ctx, &mod, NULL, loglevel, test); ++ err = ndctl_test_init(&kmod_ctx, &mod, NULL, loglevel, test); + if (err < 0) { + result = 77; +- ndctl_test_skip(test); ++ test_skip(test); + fprintf(stderr, "%s unavailable skipping tests\n", + "nfit_test"); + return result; +@@ -267,7 +268,7 @@ int test_multi_pmem(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -278,8 +279,8 @@ int __attribute__((weak)) main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + rc = test_multi_pmem(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/parent-uuid.c b/test/parent-uuid.c +index 
6424e9f..8da396f 100644 +--- a/test/parent-uuid.c ++++ b/test/parent-uuid.c +@@ -208,19 +208,20 @@ static int do_test(struct ndctl_ctx *ctx) + return 0; + } + +-int test_parent_uuid(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ctx) ++int test_parent_uuid(int loglevel, struct test_ctx *test, ++ struct ndctl_ctx *ctx) + { + struct kmod_module *mod; + struct kmod_ctx *kmod_ctx; + int err, result = EXIT_FAILURE; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 3, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 3, 0))) + return 77; + + ndctl_set_log_priority(ctx, loglevel); +- err = nfit_test_init(&kmod_ctx, &mod, NULL, loglevel, test); ++ err = ndctl_test_init(&kmod_ctx, &mod, NULL, loglevel, test); + if (err < 0) { +- ndctl_test_skip(test); ++ test_skip(test); + fprintf(stderr, "nfit_test unavailable skipping tests\n"); + return 77; + } +@@ -235,7 +236,7 @@ int test_parent_uuid(int loglevel, struct ndctl_test *test, struct ndctl_ctx *ct + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -246,9 +247,9 @@ int __attribute__((weak)) main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + + rc = test_parent_uuid(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/pmem_namespaces.c b/test/pmem_namespaces.c +index f0f2edd..20f41fe 100644 +--- a/test/pmem_namespaces.c ++++ b/test/pmem_namespaces.c +@@ -161,8 +161,8 @@ static int ns_do_io(const char *bdev) + + static const char *comm = "test-pmem-namespaces"; + +-int test_pmem_namespaces(int log_level, struct ndctl_test *test, +- struct ndctl_ctx *ctx) ++int test_pmem_namespaces(int log_level, struct test_ctx *test, ++ struct ndctl_ctx *ctx) + { + struct ndctl_region *region, *pmem_region = NULL; + struct kmod_ctx *kmod_ctx = NULL; +@@ -173,7 +173,7 @@ int test_pmem_namespaces(int log_level, struct ndctl_test *test, + int rc = -ENXIO; + char bdev[50]; + +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 2, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 2, 0))) + return 77; + + ndctl_set_log_priority(ctx, log_level); +@@ -191,12 +191,12 @@ int test_pmem_namespaces(int log_level, struct ndctl_test *test, + + if (!bus) { + fprintf(stderr, "ACPI.NFIT unavailable falling back to nfit_test\n"); +- rc = nfit_test_init(&kmod_ctx, &mod, NULL, log_level, test); ++ rc = ndctl_test_init(&kmod_ctx, &mod, NULL, log_level, test); + ndctl_invalidate(ctx); + bus = ndctl_bus_get_by_provider(ctx, "nfit_test.0"); + if (rc < 0 || !bus) { + rc = 77; +- ndctl_test_skip(test); ++ test_skip(test); + fprintf(stderr, "nfit_test unavailable skipping tests\n"); + goto err_module; + } +@@ -262,7 +262,7 @@ int test_pmem_namespaces(int log_level, struct ndctl_test *test, + + int __attribute__((weak)) main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -274,9 +274,9 @@ int __attribute__((weak)) main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + + rc = test_pmem_namespaces(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/revoke-devmem.c b/test/revoke-devmem.c +index 
bb8979e..ac8d81c 100644 +--- a/test/revoke-devmem.c ++++ b/test/revoke-devmem.c +@@ -32,8 +32,8 @@ static void sigbus(int sig, siginfo_t *siginfo, void *d) + #define err(fmt, ...) \ + fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__) + +-static int test_devmem(int loglevel, struct ndctl_test *test, +- struct ndctl_ctx *ctx) ++static int test_devmem(int loglevel, struct test_ctx *test, ++ struct ndctl_ctx *ctx) + { + void *buf; + int fd, rc; +@@ -44,7 +44,7 @@ static int test_devmem(int loglevel, struct ndctl_test *test, + ndctl_set_log_priority(ctx, loglevel); + + /* iostrict devmem started in kernel 4.5 */ +- if (!ndctl_test_attempt(test, KERNEL_VERSION(4, 5, 0))) ++ if (!test_attempt(test, KERNEL_VERSION(4, 5, 0))) + return 77; + + ndns = ndctl_get_test_dev(ctx); +@@ -124,7 +124,7 @@ out_devmem: + + int main(int argc, char *argv[]) + { +- struct ndctl_test *test = ndctl_test_new(0); ++ struct test_ctx *test = test_new(0); + struct ndctl_ctx *ctx; + int rc; + +@@ -135,9 +135,9 @@ int main(int argc, char *argv[]) + + rc = ndctl_new(&ctx); + if (rc < 0) +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + + rc = test_devmem(LOG_DEBUG, test, ctx); + ndctl_unref(ctx); +- return ndctl_test_result(test, rc); ++ return test_result(test, rc); + } +diff --git a/test/sector-mode.sh b/test/sector-mode.sh +index dd7013e..7a2faea 100755 +--- a/test/sector-mode.sh ++++ b/test/sector-mode.sh +@@ -6,9 +6,13 @@ rc=77 + + . $(dirname $0)/common + ++check_prereq "jq" ++ + set -e + trap 'err $LINENO' ERR + ++ALIGN_SIZE=`getconf PAGESIZE` ++ + # setup (reset nfit_test dimms) + modprobe nfit_test + $NDCTL disable-region -b $NFIT_TEST_BUS0 all +@@ -25,7 +29,7 @@ NAMESPACE=$($NDCTL list -b $NFIT_TEST_BUS1 -N | jq -r "$query") + REGION=$($NDCTL list -R --namespace=$NAMESPACE | jq -r "(.[]) | .dev") + echo 0 > /sys/bus/nd/devices/$REGION/read_only + $NDCTL create-namespace --no-autolabel -e $NAMESPACE -m sector -f -l 4K +-$NDCTL create-namespace --no-autolabel -e $NAMESPACE -m dax -f -a 4K ++$NDCTL create-namespace --no-autolabel -e $NAMESPACE -m dax -f -a $ALIGN_SIZE + $NDCTL create-namespace --no-autolabel -e $NAMESPACE -m sector -f -l 4K + + _cleanup +diff --git a/util/filter.c b/util/filter.c +index 8b4aad3..d81dade 100644 +--- a/util/filter.c ++++ b/util/filter.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + struct ndctl_bus *util_bus_filter(struct ndctl_bus *bus, const char *__ident) + { +@@ -339,6 +340,25 @@ struct daxctl_region *util_daxctl_region_filter(struct daxctl_region *region, + return NULL; + } + ++struct cxl_memdev *util_cxl_memdev_filter(struct cxl_memdev *memdev, ++ const char *ident) ++{ ++ int memdev_id; ++ ++ if (!ident || strcmp(ident, "all") == 0) ++ return memdev; ++ ++ if (strcmp(ident, cxl_memdev_get_devname(memdev)) == 0) ++ return memdev; ++ ++ if ((sscanf(ident, "%d", &memdev_id) == 1 ++ || sscanf(ident, "mem%d", &memdev_id) == 1) ++ && cxl_memdev_get_id(memdev) == memdev_id) ++ return memdev; ++ ++ return NULL; ++} ++ + enum ndctl_namespace_mode util_nsmode(const char *mode) + { + if (!mode) +diff --git a/util/filter.h b/util/filter.h +index 1e1a41c..9a80d65 100644 +--- a/util/filter.h ++++ b/util/filter.h +@@ -29,6 +29,8 @@ struct daxctl_dev *util_daxctl_dev_filter(struct daxctl_dev *dev, + const char *ident); + struct daxctl_region *util_daxctl_region_filter(struct daxctl_region *region, + const char *ident); ++struct cxl_memdev *util_cxl_memdev_filter(struct cxl_memdev *memdev, ++ const char *ident); + + enum ndctl_namespace_mode 
util_nsmode(const char *mode); + const char *util_nsmode_name(enum ndctl_namespace_mode mode); +diff --git a/util/hexdump.c b/util/hexdump.c +new file mode 100644 +index 0000000..1ab0118 +--- /dev/null ++++ b/util/hexdump.c +@@ -0,0 +1,53 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (C) 2015-2021 Intel Corporation. All rights reserved. */ ++#include ++#include ++ ++static void print_separator(int len) ++{ ++ int i; ++ ++ for (i = 0; i < len; i++) ++ fprintf(stderr, "-"); ++ fprintf(stderr, "\n"); ++} ++ ++void hex_dump_buf(unsigned char *buf, int size) ++{ ++ int i; ++ const int grp = 4; /* Number of bytes in a group */ ++ const int wid = 16; /* Bytes per line. Should be a multiple of grp */ ++ char ascii[wid + 1]; ++ ++ /* Generate header */ ++ print_separator((wid * 4) + (wid / grp) + 12); ++ ++ fprintf(stderr, "Offset "); ++ for (i = 0; i < wid; i++) { ++ if (i % grp == 0) fprintf(stderr, " "); ++ fprintf(stderr, "%02x ", i); ++ } ++ fprintf(stderr, " Ascii\n"); ++ ++ print_separator((wid * 4) + (wid / grp) + 12); ++ ++ /* Generate hex dump */ ++ for (i = 0; i < size; i++) { ++ if (i % wid == 0) fprintf(stderr, "%08x ", i); ++ ascii[i % wid] = ++ ((buf[i] >= ' ') && (buf[i] <= '~')) ? buf[i] : '.'; ++ if (i % grp == 0) fprintf(stderr, " "); ++ fprintf(stderr, "%02x ", buf[i]); ++ if ((i == size - 1) && (size % wid != 0)) { ++ int j; ++ int done = size % wid; ++ int grps_done = (done / grp) + ((done % grp) ? 1 : 0); ++ int spaces = wid / grp - grps_done + ((wid - done) * 3); ++ ++ for (j = 0; j < spaces; j++) fprintf(stderr, " "); ++ } ++ if ((i % wid == wid - 1) || (i == size - 1)) ++ fprintf(stderr, " %.*s\n", (i % wid) + 1, ascii); ++ } ++ print_separator((wid * 4) + (wid / grp) + 12); ++} +diff --git a/util/hexdump.h b/util/hexdump.h +new file mode 100644 +index 0000000..d322b6a +--- /dev/null ++++ b/util/hexdump.h +@@ -0,0 +1,8 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (C) 2021 Intel Corporation. All rights reserved. 
*/ ++#ifndef _UTIL_HEXDUMP_H_ ++#define _UTIL_HEXDUMP_H_ ++ ++void hex_dump_buf(unsigned char *buf, int size); ++ ++#endif /* _UTIL_HEXDUMP_H_*/ +diff --git a/util/json.c b/util/json.c +index ca0167b..3be3a92 100644 +--- a/util/json.c ++++ b/util/json.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -482,6 +483,17 @@ struct json_object *util_daxctl_dev_to_json(struct daxctl_dev *dev, + json_object_object_add(jdev, "mode", jobj); + + if (mem && daxctl_dev_get_resource(dev) != 0) { ++ int num_sections = daxctl_memory_num_sections(mem); ++ int num_online = daxctl_memory_is_online(mem); ++ ++ jobj = json_object_new_int(num_online); ++ if (jobj) ++ json_object_object_add(jdev, "online_memblocks", jobj); ++ ++ jobj = json_object_new_int(num_sections); ++ if (jobj) ++ json_object_object_add(jdev, "total_memblocks", jobj); ++ + movable = daxctl_memory_is_movable(mem); + if (movable == 1) + jobj = json_object_new_boolean(true); +@@ -1429,3 +1441,28 @@ struct json_object *util_badblock_rec_to_json(u64 block, u64 count, + json_object_put(jerr); + return NULL; + } ++ ++struct json_object *util_cxl_memdev_to_json(struct cxl_memdev *memdev, ++ unsigned long flags) ++{ ++ const char *devname = cxl_memdev_get_devname(memdev); ++ struct json_object *jdev, *jobj; ++ ++ jdev = json_object_new_object(); ++ if (!devname || !jdev) ++ return NULL; ++ ++ jobj = json_object_new_string(devname); ++ if (jobj) ++ json_object_object_add(jdev, "memdev", jobj); ++ ++ jobj = util_json_object_size(cxl_memdev_get_pmem_size(memdev), flags); ++ if (jobj) ++ json_object_object_add(jdev, "pmem_size", jobj); ++ ++ jobj = util_json_object_size(cxl_memdev_get_ram_size(memdev), flags); ++ if (jobj) ++ json_object_object_add(jdev, "ram_size", jobj); ++ ++ return jdev; ++} +diff --git a/util/json.h b/util/json.h +index 0f09e36..91918c8 100644 +--- a/util/json.h ++++ b/util/json.h +@@ -55,4 +55,7 @@ struct json_object *util_dimm_health_to_json(struct ndctl_dimm *dimm); + struct json_object *util_dimm_firmware_to_json(struct ndctl_dimm *dimm, + unsigned long flags); + struct json_object *util_region_capabilities_to_json(struct ndctl_region *region); ++struct cxl_memdev; ++struct json_object *util_cxl_memdev_to_json(struct cxl_memdev *memdev, ++ unsigned long flags); + #endif /* __NDCTL_JSON_H__ */ +diff --git a/util/log.c b/util/log.c +index 61ac509..5dec1b0 100644 +--- a/util/log.c ++++ b/util/log.c +@@ -7,22 +7,23 @@ + #include + #include + +-void do_log(struct log_ctx *ctx, int priority, const char *file, ++void do_log(struct log_ctx *ctx, int loud, int priority, const char *file, + int line, const char *fn, const char *format, ...) 
+ { + va_list args; + int errno_save = errno; + + va_start(args, format); +- ctx->log_fn(ctx, priority, file, line, fn, format, args); ++ ctx->log_fn(ctx, loud, priority, file, line, fn, format, args); + va_end(args); + errno = errno_save; + } + +-static void log_stderr(struct log_ctx *ctx, int priority, const char *file, ++static void log_stderr(struct log_ctx *ctx, int loud, int priority, const char *file, + int line, const char *fn, const char *format, va_list args) + { +- fprintf(stderr, "%s: %s: ", ctx->owner, fn); ++ if (loud) ++ fprintf(stderr, "%s: %s: ", ctx->owner, fn); + vfprintf(stderr, format, args); + } + +diff --git a/util/log.h b/util/log.h +index 28f1c7b..eb88704 100644 +--- a/util/log.h ++++ b/util/log.h +@@ -7,7 +7,7 @@ + #include + + struct log_ctx; +-typedef void (*log_fn)(struct log_ctx *ctx, int priority, const char *file, ++typedef void (*log_fn)(struct log_ctx *ctx, int loud, int priority, const char *file, + int line, const char *fn, const char *format, va_list args); + + struct log_ctx { +@@ -17,36 +17,40 @@ struct log_ctx { + }; + + +-void do_log(struct log_ctx *ctx, int priority, const char *file, int line, ++void do_log(struct log_ctx *ctx, int loud, int priority, const char *file, int line, + const char *fn, const char *format, ...) +- __attribute__((format(printf, 6, 7))); ++ __attribute__((format(printf, 7, 8))); + void log_init(struct log_ctx *ctx, const char *owner, const char *log_env); + static inline void __attribute__((always_inline, format(printf, 2, 3))) + log_null(struct log_ctx *ctx, const char *format, ...) {} + +-#define log_cond(ctx, prio, arg...) \ ++#define log_cond(ctx, loud, prio, arg...) \ + do { \ + if ((ctx)->log_priority >= prio) \ +- do_log(ctx, prio, __FILE__, __LINE__, __FUNCTION__, ## arg); \ ++ do_log(ctx, loud, prio, __FILE__, __LINE__, __FUNCTION__, ## arg); \ + } while (0) + + #ifdef ENABLE_LOGGING + # ifdef ENABLE_DEBUG +-# define log_dbg(ctx, arg...) log_cond(ctx, LOG_DEBUG, ## arg) ++# define log_dbg(ctx, arg...) log_cond(ctx, 1, LOG_DEBUG, ## arg) ++# define log_dbgs(ctx, arg...) log_cond(ctx, 0, LOG_DEBUG, ## arg) + # else + # define log_dbg(ctx, arg...) log_null(ctx, ## arg) ++# define log_dbgs(ctx, arg...) log_null(ctx, ## arg) + # endif +-# define log_info(ctx, arg...) log_cond(ctx, LOG_INFO, ## arg) +-# define log_err(ctx, arg...) log_cond(ctx, LOG_ERR, ## arg) +-# define log_notice(ctx, arg...) log_cond(ctx, LOG_NOTICE, ## arg) ++# define log_info(ctx, arg...) log_cond(ctx, 1, LOG_INFO, ## arg) ++# define log_err(ctx, arg...) log_cond(ctx, 1, LOG_ERR, ## arg) ++# define log_notice(ctx, arg...) log_cond(ctx, 1, LOG_NOTICE, ## arg) + #else + # define log_dbg(ctx, arg...) log_null(ctx, ## arg) ++# define log_dbgs(ctx, arg...) log_null(ctx, ## arg) + # define log_info(ctx, arg...) log_null(ctx, ## arg) + # define log_err(ctx, arg...) log_null(ctx, ## arg) + # define log_notice(ctx, arg...) log_null(ctx, ## arg) + #endif + + #define dbg(x, arg...) log_dbg(&(x)->ctx, ## arg) ++#define dbg_s(x, arg...) log_dbgs(&(x)->ctx, ## arg) + #define info(x, arg...) log_info(&(x)->ctx, ## arg) + #define err(x, arg...) log_err(&(x)->ctx, ## arg) + #define notice(x, arg...) 
log_notice(&(x)->ctx, ## arg) +diff --git a/util/main.h b/util/main.h +index c89a843..80b55c4 100644 +--- a/util/main.h ++++ b/util/main.h +@@ -10,16 +10,19 @@ + enum program { + PROG_NDCTL, + PROG_DAXCTL, ++ PROG_CXL, + }; + + struct ndctl_ctx; + struct daxctl_ctx; ++struct cxl_ctx; + + struct cmd_struct { + const char *cmd; + union { + int (*n_fn)(int, const char **, struct ndctl_ctx *ctx); + int (*d_fn)(int, const char **, struct daxctl_ctx *ctx); ++ int (*c_fn)(int, const char **, struct cxl_ctx *ctx); + }; + }; + +diff --git a/util/parse-options.c b/util/parse-options.c +index c7ad6c4..6b3f1cf 100644 +--- a/util/parse-options.c ++++ b/util/parse-options.c +@@ -165,6 +165,8 @@ static int get_value(struct parse_opt_ctx_t *p, + if (get_arg(p, opt, flags, &arg)) + return -1; + *(unsigned int *)opt->value = strtol(arg, (char **)&s, 10); ++ if (*s) ++ *(unsigned int *)opt->value = strtol(arg, (char **)&s, 16); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; +@@ -197,6 +199,8 @@ static int get_value(struct parse_opt_ctx_t *p, + if (get_arg(p, opt, flags, &arg)) + return -1; + *(uint64_t *)opt->value = strtoull(arg, (char **)&s, 10); ++ if (*s) ++ *(uint64_t *)opt->value = strtoull(arg, (char **)&s, 16); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; diff --git a/SPECS/ndctl.spec b/SPECS/ndctl.spec index aeec84d..959d09e 100644 --- a/SPECS/ndctl.spec +++ b/SPECS/ndctl.spec @@ -1,12 +1,17 @@ Name: ndctl Version: 71.1 -Release: 4%{?dist} +Release: 4.1%{?dist} Summary: Manage "libnvdimm" subsystem devices (Non-volatile Memory) License: GPLv2 Group: System Environment/Base Url: https://github.com/pmem/ndctl Source0: https://github.com/pmem/%{name}/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz +%if 0%{?facebook} +# https://github.com/elake/ndctl diff from 71.1 (ea014c0) to main (3ff031d) +# This was generated with `git diff` because `git format-patch` doesn't handle the merges well +Patch0: elake-diff-ea014c0-3ff031d.patch +%else Patch0: modprobe-link-user-keyring-before-loadkeys.patch Patch1: fb13dfb-zero_info_block-skip-seed-devices.patch Patch2: daef3a3-libndctl-Unify-adding-dimms-for-papr-and-nfit-families.patch @@ -22,9 +27,13 @@ Patch11: aa99000-libndctl-papr-Add-support-for-reporting-shutdown-count.patch Patch12: edcd9b7-libndctl-intel-Indicate-supported-smart-inject-types.patch Patch13: 9ef460e-libndctl-papr-Add-limited-support-for-inject-smart.patch Patch14: 6e85cac-ndtest-ack-shutdown-count-Skip-the-test-on-ndtest.patch +%endif Requires: ndctl-libs%{?_isa} = %{version}-%{release} Requires: daxctl-libs%{?_isa} = %{version}-%{release} +%if 0%{?facebook} +Requires: cxl-libs%{?_isa} = %{version}-%{release} +%endif BuildRequires: autoconf BuildRequires: asciidoc BuildRequires: xmlto @@ -36,7 +45,7 @@ BuildRequires: pkgconfig(libudev) BuildRequires: pkgconfig(uuid) BuildRequires: pkgconfig(json-c) BuildRequires: pkgconfig(bash-completion) -BuildRequires: systemd +BuildRequires: pkgconfig(systemd) BuildRequires: keyutils-libs-devel %description @@ -68,6 +77,26 @@ the Linux kernel Device-DAX facility. This facility enables DAX mappings of performance / feature differentiated memory without need of a filesystem. +%if 0%{?facebook} +%package -n cxl-cli +Summary: Manage CXL devices +License: GPLv2 +Requires: cxl-libs%{?_isa} = %{version}-%{release} + +%description -n cxl-cli +The cxl utility provides enumeration and provisioning commands for +the Linux kernel CXL devices. 
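As a rough usage sketch for the cxl binary packaged here (assumed invocation and output shape; the JSON keys simply mirror util_cxl_memdev_to_json from the patch above, and real output may differ by cxl version):

    $ cxl list
    [
      {
        "memdev":"mem0",
        "pmem_size":268435456,
        "ram_size":268435456
      }
    ]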
+ +%package -n cxl-devel +Summary: Development files for libcxl +License: LGPLv2 +Requires: cxl-libs%{?_isa} = %{version}-%{release} + +%description -n cxl-devel +This package contains libraries and header files for developing applications +that use libcxl, a library for enumerating and communicating with CXL devices. +%endif + %package -n daxctl-devel Summary: Development files for libdaxctl License: LGPLv2 @@ -101,6 +130,15 @@ Device DAX is a facility for establishing DAX mappings of performance / feature-differentiated memory. daxctl-libs provides an enumeration / control API for these devices. +%if 0%{?facebook} +%package -n cxl-libs +Summary: Management library for CXL devices +License: LGPLv2 + +%description -n cxl-libs +libcxl is a library for enumerating and communicating with CXL devices. +%endif + %prep %autosetup -p1 ndctl-%{version} @@ -118,13 +156,13 @@ find $RPM_BUILD_ROOT -name '*.la' -exec rm -f {} ';' %check make check -%post -n ndctl-libs -p /sbin/ldconfig +%ldconfig_scriptlets -n ndctl-libs -%postun -n ndctl-libs -p /sbin/ldconfig +%ldconfig_scriptlets -n daxctl-libs -%post -n daxctl-libs -p /sbin/ldconfig - -%postun -n daxctl-libs -p /sbin/ldconfig +%if 0%{?facebook} +%ldconfig_scriptlets -n cxl-libs +%endif %define bashcompdir %(pkg-config --variable=completionsdir bash-completion) @@ -145,6 +183,13 @@ make check %{_mandir}/man1/daxctl* %{_datadir}/daxctl/daxctl.conf +%if 0%{?facebook} +%files -n cxl-cli +%license LICENSES/preferred/GPL-2.0 LICENSES/other/MIT LICENSES/other/CC0-1.0 +%{_bindir}/cxl +%{_mandir}/man1/cxl* +%endif + %files -n ndctl-libs %doc README.md %license LICENSES/preferred/LGPL-2.1 LICENSES/other/MIT LICENSES/other/CC0-1.0 @@ -155,6 +200,13 @@ make check %license LICENSES/preferred/LGPL-2.1 LICENSES/other/MIT LICENSES/other/CC0-1.0 %{_libdir}/libdaxctl.so.* +%if 0%{?facebook} +%files -n cxl-libs +%doc README.md +%license LICENSES/preferred/LGPL-2.1 LICENSES/other/MIT LICENSES/other/CC0-1.0 +%{_libdir}/libcxl.so.* +%endif + %files -n ndctl-devel %license LICENSES/preferred/LGPL-2.1 %{_includedir}/ndctl/ @@ -167,8 +219,22 @@ make check %{_libdir}/libdaxctl.so %{_libdir}/pkgconfig/libdaxctl.pc +%if 0%{?facebook} +%files -n cxl-devel +%license LICENSES/preferred/LGPL-2.1 +%{_includedir}/cxl/ +%{_libdir}/libcxl.so +%{_libdir}/pkgconfig/libcxl.pc +%{_mandir}/man3/cxl* +%{_mandir}/man3/libcxl.3.gz +%endif + %changelog +* Fri Feb 3 2023 Anita Zhang - 71.1-4.1 +- Patch 71.1 with diff between ea014c0..3ff031d from elake/ndctl fork for hs+fb +- Sync changes from rhel/ndctl.spec (autogenerated by the GitHub Makefile) + * Tue Jun 14 2022 Jeff Moyer - 71.1-4.el8 - Pull in fixes from upstream v72 and v73 (Jeff Moyer) - Fix enable-namespace all reporting errors incorrectly
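
As a hedged build sketch (assumed rpmbuild invocations; the actual macro is expected to come from the hs+fb build environment rather than the command line): the %if 0%{?facebook} conditionals above select between the elake diff plus the cxl-cli/cxl-libs/cxl-devel subpackages and the stock RHEL patch stack, so a local test build could toggle them roughly like this:

    # Facebook/hs+fb flavor: applies elake-diff-ea014c0-3ff031d.patch and builds the CXL subpackages
    rpmbuild -ba --define 'facebook 1' SPECS/ndctl.spec

    # Default flavor: applies the individual RHEL patches, no CXL subpackages
    rpmbuild -ba SPECS/ndctl.spec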