From a81f219d7f2bfc70dba1eb12208e3e6ab7c81b50 Mon Sep 17 00:00:00 2001
From: Jerome Marchand <jmarchan@redhat.com>
Date: Thu, 24 Mar 2022 16:08:17 +0100
Subject: [PATCH] C9S: libbpf version fixes

Revert "bcc: Replace deprecated libbpf APIs" since the libbpf version
provided in C9S doesn't provide the new APIs.
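
For context, a minimal sketch of the two map-creation APIs involved (not
part of the patch; the map name, sizes, and helper names are made up, and
each function only builds against the libbpf version noted in its comment):

  #include <bpf/bpf.h>

  /* Deprecated style, still provided by the libbpf shipped with C9S.
   * This is what the revert goes back to. */
  static int create_map_old(void)
  {
    struct bpf_create_map_attr attr = {
      .name = "example_map",
      .map_type = BPF_MAP_TYPE_HASH,
      .key_size = sizeof(int),
      .value_size = sizeof(long),
      .max_entries = 1024,
    };

    return bpf_create_map_xattr(&attr);  /* map fd on success, -1 on error */
  }

  /* Newer style (libbpf >= 0.7), which the C9S libbpf does not offer yet. */
  static int create_map_new(void)
  {
    LIBBPF_OPTS(bpf_map_create_opts, opts);

    return bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
                          sizeof(int), sizeof(long), 1024, &opts);
  }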

Remove BPF_MAP_TYPE_BLOOM_FILTER from bps since the libbpf version in
C9S doesn't provide the bloom filter map type.
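
The removed line is one slot in bps's map_type_strings[] table. A purely
illustrative sketch (the fallback helper and names below are assumptions,
not the actual bps code) of how such a table is consumed, and why dropping
an entry stays a small, local change:

  #include <stdio.h>
  #include <linux/bpf.h>  /* enum bpf_map_type */

  #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))

  /* Trimmed-down version of the table edited above. */
  static const char * const map_type_strings[] = {
    [BPF_MAP_TYPE_HASH] = "hash",
    [BPF_MAP_TYPE_ARRAY] = "array",
    [BPF_MAP_TYPE_RINGBUF] = "ringbuf",
  };

  /* Hypothetical lookup: types without an entry get a placeholder. */
  static const char *map_type_name(unsigned int type)
  {
    if (type < ARRAY_SIZE(map_type_strings) && map_type_strings[type])
      return map_type_strings[type];
    return "<unknown>";
  }

  int main(void)
  {
    printf("%s\n", map_type_name(BPF_MAP_TYPE_RINGBUF));  /* "ringbuf" */
    printf("%s\n", map_type_name(30));  /* bloom filter id in newer headers */
    return 0;
  }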

Add definition of struct bpf_core_relo.
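
The added struct mirrors the CO-RE relocation record described in the comment
in the bcc_btf.cc hunk below; access_str_off is an offset into the .BTF string
section where specs such as "0:1:0:5" live. A small, purely illustrative
sketch (helper name made up) of how such a spec splits into numeric accessors:

  #include <stdio.h>
  #include <stdlib.h>

  /* Split a CO-RE access spec like "0:1:0:5" into its numeric accessors.
   * Returns the number of indices parsed, or -1 on a malformed spec. */
  static int parse_access_spec(const char *spec, int *idx, int max)
  {
    int n = 0;

    while (*spec && n < max) {
      char *end;

      idx[n++] = (int)strtol(spec, &end, 10);
      if (*end == '\0')
        break;
      if (*end != ':')
        return -1;
      spec = end + 1;
    }
    return n;
  }

  int main(void)
  {
    int idx[8];
    int n = parse_access_spec("0:1:0:5", idx, 8);  /* &s->b[5] in the example */

    for (int i = 0; i < n; i++)
      printf("accessor %d: %d\n", i, idx[i]);
    return 0;
  }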
---
 introspection/bps.c |  1 -
 src/cc/bcc_btf.cc   | 73 +++++++++++++++++++++++++++++++++++++++-
 src/cc/libbpf.c     | 82 ++++++---------------------------------------
 3 files changed, 83 insertions(+), 73 deletions(-)

diff --git a/introspection/bps.c b/introspection/bps.c
index 232b23d4..6ec02e6c 100644
--- a/introspection/bps.c
+++ b/introspection/bps.c
@@ -80,7 +80,6 @@ static const char * const map_type_strings[] = {
   [BPF_MAP_TYPE_RINGBUF] = "ringbuf",
   [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
   [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
-  [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
 };
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
diff --git a/src/cc/bcc_btf.cc b/src/cc/bcc_btf.cc
index 7f551ae8..c78ba823 100644
--- a/src/cc/bcc_btf.cc
+++ b/src/cc/bcc_btf.cc
@@ -170,6 +170,77 @@ static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
         return btf_ext_setup_info(btf_ext, &param);
 }
 
+/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
+ * has to be adjusted by relocations.
+ */
+enum bpf_core_relo_kind {
+	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
+	BPF_FIELD_BYTE_SIZE = 1,	/* field size in bytes */
+	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
+	BPF_FIELD_SIGNED = 3,		/* field signedness (0 - unsigned, 1 - signed) */
+	BPF_FIELD_LSHIFT_U64 = 4,	/* bitfield-specific left bitshift */
+	BPF_FIELD_RSHIFT_U64 = 5,	/* bitfield-specific right bitshift */
+	BPF_TYPE_ID_LOCAL = 6,		/* type ID in local BPF object */
+	BPF_TYPE_ID_TARGET = 7,		/* type ID in target kernel */
+	BPF_TYPE_EXISTS = 8,		/* type existence in target kernel */
+	BPF_TYPE_SIZE = 9,		/* type size in bytes */
+	BPF_ENUMVAL_EXISTS = 10,	/* enum value existence in target kernel */
+	BPF_ENUMVAL_VALUE = 11,		/* enum value integer value */
+};
+
+/* The minimum bpf_core_relo checked by the loader
+ *
+ * CO-RE relocation captures the following data:
+ * - insn_off - instruction offset (in bytes) within a BPF program that needs
+ *   its insn->imm field to be relocated with actual field info;
+ * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
+ *   type or field;
+ * - access_str_off - offset into corresponding .BTF string section. String
+ *   interpretation depends on specific relocation kind:
+ *     - for field-based relocations, string encodes an accessed field using
+ *     a sequence of field and array indices, separated by colon (:). It's
+ *     conceptually very close to LLVM's getelementptr ([0]) instruction's
+ *     arguments for identifying offset to a field.
+ *     - for type-based relocations, strings is expected to be just "0";
+ *     - for enum value-based relocations, string contains an index of enum
+ *     value within its enum type;
+ *
+ * Example to provide a better feel.
+ *
+ *   struct sample {
+ *       int a;
+ *       struct {
+ *           int b[10];
+ *       };
+ *   };
+ *
+ *   struct sample *s = ...;
+ *   int x = &s->a;     // encoded as "0:0" (a is field #0)
+ *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1, 
+ *                      // b is field #0 inside anon struct, accessing elem #5)
+ *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ *
+ * type_id for all relocs in this example  will capture BTF type id of
+ * `struct sample`.
+ *
+ * Such relocation is emitted when using __builtin_preserve_access_index()
+ * Clang built-in, passing expression that captures field address, e.g.:
+ *
+ * bpf_probe_read(&dst, sizeof(dst),
+ *		  __builtin_preserve_access_index(&src->a.b.c));
+ *
+ * In this case Clang will emit field relocation recording necessary data to
+ * be able to find offset of embedded `a.b.c` field within `src` struct.
+ *
+ *   [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
+ */
+struct bpf_core_relo {
+	__u32   insn_off;
+	__u32   type_id;
+	__u32   access_str_off;
+	enum bpf_core_relo_kind kind;
+};
+
 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
 {
         struct btf_ext_sec_setup_param param = {
@@ -597,7 +668,7 @@ int BTF::load(uint8_t *btf_sec, uintptr_t btf_sec_size,
     return -1;
   }
 
-  if (btf__load_into_kernel(btf)) {
+  if (btf__load(btf)) {
     btf__free(btf);
     warning("Loading .BTF section failed\n");
     return -1;
diff --git a/src/cc/libbpf.c b/src/cc/libbpf.c
index e6403299..68af4b35 100644
--- a/src/cc/libbpf.c
+++ b/src/cc/libbpf.c
@@ -297,25 +297,6 @@ static uint64_t ptr_to_u64(void *ptr)
   return (uint64_t) (unsigned long) ptr;
 }
 
-static int libbpf_bpf_map_create(struct bpf_create_map_attr *create_attr)
-{
-  LIBBPF_OPTS(bpf_map_create_opts, p);
-
-  p.map_flags = create_attr->map_flags;
-  p.numa_node = create_attr->numa_node;
-  p.btf_fd = create_attr->btf_fd;
-  p.btf_key_type_id = create_attr->btf_key_type_id;
-  p.btf_value_type_id = create_attr->btf_value_type_id;
-  p.map_ifindex = create_attr->map_ifindex;
-  if (create_attr->map_type == BPF_MAP_TYPE_STRUCT_OPS)
-    p.btf_vmlinux_value_type_id = create_attr->btf_vmlinux_value_type_id;
-  else
-    p.inner_map_fd = create_attr->inner_map_fd;
-
-  return bpf_map_create(create_attr->map_type, create_attr->name, create_attr->key_size,
-                        create_attr->value_size, create_attr->max_entries, &p);
-}
-
 int bcc_create_map_xattr(struct bpf_create_map_attr *attr, bool allow_rlimit)
 {
   unsigned name_len = attr->name ? strlen(attr->name) : 0;
@@ -323,7 +304,7 @@ int bcc_create_map_xattr(struct bpf_create_map_attr *attr, bool allow_rlimit)
 
   memcpy(map_name, attr->name, min(name_len, BPF_OBJ_NAME_LEN - 1));
   attr->name = map_name;
-  int ret = libbpf_bpf_map_create(attr);
+  int ret = bpf_create_map_xattr(attr);
 
   if (ret < 0 && errno == EPERM) {
     if (!allow_rlimit)
@@ -335,7 +316,7 @@ int bcc_create_map_xattr(struct bpf_create_map_attr *attr, bool allow_rlimit)
       rl.rlim_max = RLIM_INFINITY;
       rl.rlim_cur = rl.rlim_max;
       if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0)
-        ret = libbpf_bpf_map_create(attr);
+        ret = bpf_create_map_xattr(attr);
     }
   }
 
@@ -345,12 +326,12 @@ int bcc_create_map_xattr(struct bpf_create_map_attr *attr, bool allow_rlimit)
     attr->btf_fd = 0;
     attr->btf_key_type_id = 0;
     attr->btf_value_type_id = 0;
-    ret = libbpf_bpf_map_create(attr);
+    ret = bpf_create_map_xattr(attr);
   }
 
   if (ret < 0 && name_len && (errno == E2BIG || errno == EINVAL)) {
     map_name[0] = '\0';
-    ret = libbpf_bpf_map_create(attr);
+    ret = bpf_create_map_xattr(attr);
   }
 
   if (ret < 0 && errno == EPERM) {
@@ -363,7 +344,7 @@ int bcc_create_map_xattr(struct bpf_create_map_attr *attr, bool allow_rlimit)
       rl.rlim_max = RLIM_INFINITY;
       rl.rlim_cur = rl.rlim_max;
       if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0)
-        ret = libbpf_bpf_map_create(attr);
+        ret = bpf_create_map_xattr(attr);
     }
   }
   return ret;
@@ -627,47 +608,6 @@ int bpf_prog_get_tag(int fd, unsigned long long *ptag)
   return 0;
 }
 
-static int libbpf_bpf_prog_load(const struct bpf_load_program_attr *load_attr,
-                                char *log_buf, size_t log_buf_sz)
-{
-  LIBBPF_OPTS(bpf_prog_load_opts, p);
-
-  if (!load_attr || !log_buf != !log_buf_sz) {
-    errno = EINVAL;
-    return -EINVAL;
-  }
-
-  p.expected_attach_type = load_attr->expected_attach_type;
-  switch (load_attr->prog_type) {
-  case BPF_PROG_TYPE_STRUCT_OPS:
-  case BPF_PROG_TYPE_LSM:
-    p.attach_btf_id = load_attr->attach_btf_id;
-    break;
-  case BPF_PROG_TYPE_TRACING:
-  case BPF_PROG_TYPE_EXT:
-    p.attach_btf_id = load_attr->attach_btf_id;
-    p.attach_prog_fd = load_attr->attach_prog_fd;
-    break;
-  default:
-    p.prog_ifindex = load_attr->prog_ifindex;
-    p.kern_version = load_attr->kern_version;
-  }
-  p.log_level = load_attr->log_level;
-  p.log_buf = log_buf;
-  p.log_size = log_buf_sz;
-  p.prog_btf_fd = load_attr->prog_btf_fd;
-  p.func_info_rec_size = load_attr->func_info_rec_size;
-  p.func_info_cnt = load_attr->func_info_cnt;
-  p.func_info = load_attr->func_info;
-  p.line_info_rec_size = load_attr->line_info_rec_size;
-  p.line_info_cnt = load_attr->line_info_cnt;
-  p.line_info = load_attr->line_info;
-  p.prog_flags = load_attr->prog_flags;
-
-  return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license,
-                       load_attr->insns, load_attr->insns_cnt, &p);
-}
-
 int bcc_prog_load_xattr(struct bpf_load_program_attr *attr, int prog_len,
                         char *log_buf, unsigned log_buf_size, bool allow_rlimit)
 {
@@ -750,7 +690,7 @@ int bcc_prog_load_xattr(struct bpf_load_program_attr *attr, int prog_len,
     attr->name = prog_name;
   }
 
-  ret = libbpf_bpf_prog_load(attr, attr_log_buf, attr_log_buf_size);
+  ret = bpf_load_program_xattr(attr, attr_log_buf, attr_log_buf_size);
 
   // func_info/line_info may not be supported in old kernels.
   if (ret < 0 && attr->func_info && errno == EINVAL) {
@@ -761,14 +701,14 @@ int bcc_prog_load_xattr(struct bpf_load_program_attr *attr, int prog_len,
     attr->line_info = NULL;
     attr->line_info_cnt = 0;
     attr->line_info_rec_size = 0;
-    ret = libbpf_bpf_prog_load(attr, attr_log_buf, attr_log_buf_size);
+    ret = bpf_load_program_xattr(attr, attr_log_buf, attr_log_buf_size);
   }
 
   // BPF object name is not supported on older Kernels.
   // If we failed due to this, clear the name and try again.
   if (ret < 0 && name_len && (errno == E2BIG || errno == EINVAL)) {
     prog_name[0] = '\0';
-    ret = libbpf_bpf_prog_load(attr, attr_log_buf, attr_log_buf_size);
+    ret = bpf_load_program_xattr(attr, attr_log_buf, attr_log_buf_size);
   }
 
   if (ret < 0 && errno == EPERM) {
@@ -787,7 +727,7 @@ int bcc_prog_load_xattr(struct bpf_load_program_attr *attr, int prog_len,
       rl.rlim_max = RLIM_INFINITY;
       rl.rlim_cur = rl.rlim_max;
       if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0)
-        ret = libbpf_bpf_prog_load(attr, attr_log_buf, attr_log_buf_size);
+        ret = bpf_load_program_xattr(attr, attr_log_buf, attr_log_buf_size);
     }
   }
 
@@ -805,7 +745,7 @@ int bcc_prog_load_xattr(struct bpf_load_program_attr *attr, int prog_len,
       // If logging is not already enabled, enable it and do the syscall again.
       if (attr->log_level == 0) {
         attr->log_level = 1;
-        ret = libbpf_bpf_prog_load(attr, log_buf, log_buf_size);
+        ret = bpf_load_program_xattr(attr, log_buf, log_buf_size);
       }
       // Print the log message and return.
       bpf_print_hints(ret, log_buf);
@@ -829,7 +769,7 @@ int bcc_prog_load_xattr(struct bpf_load_program_attr *attr, int prog_len,
         goto return_result;
       }
       tmp_log_buf[0] = 0;
-      ret = libbpf_bpf_prog_load(attr, tmp_log_buf, tmp_log_buf_size);
+      ret = bpf_load_program_xattr(attr, tmp_log_buf, tmp_log_buf_size);
       if (ret < 0 && errno == ENOSPC) {
         // Temporary buffer size is not enough. Double it and try again.
         free(tmp_log_buf);
-- 
2.36.1