From 1d7546da3efa9f06c8236e8a2ce6bd174dde55d1 Mon Sep 17 00:00:00 2001
From: Himanshu Madhani <hmadhani@redhat.com>
Date: Thu, 1 Aug 2019 15:54:22 -0400
Subject: [PATCH 002/124] [scsi] scsi: qla2xxx: Fix DMA error when the DIF sg
 buffer crosses 4GB boundary

Message-id: <20190801155618.12650-3-hmadhani@redhat.com>
Patchwork-id: 267810
O-Subject: [RHEL 7.8 e-stor PATCH 002/118] scsi: qla2xxx: Fix DMA error when the DIF sg buffer crosses 4GB boundary
Bugzilla: 1729270
RH-Acked-by: Jarod Wilson <jarod@redhat.com>
RH-Acked-by: Tony Camuso <tcamuso@redhat.com>

From: Giridhar Malavali <gmalavali@marvell.com>

Bugzilla 1729270

When an SGE buffer containing DIF information crosses a 4G boundary,
it results in a DMA error. This patch fixes the issue by calculating
the SGE buffer size and, if it crosses the 4G boundary, splitting it
into multiple SGE buffers to avoid the DMA error.

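[ Ed: illustrative sketch, not part of the applied diff. The fix hinges ]
[ on one test: a buffer straddles a 4GB boundary exactly when the upper ]
[ 32 bits of its start and end addresses differ. MSD() below is the ]
[ driver's existing upper-32-bits macro; the helper name is hypothetical: ]

	static inline bool qla_crosses_4gb(dma_addr_t addr, u32 len)
	{
		/* MSD() extracts the upper 32 bits of a 64-bit address */
		return MSD(addr) != MSD(addr + len);
	}
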
Signed-off-by: Giridhar Malavali <gmalavali@marvell.com>
Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
(cherry picked from commit 50b812755e9766fa0a1a28533f4d11a34a5b813e)
Signed-off-by: Himanshu Madhani <hmadhani@redhat.com>

[ HM: RH source complains about %llu formatting used in debug ]
[ statement for CS_DMA case, fixed patch for kernel build ]

Signed-off-by: Himanshu Madhani <hmadhani@redhat.com>
Signed-off-by: Jan Stancek <jstancek@redhat.com>
---
 drivers/scsi/qla2xxx/qla_attr.c   |  21 ++-
 drivers/scsi/qla2xxx/qla_def.h    |  28 ++++
 drivers/scsi/qla2xxx/qla_gbl.h    |   3 +-
 drivers/scsi/qla2xxx/qla_iocb.c   | 335 +++++++++++++++++++++++++++++++-------
 drivers/scsi/qla2xxx/qla_isr.c    |  11 ++
 drivers/scsi/qla2xxx/qla_os.c     | 169 ++++++++++++++++++-
 drivers/scsi/qla2xxx/qla_target.c |   2 +-
 drivers/scsi/qla2xxx/qla_target.h |   2 +
 8 files changed, 503 insertions(+), 68 deletions(-)

diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7dcd34b3a9c4..da8b16469836 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1003,7 +1003,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
 /* Scsi_Host attributes. */
 
 static ssize_t
-qla2x00_drvr_version_show(struct device *dev,
+qla2x00_driver_version_show(struct device *dev,
 			  struct device_attribute *attr, char *buf)
 {
 	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
@@ -2060,7 +2060,21 @@ ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
 	return strlen(buf);
 }
 
-static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
+static ssize_t
+qla2x00_dif_bundle_statistics_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	return scnprintf(buf, PAGE_SIZE,
+	    "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
+	    ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
+	    ha->dif_bundle_writes, ha->dif_bundle_kallocs,
+	    ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
+}
+
+static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
@@ -2113,6 +2127,8 @@ static DEVICE_ATTR(zio_threshold, 0644,
 static DEVICE_ATTR_RW(qlini_mode);
 static DEVICE_ATTR_RW(ql2xexchoffld);
 static DEVICE_ATTR_RW(ql2xiniexchg);
+static DEVICE_ATTR(dif_bundle_statistics, 0444,
+    qla2x00_dif_bundle_statistics_show, NULL);
 
 
 struct device_attribute *qla2x00_host_attrs[] = {
@@ -2151,6 +2167,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_min_link_speed,
 	&dev_attr_max_speed_sup,
 	&dev_attr_zio_threshold,
+	&dev_attr_dif_bundle_statistics,
 	NULL, /* reserve for qlini_mode */
 	NULL, /* reserve for ql2xiniexchg */
 	NULL, /* reserve for ql2xexchoffld */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index bf0faee77106..39de2d91988e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -324,6 +324,7 @@ struct srb_cmd {
 #define SRB_CRC_PROT_DMA_VALID		BIT_4	/* DIF: prot DMA valid */
 #define SRB_CRC_CTX_DSD_VALID		BIT_5	/* DIF: dsd_list valid */
 #define SRB_WAKEUP_ON_COMP		BIT_6
+#define SRB_DIF_BUNDL_DMA_VALID		BIT_7   /* DIF: DMA list valid */
 
 /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
 #define IS_PROT_IO(sp)	(sp->flags & SRB_CRC_CTX_DSD_VALID)
@@ -1902,6 +1903,13 @@ struct crc_context {
 	/* List of DMA context transfers */
 	struct list_head dsd_list;
 
+	/* List of DIF Bundling context DMA address */
+	struct list_head ldif_dsd_list;
+	u8 no_ldif_dsd;
+
+	struct list_head ldif_dma_hndl_list;
+	u32 dif_bundl_len;
+	u8 no_dif_bundl;
 	/* This structure should not exceed 512 bytes */
 };
 
@@ -4194,6 +4202,26 @@ struct qla_hw_data {
 	uint16_t min_link_speed;
 	uint16_t max_speed_sup;
 
+	/* DMA pool for the DIF bundling buffers */
+	struct dma_pool *dif_bundl_pool;
+	#define DIF_BUNDLING_DMA_POOL_SIZE  1024
+	struct {
+		struct {
+			struct list_head head;
+			uint count;
+		} good;
+		struct {
+			struct list_head head;
+			uint count;
+		} unusable;
+	} pool;
+
+	unsigned long long dif_bundle_crossed_pages;
+	unsigned long long dif_bundle_reads;
+	unsigned long long dif_bundle_writes;
+	unsigned long long dif_bundle_kallocs;
+	unsigned long long dif_bundle_dma_allocs;
+
 	atomic_t        nvme_active_aen_cnt;
 	uint16_t        nvme_last_rptd_aen;             /* Last recorded aen count */
 
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 966eebe68584..1e00e93d4066 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -159,6 +159,7 @@ extern int ql2xnvmeenable;
 extern int ql2xenablemsix;
 extern int qla2xuseresexchforels;
 extern int ql2xexlogins;
+extern int ql2xdifbundlinginternalbuffers;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -284,7 +285,7 @@ extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
 extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
 	uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
-	uint32_t *, uint16_t, struct qla_tc_param *);
+	uint32_t *, uint16_t, struct qla_tgt_cmd *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
 extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 24a779db8530..b0ab5d362f64 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1105,88 +1105,300 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 
 int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+    uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
-	void *next_dsd;
-	uint8_t avail_dsds = 0;
-	uint32_t dsd_list_len;
-	struct dsd_dma *dsd_ptr;
+	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
 	struct scatterlist *sg, *sgl;
-	int	i;
-	struct scsi_cmnd *cmd;
-	uint32_t *cur_dsd = dsd;
-	uint16_t used_dsds = tot_dsds;
+	struct crc_context *difctx = NULL;
 	struct scsi_qla_host *vha;
+	uint dsd_list_len;
+	uint avail_dsds = 0;
+	uint used_dsds = tot_dsds;
+	bool dif_local_dma_alloc = false;
+	bool direction_to_device = false;
+	int i;
 
 	if (sp) {
-		cmd = GET_CMD_SP(sp);
+		struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 		sgl = scsi_prot_sglist(cmd);
 		vha = sp->vha;
+		difctx = sp->u.scmd.ctx;
+		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
+		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
+		  "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
+			__func__, cmd, difctx, sp);
 	} else if (tc) {
 		vha = tc->vha;
 		sgl = tc->prot_sg;
+		difctx = tc->ctx;
+		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
 	} else {
 		BUG();
 		return 1;
 	}
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe021,
-		"%s: enter\n", __func__);
-
-	for_each_sg(sgl, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
-
-		/* Allocate additional continuation packets? */
-		if (avail_dsds == 0) {
-			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
-						QLA_DSDS_PER_IOCB : used_dsds;
-			dsd_list_len = (avail_dsds + 1) * 12;
-			used_dsds -= avail_dsds;
-
-			/* allocate tracking DS */
-			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
-			if (!dsd_ptr)
-				return 1;
-
-			/* allocate new list */
-			dsd_ptr->dsd_addr = next_dsd =
-			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
-				&dsd_ptr->dsd_list_dma);
-
-			if (!next_dsd) {
-				/*
-				 * Need to cleanup only this dsd_ptr, rest
-				 * will be done by sp_free_dma()
-				 */
-				kfree(dsd_ptr);
-				return 1;
+	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
+	    "%s: enter (write=%u)\n", __func__, direction_to_device);
+
+	/* if initiator doing write or target doing read */
+	if (direction_to_device) {
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			dma_addr_t sle_phys = sg_phys(sg);
+
+			/* If SGE addr + len flips bits in upper 32-bits */
+			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
+				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
+				    "%s: page boundary crossing (phys=%llx len=%x)\n",
+				    __func__, sle_phys, sg->length);
+
+				if (difctx) {
+					ha->dif_bundle_crossed_pages++;
+					dif_local_dma_alloc = true;
+				} else {
+					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
+					    vha, 0xe022,
+					    "%s: difctx pointer is NULL\n",
+					    __func__);
+				}
+				break;
+			}
+		}
+		ha->dif_bundle_writes++;
+	} else {
+		ha->dif_bundle_reads++;
+	}
+
+	if (ql2xdifbundlinginternalbuffers)
+		dif_local_dma_alloc = direction_to_device;
+
+	if (dif_local_dma_alloc) {
+		u32 track_difbundl_buf = 0;
+		u32 ldma_sg_len = 0;
+		u8 ldma_needed = 1;
+
+		difctx->no_dif_bundl = 0;
+		difctx->dif_bundl_len = 0;
+
+		/* Track DSD buffers */
+		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
+		/* Track local DMA buffers */
+		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
+
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			u32 sglen = sg_dma_len(sg);
+
+			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
+			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
+			    __func__, i, sg_phys(sg), sglen, ldma_sg_len,
+			    difctx->dif_bundl_len, ldma_needed);
+
+			while (sglen) {
+				u32 xfrlen = 0;
+
+				if (ldma_needed) {
+					/*
+					 * Allocate list item to store
+					 * the DMA buffers
+					 */
+					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
+					    GFP_ATOMIC);
+					if (!dsd_ptr) {
+						ql_dbg(ql_dbg_tgt, vha, 0xe024,
+						    "%s: failed alloc dsd_ptr\n",
+						    __func__);
+						return 1;
+					}
+					ha->dif_bundle_kallocs++;
+
+					/* allocate dma buffer */
+					dsd_ptr->dsd_addr = dma_pool_alloc
+						(ha->dif_bundl_pool, GFP_ATOMIC,
+						 &dsd_ptr->dsd_list_dma);
+					if (!dsd_ptr->dsd_addr) {
+						ql_dbg(ql_dbg_tgt, vha, 0xe024,
+						    "%s: failed alloc ->dsd_ptr\n",
+						    __func__);
+						/*
+						 * need to cleanup only this
+						 * dsd_ptr rest will be done
+						 * by sp_free_dma()
+						 */
+						kfree(dsd_ptr);
+						ha->dif_bundle_kallocs--;
+						return 1;
+					}
+					ha->dif_bundle_dma_allocs++;
+					ldma_needed = 0;
+					difctx->no_dif_bundl++;
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->ldif_dma_hndl_list);
+				}
+
+				/* xfrlen is min of dma pool size and sglen */
+				xfrlen = (sglen >
+				   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
+				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
+				    sglen;
+
+				/* replace with local allocated dma buffer */
+				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
+				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
+				    difctx->dif_bundl_len);
+				difctx->dif_bundl_len += xfrlen;
+				sglen -= xfrlen;
+				ldma_sg_len += xfrlen;
+				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
+				    sg_is_last(sg)) {
+					ldma_needed = 1;
+					ldma_sg_len = 0;
+				}
 			}
+		}
 
-			if (sp) {
-				list_add_tail(&dsd_ptr->list,
-				    &((struct crc_context *)
-					    sp->u.scmd.ctx)->dsd_list);
+		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
+		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
+		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
+		    difctx->dif_bundl_len, difctx->no_dif_bundl,
+		    track_difbundl_buf);
 
-				sp->flags |= SRB_CRC_CTX_DSD_VALID;
-			} else {
-				list_add_tail(&dsd_ptr->list,
-				    &(tc->ctx->dsd_list));
-				*tc->ctx_dsd_alloced = 1;
+		if (sp)
+			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
+		else
+			tc->prot_flags = DIF_BUNDL_DMA_VALID;
+
+		list_for_each_entry_safe(dif_dsd, nxt_dsd,
+		    &difctx->ldif_dma_hndl_list, list) {
+			u32 sglen = (difctx->dif_bundl_len >
+			    DIF_BUNDLING_DMA_POOL_SIZE) ?
+			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
+
+			BUG_ON(track_difbundl_buf == 0);
+
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
+				    0xe024,
+				    "%s: adding continuation iocb's\n",
+				    __func__);
+				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+				    QLA_DSDS_PER_IOCB : used_dsds;
+				dsd_list_len = (avail_dsds + 1) * 12;
+				used_dsds -= avail_dsds;
+
+				/* allocate tracking DS */
+				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+				if (!dsd_ptr) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe026,
+					    "%s: failed alloc dsd_ptr\n",
+					    __func__);
+					return 1;
+				}
+				ha->dif_bundle_kallocs++;
+
+				difctx->no_ldif_dsd++;
+				/* allocate new list */
+				dsd_ptr->dsd_addr =
+				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+					&dsd_ptr->dsd_list_dma);
+				if (!dsd_ptr->dsd_addr) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe026,
+					    "%s: failed alloc ->dsd_addr\n",
+					    __func__);
+					/*
+					 * need to cleanup only this dsd_ptr
+					 *  rest will be done by sp_free_dma()
+					 */
+					kfree(dsd_ptr);
+					ha->dif_bundle_kallocs--;
+					return 1;
+				}
+				ha->dif_bundle_dma_allocs++;
+
+				if (sp) {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->ldif_dsd_list);
+					sp->flags |= SRB_CRC_CTX_DSD_VALID;
+				} else {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->ldif_dsd_list);
+					tc->ctx_dsd_alloced = 1;
+				}
+
+				/* add new list to cmd iocb or last list */
+				*cur_dsd++ =
+				    cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+				*cur_dsd++ =
+				    cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+				*cur_dsd++ = dsd_list_len;
+				cur_dsd = dsd_ptr->dsd_addr;
 			}
-
-			/* add new list to cmd iocb or last list */
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = dsd_list_len;
-			cur_dsd = (uint32_t *)next_dsd;
+			*cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(sglen);
+			avail_dsds--;
+			difctx->dif_bundl_len -= sglen;
+			track_difbundl_buf--;
 		}
-		sle_dma = sg_dma_address(sg);
-
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 
-		avail_dsds--;
+		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
+		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
+			difctx->no_ldif_dsd, difctx->no_dif_bundl);
+	} else {
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			dma_addr_t sle_dma;
+
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+				    QLA_DSDS_PER_IOCB : used_dsds;
+				dsd_list_len = (avail_dsds + 1) * 12;
+				used_dsds -= avail_dsds;
+
+				/* allocate tracking DS */
+				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+				if (!dsd_ptr) {
+					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
+					    vha, 0xe027,
+					    "%s: failed alloc dsd_dma...\n",
+					    __func__);
+					return 1;
+				}
+
+				/* allocate new list */
+				dsd_ptr->dsd_addr =
+				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+					&dsd_ptr->dsd_list_dma);
+				if (!dsd_ptr->dsd_addr) {
+					/* need to cleanup only this dsd_ptr */
+					/* rest will be done by sp_free_dma() */
+					kfree(dsd_ptr);
+					return 1;
+				}
+
+				if (sp) {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->dsd_list);
+					sp->flags |= SRB_CRC_CTX_DSD_VALID;
+				} else {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->dsd_list);
+					tc->ctx_dsd_alloced = 1;
+				}
+
+				/* add new list to cmd iocb or last list */
+				*cur_dsd++ =
+				    cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+				*cur_dsd++ =
+				    cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+				*cur_dsd++ = dsd_list_len;
+				cur_dsd = dsd_ptr->dsd_addr;
+			}
+			sle_dma = sg_dma_address(sg);
+			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+			avail_dsds--;
+		}
 	}
 	/* Null termination */
 	*cur_dsd++ = 0;
@@ -1194,7 +1406,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 	*cur_dsd++ = 0;
 	return 0;
 }
-
 /**
  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
  *							Type 6 IOCB types.
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1c90c3989cc6..d3af28eff7f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2722,6 +2722,17 @@ check_scsi_status:
 			    cp->device->vendor);
 		break;
 
+	case CS_DMA:
+		ql_log(ql_log_info, fcport->vha, 0x3022,
+		    "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
+		    comp_status, scsi_status, res, vha->host_no,
+		    cp->device->id, cp->device->lun, fcport->d_id.b24,
+		    ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
+		    resid_len, fw_resid_len, sp, cp);
+		ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
+		    pkt, sizeof(*sts24));
+		res = DID_ERROR << 16;
+		break;
 	default:
 		res = DID_ERROR << 16;
 		break;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 83abed102cf4..399a9072ae02 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -298,6 +298,13 @@ MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
 		 "  1 -- Force T10 CRC\n"
 		 "  2 -- Force IP checksum\n");
 
+int ql2xdifbundlinginternalbuffers;
+module_param(ql2xdifbundlinginternalbuffers, int, 0644);
+MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
+    "Force using internal buffers for DIF information\n"
+    "0 (Default). Based on check.\n"
+    "1 Force using internal buffers\n");
+
 /*
  * SCSI host template entry points
  */
@@ -818,7 +825,44 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
 		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
 		mempool_free(ctx1, ha->ctx_mempool);
+		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
+	}
+	if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
+		struct crc_context *difctx = sp->u.scmd.ctx;
+		struct dsd_dma *dif_dsd, *nxt_dsd;
+
+		list_for_each_entry_safe(dif_dsd, nxt_dsd,
+		    &difctx->ldif_dma_hndl_list, list) {
+			list_del(&dif_dsd->list);
+			dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
+			    dif_dsd->dsd_list_dma);
+			kfree(dif_dsd);
+			difctx->no_dif_bundl--;
+		}
+
+		list_for_each_entry_safe(dif_dsd, nxt_dsd,
+		    &difctx->ldif_dsd_list, list) {
+			list_del(&dif_dsd->list);
+			dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
+			    dif_dsd->dsd_list_dma);
+			kfree(dif_dsd);
+			difctx->no_ldif_dsd--;
+		}
+
+		if (difctx->no_ldif_dsd) {
+			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
+			    "%s: difctx->no_ldif_dsd=%x\n",
+			    __func__, difctx->no_ldif_dsd);
+		}
+
+		if (difctx->no_dif_bundl) {
+			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
+			    "%s: difctx->no_dif_bundl=%x\n",
+			    __func__, difctx->no_dif_bundl);
+		}
+		sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
 	}
+
 end:
 	CMD_SP(cmd) = NULL;
 	qla2xxx_rel_qpair_sp(sp->qpair, sp);
@@ -4104,9 +4148,86 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 			    "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
 			goto fail_dl_dma_pool;
 		}
+
+		if (ql2xenabledif) {
+			u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
+			struct dsd_dma *dsd, *nxt;
+			uint i;
+			/* Create a DMA pool of buffers for DIF bundling */
+			ha->dif_bundl_pool = dma_pool_create(name,
+			    &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
+			if (!ha->dif_bundl_pool) {
+				ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
+				    "%s: failed create dif_bundl_pool\n",
+				    __func__);
+				goto fail_dif_bundl_dma_pool;
+			}
+
+			INIT_LIST_HEAD(&ha->pool.good.head);
+			INIT_LIST_HEAD(&ha->pool.unusable.head);
+			ha->pool.good.count = 0;
+			ha->pool.unusable.count = 0;
+			for (i = 0; i < 128; i++) {
+				dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
+				if (!dsd) {
+					ql_dbg_pci(ql_dbg_init, ha->pdev,
+					    0xe0ee, "%s: failed alloc dsd\n",
+					    __func__);
+					return 1;
+				}
+				ha->dif_bundle_kallocs++;
+
+				dsd->dsd_addr = dma_pool_alloc(
+				    ha->dif_bundl_pool, GFP_ATOMIC,
+				    &dsd->dsd_list_dma);
+				if (!dsd->dsd_addr) {
+					ql_dbg_pci(ql_dbg_init, ha->pdev,
+					    0xe0ee,
+					    "%s: failed alloc ->dsd_addr\n",
+					    __func__);
+					kfree(dsd);
+					ha->dif_bundle_kallocs--;
+					continue;
+				}
+				ha->dif_bundle_dma_allocs++;
+
+				/*
+				 * if DMA buffer crosses 4G boundary,
+				 * put it on bad list
+				 */
+				if (MSD(dsd->dsd_list_dma) ^
+				    MSD(dsd->dsd_list_dma + bufsize)) {
+					list_add_tail(&dsd->list,
+					    &ha->pool.unusable.head);
+					ha->pool.unusable.count++;
+				} else {
+					list_add_tail(&dsd->list,
+					    &ha->pool.good.head);
+					ha->pool.good.count++;
+				}
+			}
+
+			/* return the good ones back to the pool */
+			list_for_each_entry_safe(dsd, nxt,
+			    &ha->pool.good.head, list) {
+				list_del(&dsd->list);
+				dma_pool_free(ha->dif_bundl_pool,
+				    dsd->dsd_addr, dsd->dsd_list_dma);
+				ha->dif_bundle_dma_allocs--;
+				kfree(dsd);
+				ha->dif_bundle_kallocs--;
+			}
+
+			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
+			    "%s: dif dma pool (good=%u unusable=%u)\n",
+			    __func__, ha->pool.good.count,
+			    ha->pool.unusable.count);
+		}
+
 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
-		    "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
-		    ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
+		    "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
+		    ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
+		    ha->dif_bundl_pool);
 	}
 
 	/* Allocate memory for SNS commands */
@@ -4269,6 +4390,24 @@ fail_free_ms_iocb:
 		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
 		    ha->sns_cmd, ha->sns_cmd_dma);
 fail_dma_pool:
+	if (ql2xenabledif) {
+		struct dsd_dma *dsd, *nxt;
+
+		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
+		    list) {
+			list_del(&dsd->list);
+			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+			    dsd->dsd_list_dma);
+			ha->dif_bundle_dma_allocs--;
+			kfree(dsd);
+			ha->dif_bundle_kallocs--;
+			ha->pool.unusable.count--;
+		}
+		dma_pool_destroy(ha->dif_bundl_pool);
+		ha->dif_bundl_pool = NULL;
+	}
+
+fail_dif_bundl_dma_pool:
 	if (IS_QLA82XX(ha) || ql2xenabledif) {
 		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
 		ha->fcp_cmnd_dma_pool = NULL;
@@ -4657,6 +4796,32 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 	if (ha->ctx_mempool)
 		mempool_destroy(ha->ctx_mempool);
 
+	if (ql2xenabledif) {
+		struct dsd_dma *dsd, *nxt;
+
+		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
+					 list) {
+			list_del(&dsd->list);
+			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+				      dsd->dsd_list_dma);
+			ha->dif_bundle_dma_allocs--;
+			kfree(dsd);
+			ha->dif_bundle_kallocs--;
+			ha->pool.unusable.count--;
+		}
+		list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
+			list_del(&dsd->list);
+			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+				      dsd->dsd_list_dma);
+			ha->dif_bundle_dma_allocs--;
+			kfree(dsd);
+			ha->dif_bundle_kallocs--;
+		}
+	}
+
+	if (ha->dif_bundl_pool)
+		dma_pool_destroy(ha->dif_bundl_pool);
+
 	qlt_mem_free(ha);
 
 	if (ha->init_cb)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index c915a8743297..a4ab3401a90b 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3226,7 +3226,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
 
 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
 		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
-			prm->prot_seg_cnt, &tc))
+			prm->prot_seg_cnt, cmd))
 			goto crc_queuing_error;
 	}
 	return QLA_SUCCESS;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index dd7150da9a1a..c0bc02a4caf6 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -937,6 +937,8 @@ struct qla_tgt_cmd {
 	uint64_t	lba;
 	uint16_t	a_guard, e_guard, a_app_tag, e_app_tag;
 	uint32_t	a_ref_tag, e_ref_tag;
+#define DIF_BUNDL_DMA_VALID 1
+	uint16_t prot_flags;
 
 	uint64_t jiffies_at_alloc;
 	uint64_t jiffies_at_free;
-- 
2.13.6