diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8ee228e9ab5aa28b85ee2893a92b3ab243c35388..c98fdb185931644bd3ed92d337f85aa0d2517149 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -51,6 +51,8 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
+static int
+isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -87,7 +89,8 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
 }
 
 static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
+		    u8 protection)
 {
 	struct isert_device *device = isert_conn->conn_device;
 	struct ib_qp_init_attr attr;
@@ -119,6 +122,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 	attr.cap.max_recv_sge = 1;
 	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	attr.qp_type = IB_QPT_RC;
+	if (protection)
+		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
 
 	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
 		 cma_id->device);
@@ -226,7 +231,8 @@ isert_create_device_ib_res(struct isert_device *device)
 		return ret;
 
 	/* assign function handlers */
-	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
+	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
 		device->use_fastreg = 1;
 		device->reg_rdma_mem = isert_reg_rdma;
 		device->unreg_rdma_mem = isert_unreg_rdma;
@@ -236,13 +242,18 @@ isert_create_device_ib_res(struct isert_device *device)
 		device->unreg_rdma_mem = isert_unmap_cmd;
 	}
 
+	/* Check signature cap */
+	device->pi_capable = dev_attr->device_cap_flags &
+			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);
 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
 	pr_debug("Using %d CQs, device %s supports %d vectors support "
-		 "Fast registration %d\n",
+		 "Fast registration %d pi_capable %d\n",
 		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_fastreg);
+		 device->ib_device->num_comp_vectors, device->use_fastreg,
+		 device->pi_capable);
 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
 				device->cqs_used, GFP_KERNEL);
 	if (!device->cq_desc) {
@@ -395,6 +406,12 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 		list_del(&fr_desc->list);
 		ib_free_fast_reg_page_list(fr_desc->data_frpl);
 		ib_dereg_mr(fr_desc->data_mr);
+		if (fr_desc->pi_ctx) {
+			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
+			kfree(fr_desc->pi_ctx);
+		}
 		kfree(fr_desc);
 		++i;
 	}
@@ -406,8 +423,10 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 
 static int
 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-		     struct fast_reg_descriptor *fr_desc)
+		     struct fast_reg_descriptor *fr_desc, u8 protection)
 {
+	int ret;
+
 	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
 							 ISCSI_ISER_SG_TABLESIZE);
 	if (IS_ERR(fr_desc->data_frpl)) {
@@ -420,27 +439,88 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
 	if (IS_ERR(fr_desc->data_mr)) {
 		pr_err("Failed to allocate data frmr err=%ld\n",
 		       PTR_ERR(fr_desc->data_mr));
-		ib_free_fast_reg_page_list(fr_desc->data_frpl);
-		return PTR_ERR(fr_desc->data_mr);
+		ret = PTR_ERR(fr_desc->data_mr);
+		goto err_data_frpl;
 	}
 	pr_debug("Create fr_desc %p page_list %p\n",
 		 fr_desc, fr_desc->data_frpl->page_list);
+	fr_desc->ind |= ISERT_DATA_KEY_VALID;
+
+	if (protection) {
+		struct ib_mr_init_attr mr_init_attr = {0};
+		struct pi_context *pi_ctx;
+
+		fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
+		if (!fr_desc->pi_ctx) {
+			pr_err("Failed to allocate pi context\n");
+			ret = -ENOMEM;
+			goto err_data_mr;
+		}
+		pi_ctx = fr_desc->pi_ctx;
+
+		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+						    ISCSI_ISER_SG_TABLESIZE);
+		if (IS_ERR(pi_ctx->prot_frpl)) {
+			pr_err("Failed to allocate prot frpl err=%ld\n",
+			       PTR_ERR(pi_ctx->prot_frpl));
+			ret = PTR_ERR(pi_ctx->prot_frpl);
+			goto err_pi_ctx;
+		}
 
-	fr_desc->valid = true;
+		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+		if (IS_ERR(pi_ctx->prot_mr)) {
+			pr_err("Failed to allocate prot frmr err=%ld\n",
+			       PTR_ERR(pi_ctx->prot_mr));
+			ret = PTR_ERR(pi_ctx->prot_mr);
+			goto err_prot_frpl;
+		}
+		fr_desc->ind |= ISERT_PROT_KEY_VALID;
+
+		mr_init_attr.max_reg_descriptors = 2;
+		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+		if (IS_ERR(pi_ctx->sig_mr)) {
+			pr_err("Failed to allocate signature enabled mr err=%ld\n",
+			       PTR_ERR(pi_ctx->sig_mr));
+			ret = PTR_ERR(pi_ctx->sig_mr);
+			goto err_prot_mr;
+		}
+		fr_desc->ind |= ISERT_SIG_KEY_VALID;
+	}
+	fr_desc->ind &= ~ISERT_PROTECTED;
 
 	return 0;
+err_prot_mr:
+	ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+err_prot_frpl:
+	ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+	kfree(fr_desc->pi_ctx);
+err_data_mr:
+	ib_dereg_mr(fr_desc->data_mr);
+err_data_frpl:
+	ib_free_fast_reg_page_list(fr_desc->data_frpl);
+
+	return ret;
 }
 
 static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
-	int i, ret;
+	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
+	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+	int i, ret, tag_num;
+	/*
+	 * Setup the number of FRMRs based upon the number of tags
+	 * available to session in iscsi_target_locate_portal().
+	 */
+	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
+	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
 
-	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 	isert_conn->conn_fr_pool_size = 0;
-	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+	for (i = 0; i < tag_num; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
 			pr_err("Failed to allocate fast_reg descriptor\n");
@@ -449,7 +529,8 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 		}
 
 		ret = isert_create_fr_desc(device->ib_device,
-					   isert_conn->conn_pd, fr_desc);
+					   isert_conn->conn_pd, fr_desc,
+					   pi_support);
 		if (ret) {
 			pr_err("Failed to create fastreg descriptor err=%d\n",
 			       ret);
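
A hedged worked example of the pool sizing introduced above; the FRMR count now tracks the session's tag allocation instead of the old fixed ISCSI_DEF_XMIT_CMDS_MAX (ISCSIT_MIN_TAGS and ISCSIT_EXTRA_TAGS are target-core constants whose values are not shown in this diff):

```c
/* Hedged: session with se_nacl->queue_depth == 64, and assuming
 * ISCSIT_MIN_TAGS <= 64; both constants live in the iscsi target core.
 */
int tag_num = max_t(u32, ISCSIT_MIN_TAGS, 64);	/* 64 */
tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;	/* two FRMRs per tag + reserve */
```
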
@@ -480,6 +561,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	struct isert_device *device;
 	struct ib_device *ib_dev = cma_id->device;
 	int ret = 0;
+	u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
 
 	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
 		 cma_id, cma_id->context);
@@ -498,6 +580,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	kref_get(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
+	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
 	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
@@ -569,16 +652,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 		goto out_mr;
 	}
 
-	if (device->use_fastreg) {
-		ret = isert_conn_create_fastreg_pool(isert_conn);
-		if (ret) {
-			pr_err("Conn: %p failed to create fastreg pool\n",
-			       isert_conn);
-			goto out_fastreg;
-		}
+	if (pi_support && !device->pi_capable) {
+		pr_err("Protection information requested but not supported\n");
+		ret = -EINVAL;
+		goto out_mr;
 	}
 
-	ret = isert_conn_setup_qp(isert_conn, cma_id);
+	ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
 	if (ret)
 		goto out_conn_dev;
 
@@ -591,9 +671,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	return 0;
 
 out_conn_dev:
-	if (device->use_fastreg)
-		isert_conn_free_fastreg_pool(isert_conn);
-out_fastreg:
 	ib_dereg_mr(isert_conn->conn_mr);
 out_mr:
 	ib_dealloc_pd(isert_conn->conn_pd);
@@ -967,6 +1044,18 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 	}
 	if (!login->login_failed) {
 		if (login->login_complete) {
+			if (isert_conn->conn_device->use_fastreg) {
+				u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
+
+				ret = isert_conn_create_fastreg_pool(isert_conn,
+								     pi_support);
+				if (ret) {
+					pr_err("Conn: %p failed to create fastreg pool\n",
+					       isert_conn);
+					return ret;
+				}
+			}
+
 			ret = isert_alloc_rx_descriptors(isert_conn);
 			if (ret)
 				return ret;
@@ -1392,19 +1481,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
 	}
 }
 
+static int
+isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
+		   enum iser_ib_op_code op, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
+			      DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	data->len = length - offset;
+	data->offset = offset;
+	data->sg_off = data->offset / PAGE_SIZE;
+
+	data->sg = &sg[data->sg_off];
+	data->nents = min_t(unsigned int, nents - data->sg_off,
+					  ISCSI_ISER_SG_TABLESIZE);
+	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
+					PAGE_SIZE);
+
+	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
+					data->dma_dir);
+	if (unlikely(!data->dma_nents)) {
+		pr_err("Cmd: unable to dma map SGs %p\n", sg);
+		return -EINVAL;
+	}
+
+	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+
+	return 0;
+}
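
To make the offset math in isert_map_data_buf() concrete, a hedged example assuming 4 KB pages:

```c
/*
 * Hedged example, assuming PAGE_SIZE == 4096: a DataOUT recovery
 * with write_data_done == 12288 yields
 *
 *	data->sg_off = 12288 / 4096 = 3;	(skip 3 SG entries)
 *	data->sg     = &sg[3];
 *	data->len    = length - 12288;
 *
 * nents and len are then clamped so the buffer fits a single
 * ISCSI_ISER_SG_TABLESIZE fast-reg page list.
 */
```
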
+
+static void
+isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
+	memset(data, 0, sizeof(*data));
+}
+
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 
 	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
-	if (wr->sge) {
+
+	if (wr->data.sg) {
 		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	if (wr->send_wr) {
@@ -1424,7 +1554,6 @@ static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
 	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1432,18 +1561,19 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 	if (wr->fr_desc) {
 		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
 			 isert_cmd, wr->fr_desc);
+		if (wr->fr_desc->ind & ISERT_PROTECTED) {
+			isert_unmap_data_buf(isert_conn, &wr->prot);
+			wr->fr_desc->ind &= ~ISERT_PROTECTED;
+		}
 		spin_lock_bh(&isert_conn->conn_lock);
 		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_bh(&isert_conn->conn_lock);
 		wr->fr_desc = NULL;
 	}
 
-	if (wr->sge) {
+	if (wr->data.sg) {
 		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	wr->ib_sge = NULL;
@@ -1451,7 +1581,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 }
 
 static void
-isert_put_cmd(struct isert_cmd *isert_cmd)
+isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 {
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
@@ -1467,8 +1597,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 			list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
-		if (cmd->data_direction == DMA_TO_DEVICE)
+		if (cmd->data_direction == DMA_TO_DEVICE) {
 			iscsit_stop_dataout_timer(cmd);
+			/*
+			 * Check for special case during comp_err where
+			 * WRITE_PENDING has been handed off from core,
+			 * but requires an extra target_put_sess_cmd()
+			 * before transport_generic_free_cmd() below.
+			 */
+			if (comp_err &&
+			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+				struct se_cmd *se_cmd = &cmd->se_cmd;
+
+				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+			}
+		}
 
 		device->unreg_rdma_mem(isert_cmd, isert_conn);
 		transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1523,7 +1666,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
 
 static void
 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
-		     struct ib_device *ib_dev)
+		     struct ib_device *ib_dev, bool comp_err)
 {
 	if (isert_cmd->pdu_buf_dma != 0) {
 		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
@@ -1533,7 +1676,77 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
 	}
 
 	isert_unmap_tx_desc(tx_desc, ib_dev);
-	isert_put_cmd(isert_cmd);
+	isert_put_cmd(isert_cmd, comp_err);
+}
+
+static int
+isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
+{
+	struct ib_mr_status mr_status;
+	int ret;
+
+	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+	if (ret) {
+		pr_err("ib_check_mr_status failed, ret %d\n", ret);
+		goto fail_mr_status;
+	}
+
+	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+		u64 sec_offset_err;
+		/* PI interval on the wire: logical block + 8-byte DIF tuple */
+		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
+
+		switch (mr_status.sig_err.err_type) {
+		case IB_SIG_BAD_GUARD:
+			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+			break;
+		case IB_SIG_BAD_REFTAG:
+			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+			break;
+		case IB_SIG_BAD_APPTAG:
+			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+			break;
+		}
+		sec_offset_err = mr_status.sig_err.sig_err_offset;
+		do_div(sec_offset_err, block_size);
+		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
+
+		pr_err("isert: PI error found type %d at sector 0x%llx "
+		       "expected 0x%x vs actual 0x%x\n",
+		       mr_status.sig_err.err_type,
+		       (unsigned long long)se_cmd->bad_sector,
+		       mr_status.sig_err.expected,
+		       mr_status.sig_err.actual);
+		ret = 1;
+	}
+
+fail_mr_status:
+	return ret;
+}
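
A small hedged sketch of the bad-sector arithmetic above, assuming 512-byte logical blocks (520-byte protection intervals on the wire); the helper name is hypothetical:

```c
#include <linux/types.h>
#include <asm/div64.h>

/* Hedged example: locate the bad sector for a PI error reported at
 * byte offset 5200 on a 512-byte block device.
 */
static u64 example_bad_sector(u64 sig_err_offset, u32 lb_size, u64 lba)
{
	u64 sec_offset_err = sig_err_offset;
	u32 block_size = lb_size + 8;	/* block + 8-byte DIF tuple */

	/* do_div() stores the quotient in place and returns the remainder */
	do_div(sec_offset_err, block_size);
	return sec_offset_err + lba;	/* 5200 / 520 == 10 -> lba + 10 */
}
```
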
+
+static void
+isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
+			    struct isert_cmd *isert_cmd)
+{
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct isert_device *device = isert_conn->conn_device;
+	int ret = 0;
+
+	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+		ret = isert_check_pi_status(se_cmd,
+					    wr->fr_desc->pi_ctx->sig_mr);
+		wr->fr_desc->ind &= ~ISERT_PROTECTED;
+	}
+
+	device->unreg_rdma_mem(isert_cmd, isert_conn);
+	wr->send_wr_num = 0;
+	if (ret)
+		transport_send_check_condition_and_sense(se_cmd,
+							 se_cmd->pi_err, 0);
+	else
+		isert_put_response(isert_conn->conn, cmd);
 }
 
 static void
@@ -1545,10 +1758,17 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct isert_device *device = isert_conn->conn_device;
+	int ret = 0;
+
+	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+		ret = isert_check_pi_status(se_cmd,
+					    wr->fr_desc->pi_ctx->sig_mr);
+		wr->fr_desc->ind &= ~ISERT_PROTECTED;
+	}
 
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	cmd->write_data_done = wr->cur_rdma_length;
+	cmd->write_data_done = wr->data.len;
 	wr->send_wr_num = 0;
 
 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
@@ -1557,7 +1777,11 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
 	spin_unlock_bh(&cmd->istate_lock);
 
-	target_execute_cmd(se_cmd);
+	if (ret)
+		transport_send_check_condition_and_sense(se_cmd,
+							 se_cmd->pi_err, 0);
+	else
+		target_execute_cmd(se_cmd);
 }
 
 static void
@@ -1577,14 +1801,14 @@ isert_do_control_comp(struct work_struct *work)
 		iscsit_tmr_post_handler(cmd, cmd->conn);
 
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
 		break;
 	case ISTATE_SEND_REJECT:
 		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
 		atomic_dec(&isert_conn->post_send_buf_count);
 
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
 		break;
 	case ISTATE_SEND_LOGOUTRSP:
 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
@@ -1598,7 +1822,7 @@ isert_do_control_comp(struct work_struct *work)
 	case ISTATE_SEND_TEXTRSP:
 		atomic_dec(&isert_conn->post_send_buf_count);
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
 		break;
 	default:
 		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
@@ -1626,10 +1850,21 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 		queue_work(isert_comp_wq, &isert_cmd->comp_work);
 		return;
 	}
-	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+
+	/*
+	 * If send_wr_num is 0 we already got the RDMA completion and
+	 * cleared it, so only the response post needs to be decremented.
+	 * Otherwise the response is accounted for in send_wr_num, so
+	 * subtract that.
+	 */
+	if (wr->send_wr_num)
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+	else
+		atomic_dec(&isert_conn->post_send_buf_count);
 
 	cmd->i_state = ISTATE_SENT_STATUS;
-	isert_completion_put(tx_desc, isert_cmd, ib_dev);
+	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
 }
 
 static void
@@ -1658,8 +1893,9 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
 					  isert_conn, ib_dev);
 		break;
 	case ISER_IB_RDMA_WRITE:
-		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
-		dump_stack();
+		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+		isert_completion_rdma_write(tx_desc, isert_cmd);
 		break;
 	case ISER_IB_RDMA_READ:
 		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
@@ -1709,8 +1945,20 @@ isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_de
 		llnode = llist_next(llnode);
 		wr = &t->isert_cmd->rdma_wr;
 
-		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-		isert_completion_put(t, t->isert_cmd, ib_dev);
+		/*
+		 * If send_wr_num is 0 we already got the RDMA completion
+		 * and cleared it, so only the response post needs to be
+		 * decremented. Otherwise the response is accounted for
+		 * in send_wr_num, so subtract that.
+		 */
+		if (wr->send_wr_num)
+			atomic_sub(wr->send_wr_num,
+				   &isert_conn->post_send_buf_count);
+		else
+			atomic_dec(&isert_conn->post_send_buf_count);
+
+		isert_completion_put(t, t->isert_cmd, ib_dev, true);
 	}
 }
 
@@ -1728,15 +1976,27 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
 		llnode = llist_next(llnode);
 		wr = &t->isert_cmd->rdma_wr;
 
-		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-		isert_completion_put(t, t->isert_cmd, ib_dev);
+		/*
+		 * If send_wr_num is 0 we already got the RDMA completion
+		 * and cleared it, so only the response post needs to be
+		 * decremented. Otherwise the response is accounted for
+		 * in send_wr_num, so subtract that.
+		 */
+		if (wr->send_wr_num)
+			atomic_sub(wr->send_wr_num,
+				   &isert_conn->post_send_buf_count);
+		else
+			atomic_dec(&isert_conn->post_send_buf_count);
+
+		isert_completion_put(t, t->isert_cmd, ib_dev, true);
 	}
 	tx_desc->comp_llnode_batch = NULL;
 
 	if (!isert_cmd)
 		isert_unmap_tx_desc(tx_desc, ib_dev);
 	else
-		isert_completion_put(tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
 }
 
 static void
@@ -1918,6 +2178,36 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	return isert_post_response(isert_conn, isert_cmd);
 }
 
+static void
+isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct isert_device *device = isert_conn->conn_device;
+
+	spin_lock_bh(&conn->cmd_lock);
+	if (!list_empty(&cmd->i_conn_node))
+		list_del_init(&cmd->i_conn_node);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (cmd->data_direction == DMA_TO_DEVICE)
+		iscsit_stop_dataout_timer(cmd);
+
+	device->unreg_rdma_mem(isert_cmd, isert_conn);
+}
+
+static enum target_prot_op
+isert_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct isert_device *device = isert_conn->conn_device;
+
+	if (device->pi_capable)
+		return TARGET_PROT_ALL;
+
+	return TARGET_PROT_NORMAL;
+}
+
 static int
 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 		bool nopout_response)
@@ -2099,54 +2389,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_data_buf *data = &wr->data;
 	struct ib_send_wr *send_wr;
 	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
-	int ret = 0, count, i, ib_sge_cnt;
+	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
+	int ret = 0, i, ib_sge_cnt;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		offset = cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
-	}
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
-	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	wr->cur_rdma_length = data_left;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
+	data_left = data->len;
+	offset = data->offset;
 
-	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
 	if (!ib_sge) {
 		pr_warn("Unable to allocate ib_sge\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 	wr->ib_sge = ib_sge;
 
-	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
 	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
 				GFP_KERNEL);
 	if (!wr->send_wr) {
 		pr_debug("Unable to allocate wr->send_wr\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 
 	wr->isert_cmd = isert_cmd;
@@ -2185,10 +2460,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	}
 
 	return 0;
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
+unmap_cmd:
+	isert_unmap_data_buf(isert_conn, data);
+
 	return ret;
 }
 
@@ -2232,49 +2506,70 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 }
 
 static int
-isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
-		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
-		  unsigned int data_len)
+isert_fast_reg_mr(struct isert_conn *isert_conn,
+		  struct fast_reg_descriptor *fr_desc,
+		  struct isert_data_buf *mem,
+		  enum isert_indicator ind,
+		  struct ib_sge *sge)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_mr *mr;
+	struct ib_fast_reg_page_list *frpl;
 	struct ib_send_wr fr_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
 	int ret, pagelist_len;
 	u32 page_off;
 	u8 key;
 
-	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
-	page_off = offset % PAGE_SIZE;
+	if (mem->dma_nents == 1) {
+		sge->lkey = isert_conn->conn_mr->lkey;
+		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
+		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
+		pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
+			 __func__, __LINE__, sge->addr, sge->length,
+			 sge->lkey);
+		return 0;
+	}
+
+	if (ind == ISERT_DATA_KEY_VALID) {
+		/* Registering data buffer */
+		mr = fr_desc->data_mr;
+		frpl = fr_desc->data_frpl;
+	} else {
+		/* Registering protection buffer */
+		mr = fr_desc->pi_ctx->prot_mr;
+		frpl = fr_desc->pi_ctx->prot_frpl;
+	}
+
+	page_off = mem->offset % PAGE_SIZE;
 
 	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
-		 fr_desc, sg_nents, offset);
+		 fr_desc, mem->nents, mem->offset);
 
-	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
-					     &fr_desc->data_frpl->page_list[0]);
+	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
+					     &frpl->page_list[0]);
 
-	if (!fr_desc->valid) {
+	if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
 		memset(&inv_wr, 0, sizeof(inv_wr));
 		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
 		inv_wr.opcode = IB_WR_LOCAL_INV;
-		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+		inv_wr.ex.invalidate_rkey = mr->rkey;
 		wr = &inv_wr;
 		/* Bump the key */
-		key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
-		ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+		key = (u8)(mr->rkey & 0x000000FF);
+		ib_update_fast_reg_key(mr, ++key);
 	}
 
 	/* Prepare FASTREG WR */
 	memset(&fr_wr, 0, sizeof(fr_wr));
 	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
 	fr_wr.opcode = IB_WR_FAST_REG_MR;
-	fr_wr.wr.fast_reg.iova_start =
-		fr_desc->data_frpl->page_list[0] + page_off;
-	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
+	fr_wr.wr.fast_reg.page_list = frpl;
 	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
 	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-	fr_wr.wr.fast_reg.length = data_len;
-	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+	fr_wr.wr.fast_reg.length = mem->len;
+	fr_wr.wr.fast_reg.rkey = mr->rkey;
 	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 
 	if (!wr)
@@ -2287,15 +2582,157 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 		pr_err("fast registration failed, ret:%d\n", ret);
 		return ret;
 	}
-	fr_desc->valid = false;
+	fr_desc->ind &= ~ind;
+
+	sge->lkey = mr->lkey;
+	sge->addr = frpl->page_list[0] + page_off;
+	sge->length = mem->len;
+
+	pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
+		 __func__, __LINE__, sge->addr, sge->length,
+		 sge->lkey);
+
+	return ret;
+}
+
+static inline enum ib_t10_dif_type
+se2ib_prot_type(enum target_prot_type prot_type)
+{
+	switch (prot_type) {
+	case TARGET_DIF_TYPE0_PROT:
+		return IB_T10DIF_NONE;
+	case TARGET_DIF_TYPE1_PROT:
+		return IB_T10DIF_TYPE1;
+	case TARGET_DIF_TYPE2_PROT:
+		return IB_T10DIF_TYPE2;
+	case TARGET_DIF_TYPE3_PROT:
+		return IB_T10DIF_TYPE3;
+	default:
+		return IB_T10DIF_NONE;
+	}
+}
 
-	ib_sge->lkey = fr_desc->data_mr->lkey;
-	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
-	ib_sge->length = data_len;
+static int
+isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
+{
+	enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
+
+	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
+	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
+	sig_attrs->mem.sig.dif.pi_interval =
+				se_cmd->se_dev->dev_attrib.block_size;
+	sig_attrs->wire.sig.dif.pi_interval =
+				se_cmd->se_dev->dev_attrib.block_size;
+
+	switch (se_cmd->prot_op) {
+	case TARGET_PROT_DIN_INSERT:
+	case TARGET_PROT_DOUT_STRIP:
+		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
+		sig_attrs->wire.sig.dif.type = ib_prot_type;
+		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		break;
+	case TARGET_PROT_DOUT_INSERT:
+	case TARGET_PROT_DIN_STRIP:
+		sig_attrs->mem.sig.dif.type = ib_prot_type;
+		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+		break;
+	case TARGET_PROT_DIN_PASS:
+	case TARGET_PROT_DOUT_PASS:
+		sig_attrs->mem.sig.dif.type = ib_prot_type;
+		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+		sig_attrs->wire.sig.dif.type = ib_prot_type;
+		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		break;
+	default:
+		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+		return -EINVAL;
+	}
 
-	pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
-		 ib_sge->addr, ib_sge->length, ib_sge->lkey);
+	return 0;
+}
+
+static inline u8
+isert_set_prot_checks(u8 prot_checks)
+{
+	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
+	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+}
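
The returned value feeds the signature API's check_mask; a hedged illustration of how the bit groups combine:

```c
/* Hedged: guard + reference tag checking requested */
u8 mask = isert_set_prot_checks(TARGET_DIF_CHECK_GUARD |
				TARGET_DIF_CHECK_REFTAG);
/* mask == 0xc0 | 0x30 | 0x0f == 0xff */
```

Note that both the 0x30 and 0x0f groups key off TARGET_DIF_CHECK_REFTAG; this looks deliberate, since the target core does not hand fabrics a separate application-tag check here.
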
+
+static int
+isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
+		 struct fast_reg_descriptor *fr_desc,
+		 struct ib_sge *data_sge, struct ib_sge *prot_sge,
+		 struct ib_sge *sig_sge)
+{
+	struct ib_send_wr sig_wr, inv_wr;
+	struct ib_send_wr *bad_wr, *wr = NULL;
+	struct pi_context *pi_ctx = fr_desc->pi_ctx;
+	struct ib_sig_attrs sig_attrs;
+	int ret;
+	u32 key;
+
+	memset(&sig_attrs, 0, sizeof(sig_attrs));
+	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+	if (ret)
+		goto err;
+
+	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
+
+	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
+		memset(&inv_wr, 0, sizeof(inv_wr));
+		inv_wr.opcode = IB_WR_LOCAL_INV;
+		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+		wr = &inv_wr;
+		/* Bump the key */
+		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
+		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
+	}
+
+	memset(&sig_wr, 0, sizeof(sig_wr));
+	sig_wr.opcode = IB_WR_REG_SIG_MR;
+	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
+	sig_wr.sg_list = data_sge;
+	sig_wr.num_sge = 1;
+	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
+	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+	if (se_cmd->t_prot_sg)
+		sig_wr.wr.sig_handover.prot = prot_sge;
+
+	if (!wr)
+		wr = &sig_wr;
+	else
+		wr->next = &sig_wr;
+
+	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+	if (ret) {
+		pr_err("fast registration failed, ret:%d\n", ret);
+		goto err;
+	}
+	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
+
+	sig_sge->lkey = pi_ctx->sig_mr->lkey;
+	sig_sge->addr = 0;
+	sig_sge->length = se_cmd->data_length;
+	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
+	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
+		/*
+		 * We have protection guards on the wire,
+		 * so we need a larger transfer length
+		 */
+		sig_sge->length += se_cmd->prot_length;
 
+	pr_debug("sig_sge: addr: 0x%llx  length: %u lkey: %x\n",
+		 sig_sge->addr, sig_sge->length,
+		 sig_sge->lkey);
+err:
 	return ret;
 }
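
The sig_sge length fixup deserves a worked example; when PI travels interleaved on the wire, the RDMA transfer must cover the DIF tuples too (values hedged):

```c
/* Hedged: 8-block READ, 512-byte blocks, TARGET_PROT_DIN_INSERT
 * (PI generated toward the wire).
 */
u32 data_len = 8 * 512;			/* se_cmd->data_length == 4096 */
u32 prot_len = 8 * 8;			/* se_cmd->prot_length ==   64 */
u32 wire_len = data_len + prot_len;	/* sig_sge->length    == 4160 */
```

For DIN_STRIP and DOUT_INSERT the wire side carries no PI, so the length stays at data_length.
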
 
@@ -2305,62 +2742,82 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_conn *isert_conn = conn->context;
+	struct ib_sge data_sge;
 	struct ib_send_wr *send_wr;
-	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	struct fast_reg_descriptor *fr_desc;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max;
-	int ret = 0, count;
+	struct fast_reg_descriptor *fr_desc = NULL;
+	u32 offset;
+	int ret = 0;
 	unsigned long flags;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		offset = cmd->write_data_done;
-		sg_off = offset / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
-	}
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
+	if (wr->data.dma_nents != 1 ||
+	    se_cmd->prot_op != TARGET_PROT_NORMAL) {
+		spin_lock_irqsave(&isert_conn->conn_lock, flags);
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+					   struct fast_reg_descriptor, list);
+		list_del(&fr_desc->list);
+		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+		wr->fr_desc = fr_desc;
 	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
 
-	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
-	ib_sge = &wr->s_ib_sge;
-	wr->ib_sge = ib_sge;
+	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
+				ISERT_DATA_KEY_VALID, &data_sge);
+	if (ret)
+		goto unmap_cmd;
+
+	if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
+		struct ib_sge prot_sge, sig_sge;
+
+		if (se_cmd->t_prot_sg) {
+			ret = isert_map_data_buf(isert_conn, isert_cmd,
+						 se_cmd->t_prot_sg,
+						 se_cmd->t_prot_nents,
+						 se_cmd->prot_length,
+						 0, wr->iser_ib_op, &wr->prot);
+			if (ret)
+				goto unmap_cmd;
+
+			ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
+						ISERT_PROT_KEY_VALID, &prot_sge);
+			if (ret)
+				goto unmap_prot_cmd;
+		}
+
+		ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
+				       &data_sge, &prot_sge, &sig_sge);
+		if (ret)
+			goto unmap_prot_cmd;
 
+		fr_desc->ind |= ISERT_PROTECTED;
+		memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
+	} else {
+		memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+	}
+
+	wr->ib_sge = &wr->s_ib_sge;
 	wr->send_wr_num = 1;
 	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
 	wr->send_wr = &wr->s_send_wr;
-
 	wr->isert_cmd = isert_cmd;
-	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
 
 	send_wr = &isert_cmd->rdma_wr.s_send_wr;
-	send_wr->sg_list = ib_sge;
+	send_wr->sg_list = &wr->s_ib_sge;
 	send_wr->num_sge = 1;
 	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		send_wr->opcode = IB_WR_RDMA_WRITE;
 		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
 		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-		send_wr->send_flags = 0;
-		send_wr->next = &isert_cmd->tx_desc.send_wr;
+		send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+				      0 : IB_SEND_SIGNALED;
 	} else {
 		send_wr->opcode = IB_WR_RDMA_READ;
 		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
@@ -2368,37 +2825,18 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		send_wr->send_flags = IB_SEND_SIGNALED;
 	}
 
-	data_len = min(data_left, rdma_write_max);
-	wr->cur_rdma_length = data_len;
-
-	/* if there is a single dma entry, dma mr is sufficient */
-	if (count == 1) {
-		ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
-		ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
-		ib_sge->lkey = isert_conn->conn_mr->lkey;
-		wr->fr_desc = NULL;
-	} else {
+	return 0;
+unmap_prot_cmd:
+	if (se_cmd->t_prot_sg)
+		isert_unmap_data_buf(isert_conn, &wr->prot);
+unmap_cmd:
+	if (fr_desc) {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
-					   struct fast_reg_descriptor, list);
-		list_del(&fr_desc->list);
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
-		wr->fr_desc = fr_desc;
-
-		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
-					ib_sge, sg_nents, offset, data_len);
-		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
-			goto unmap_sg;
-		}
 	}
+	isert_unmap_data_buf(isert_conn, &wr->data);
 
-	return 0;
-
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	return ret;
 }
 
@@ -2422,25 +2860,35 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		return rc;
 	}
 
-	/*
-	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
-	 */
-	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-	iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
-			     &isert_cmd->tx_desc.iscsi_header);
-	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_conn, isert_cmd,
-			   &isert_cmd->tx_desc.send_wr, true);
+	if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+		/*
+		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
+		 */
+		isert_create_send_desc(isert_conn, isert_cmd,
+				       &isert_cmd->tx_desc);
+		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+				     &isert_cmd->tx_desc.iscsi_header);
+		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+		isert_init_send_wr(isert_conn, isert_cmd,
+				   &isert_cmd->tx_desc.send_wr, true);
+		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
+		wr->send_wr_num += 1;
+	}
 
-	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
 
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
 	if (rc) {
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 	}
-	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
-		 isert_cmd);
+
+	if (se_cmd->prot_op == TARGET_PROT_NORMAL)
+		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+			 "READ\n", isert_cmd);
+	else
+		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+			 isert_cmd);
 
 	return 1;
 }
@@ -2815,6 +3263,8 @@ static struct iscsit_transport iser_target_transport = {
 	.iscsit_get_dataout	= isert_get_dataout,
 	.iscsit_queue_data_in	= isert_put_datain,
 	.iscsit_queue_status	= isert_put_response,
+	.iscsit_aborted_task	= isert_aborted_task,
+	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
 };
 
 static int __init isert_init(void)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index f6ae7f5dd4082768f3b6f0dfb36c1d45506b5eb6..4c072ae34c01a3021e57cb5378a3949b97d0876c 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -50,11 +50,35 @@ struct iser_tx_desc {
 	struct ib_send_wr send_wr;
 } __packed;
 
+enum isert_indicator {
+	ISERT_PROTECTED		= 1 << 0,
+	ISERT_DATA_KEY_VALID	= 1 << 1,
+	ISERT_PROT_KEY_VALID	= 1 << 2,
+	ISERT_SIG_KEY_VALID	= 1 << 3,
+};
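
A brief hedged reading of the new indicator bits: the *_KEY_VALID bits track whether each MR's rkey is still fresh or needs a LOCAL_INV before reuse, while ISERT_PROTECTED marks a descriptor with an outstanding signature registration:

```c
/*
 * Rough lifecycle (my reading of the patch, hedged):
 *
 *	create:		ind |= ISERT_*_KEY_VALID    (fresh rkeys, no
 *						     LOCAL_INV needed yet)
 *	fast-reg:	ind &= ~ISERT_*_KEY_VALID   (key consumed; invalidate
 *						     before the next use)
 *	sig reg:	ind |= ISERT_PROTECTED      (unmap prot buf and check
 *						     sig status at completion)
 *	completion:	ind &= ~ISERT_PROTECTED
 */
```
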
+
+struct pi_context {
+	struct ib_mr		       *prot_mr;
+	struct ib_fast_reg_page_list   *prot_frpl;
+	struct ib_mr		       *sig_mr;
+};
+
 struct fast_reg_descriptor {
-	struct list_head	list;
-	struct ib_mr		*data_mr;
-	struct ib_fast_reg_page_list	*data_frpl;
-	bool			valid;
+	struct list_head		list;
+	struct ib_mr		       *data_mr;
+	struct ib_fast_reg_page_list   *data_frpl;
+	u8				ind;
+	struct pi_context	       *pi_ctx;
+};
+
+struct isert_data_buf {
+	struct scatterlist     *sg;
+	int			nents;
+	u32			sg_off;
+	u32			len; /* cur_rdma_length */
+	u32			offset;
+	unsigned int		dma_nents;
+	enum dma_data_direction dma_dir;
 };
 
 struct isert_rdma_wr {
@@ -63,12 +87,11 @@ struct isert_rdma_wr {
 	enum iser_ib_op_code	iser_ib_op;
 	struct ib_sge		*ib_sge;
 	struct ib_sge		s_ib_sge;
-	int			num_sge;
-	struct scatterlist	*sge;
 	int			send_wr_num;
 	struct ib_send_wr	*send_wr;
 	struct ib_send_wr	s_send_wr;
-	u32			cur_rdma_length;
+	struct isert_data_buf	data;
+	struct isert_data_buf	prot;
 	struct fast_reg_descriptor *fr_desc;
 };
 
@@ -141,6 +164,7 @@ struct isert_cq_desc {
 
 struct isert_device {
 	int			use_fastreg;
+	bool			pi_capable;
 	int			cqs_used;
 	int			refcount;
 	int			cq_active_qps[ISERT_MAX_CQ];
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0e537d8d0e4774312d632f68f0cedd0aaa406b7d..fe09f2788b15e7e42b1c076f67e17b3b2ca2f268 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 				 struct srpt_send_ioctx *ioctx)
 {
+	struct ib_device *dev = ch->sport->sdev->device;
 	struct se_cmd *cmd;
 	struct scatterlist *sg, *sg_orig;
 	int sg_cnt;
@@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 
 	db = ioctx->rbufs;
 	tsize = cmd->data_length;
-	dma_len = sg_dma_len(&sg[0]);
+	dma_len = ib_sg_dma_len(dev, &sg[0]);
 	riu = ioctx->rdma_ius;
 
 	/*
@@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 					++j;
 					if (j < count) {
 						sg = sg_next(sg);
-						dma_len = sg_dma_len(sg);
+						dma_len = ib_sg_dma_len(
+								dev, sg);
 					}
 				}
 			} else {
@@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 	tsize = cmd->data_length;
 	riu = ioctx->rdma_ius;
 	sg = sg_orig;
-	dma_len = sg_dma_len(&sg[0]);
-	dma_addr = sg_dma_address(&sg[0]);
+	dma_len = ib_sg_dma_len(dev, &sg[0]);
+	dma_addr = ib_sg_dma_address(dev, &sg[0]);
 
 	/* this second loop really maps sg addresses to rdma_iu->ib_sge */
 	for (i = 0, j = 0;
@@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 					++j;
 					if (j < count) {
 						sg = sg_next(sg);
-						dma_len = sg_dma_len(sg);
-						dma_addr = sg_dma_address(sg);
+						dma_len = ib_sg_dma_len(
+								dev, sg);
+						dma_addr = ib_sg_dma_address(
+								dev, sg);
 					}
 				}
 			} else {
@@ -2580,7 +2584,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 		goto destroy_ib;
 	}
 
-	ch->sess = transport_init_session();
+	ch->sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(ch->sess)) {
 		rej->reason = __constant_cpu_to_be32(
 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -3081,6 +3085,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
 	srpt_queue_response(cmd);
 }
 
+static void srpt_aborted_task(struct se_cmd *cmd)
+{
+	struct srpt_send_ioctx *ioctx = container_of(cmd,
+				struct srpt_send_ioctx, cmd);
+
+	srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+}
+
 static int srpt_queue_status(struct se_cmd *cmd)
 {
 	struct srpt_send_ioctx *ioctx;
@@ -3928,6 +3940,7 @@ static struct target_core_fabric_ops srpt_template = {
 	.queue_data_in			= srpt_queue_data_in,
 	.queue_status			= srpt_queue_status,
 	.queue_tm_rsp			= srpt_queue_tm_rsp,
+	.aborted_task			= srpt_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 788c4fe2b0c9ec7a8113078c318421d7407ae855..68fb66fdb757fbc98784fad4ff7a5d03c49ee73d 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
 	qlt_xmit_tm_rsp(mcmd);
 }
 
+static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+	struct scsi_qla_host *vha = cmd->vha;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!cmd->sg_mapped)
+		return;
+
+	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+	cmd->sg_mapped = 0;
+}
+
 /* Local pointer to allocated TCM configfs fabric module */
 struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
 struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
@@ -1468,7 +1482,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	}
 	se_tpg = &tpg->se_tpg;
 
-	se_sess = transport_init_session();
+	se_sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(se_sess)) {
 		pr_err("Unable to initialize struct se_session\n");
 		return PTR_ERR(se_sess);
@@ -1877,6 +1891,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
 	.queue_data_in			= tcm_qla2xxx_queue_data_in,
 	.queue_status			= tcm_qla2xxx_queue_status,
 	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.aborted_task			= tcm_qla2xxx_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
@@ -1926,6 +1941,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
 	.queue_data_in			= tcm_qla2xxx_queue_data_in,
 	.queue_status			= tcm_qla2xxx_queue_status,
 	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.aborted_task			= tcm_qla2xxx_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b83ec378d04f8f1ed5fbc820429c6f1657b4d7a0..78cab13bbb1be3796b0e00af4a0667329ed4a2d8 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -499,6 +499,23 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	return 0;
 }
 
+static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+
+	spin_lock_bh(&conn->cmd_lock);
+	if (!list_empty(&cmd->i_conn_node))
+		list_del_init(&cmd->i_conn_node);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	__iscsit_free_cmd(cmd, scsi_cmd, true);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+	return TARGET_PROT_NORMAL;
+}
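
Each iscsit transport now advertises what it can offload, and the login path feeds that straight into the session; a hedged sketch drawn from the iscsi_target_login.c hunk below:

```c
/*
 *	sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
 *	sess->se_sess = transport_init_session(sup_pro_ops);
 *
 * Plain iSCSI/TCP reports TARGET_PROT_NORMAL (no PI offload), while
 * isert reports TARGET_PROT_ALL when the HCA is pi_capable.
 */
```
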
+
 static struct iscsit_transport iscsi_target_transport = {
 	.name			= "iSCSI/TCP",
 	.transport_type		= ISCSI_TCP,
@@ -513,6 +530,8 @@ static struct iscsit_transport iscsi_target_transport = {
 	.iscsit_response_queue	= iscsit_response_queue,
 	.iscsit_queue_data_in	= iscsit_queue_rsp,
 	.iscsit_queue_status	= iscsit_queue_rsp,
+	.iscsit_aborted_task	= iscsit_aborted_task,
+	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
 };
 
 static int __init iscsi_target_init_module(void)
@@ -1503,6 +1522,16 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
 	u32 payload_length = ntoh24(hdr->dlength);
 
+	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+		pr_err("NopOUT flags FINAL bit not set, protocol error.\n");
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
+
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+					 (unsigned char *)hdr);
+	}
+
 	if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
 		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
 			" not set, protocol error.\n");
@@ -2468,6 +2497,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 {
 	struct iscsi_cmd *cmd;
 	struct iscsi_conn *conn_p;
+	bool found = false;
 
 	/*
 	 * Only send an Asynchronous Message on connections whose network
@@ -2476,11 +2506,12 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
 		if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
 			iscsit_inc_conn_usage_count(conn_p);
+			found = true;
 			break;
 		}
 	}
 
-	if (!conn_p)
+	if (!found)
 		return;
 
 	cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 1c0088fe9e99368c2dfb50b378dee6033cd42cab..ae03f3e5de1e47f8ce9bb43ae134361d8e9e325a 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1052,6 +1052,11 @@ TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
  */
 DEF_TPG_ATTRIB(default_erl);
 TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_t10_pi
+ */
+DEF_TPG_ATTRIB(t10_pi);
+TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
 
 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_authentication.attr,
@@ -1064,6 +1069,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_prod_mode_write_protect.attr,
 	&iscsi_tpg_attrib_demo_mode_discovery.attr,
 	&iscsi_tpg_attrib_default_erl.attr,
+	&iscsi_tpg_attrib_t10_pi.attr,
 	NULL,
 };
 
@@ -1815,6 +1821,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
 	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 
+static void lio_aborted_task(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
+}
+
 static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -1999,6 +2012,7 @@ int iscsi_target_register_configfs(void)
 	fabric->tf_ops.queue_data_in = &lio_queue_data_in;
 	fabric->tf_ops.queue_status = &lio_queue_status;
 	fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+	fabric->tf_ops.aborted_task = &lio_aborted_task;
 	/*
 	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
 	 */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 48f7b3bf4e8c3d2eec3bb413d33d99b6760ce3be..886d74d6f3d4669b6050d2872ea38d924ac3e484 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -58,7 +58,8 @@
 #define TA_DEMO_MODE_DISCOVERY		1
 #define TA_DEFAULT_ERL			0
 #define TA_CACHE_CORE_NPS		0
-
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI		0
 
 #define ISCSI_IOV_DATA_BUFFER		5
 
@@ -765,6 +766,7 @@ struct iscsi_tpg_attrib {
 	u32			prod_mode_write_protect;
 	u32			demo_mode_discovery;
 	u32			default_erl;
+	u8			t10_pi;
 	struct iscsi_portal_group *tpg;
 };
 
@@ -787,6 +789,7 @@ struct iscsi_np {
 	void			*np_context;
 	struct iscsit_transport *np_transport;
 	struct list_head	np_list;
+	struct iscsi_tpg_np	*tpg_np;
 } ____cacheline_aligned;
 
 struct iscsi_tpg_np {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e29279e6b577dd564e8271f95c171838ead5ca39..8739b98f6f93539b8c6eb95f27d7fde3601b40d7 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -259,6 +259,7 @@ static int iscsi_login_zero_tsih_s1(
 {
 	struct iscsi_session *sess = NULL;
 	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+	enum target_prot_op sup_pro_ops;
 	int ret;
 
 	sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -320,8 +321,9 @@ static int iscsi_login_zero_tsih_s1(
 		kfree(sess);
 		return -ENOMEM;
 	}
+	sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
 
-	sess->se_sess = transport_init_session();
+	sess->se_sess = transport_init_session(sup_pro_ops);
 	if (IS_ERR(sess->se_sess)) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 44a5471de00ffe95c5fccdf5462931efa0acc361..eb96b20dc09e13ffe32e226df38b73241f176a63 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -225,6 +225,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
 	a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
 	a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
 	a->default_erl = TA_DEFAULT_ERL;
+	a->t10_pi = TA_DEFAULT_T10_PI;
 }
 
 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -500,6 +501,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
 	init_completion(&tpg_np->tpg_np_comp);
 	kref_init(&tpg_np->tpg_np_kref);
 	tpg_np->tpg_np		= np;
+	np->tpg_np		= tpg_np;
 	tpg_np->tpg		= tpg;
 
 	spin_lock(&tpg->tpg_np_lock);
@@ -858,3 +860,22 @@ int iscsit_ta_default_erl(
 
 	return 0;
 }
+
+int iscsit_ta_t10_pi(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->t10_pi = flag;
+	pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit: %s\n",
+		 tpg->tpgt, (a->t10_pi) ? "ON" : "OFF");
+
+	return 0;
+}
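
For completeness, the new attribute surfaces through the usual TPG attrib group in configfs; a hedged usage sketch (the exact path depends on the configured IQN and TPG):

```c
/*
 *	echo 1 > /sys/kernel/config/target/iscsi/<iqn>/tpgt_1/attrib/t10_pi
 *
 * isert reads it back as np->tpg_np->tpg->tpg_attrib.t10_pi when a
 * connect request arrives (see the ib_isert.c hunks above).
 */
```
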
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 213c0fc7fdc9058913bc3b0da38a7d7ef20455e2..0a182f2aa8a25ea07cad39ac58a0d954010abd8b 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@ extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
 
 #endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index e655b042ed1895cbb2390dfc2bc13adeed89d517..53e157cb8c547e7776717ffab50137b204bdef90 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
 }
 EXPORT_SYMBOL(iscsit_release_cmd);
 
-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
-			      bool check_queues)
+void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+		       bool check_queues)
 {
 	struct iscsi_conn *conn = cmd->conn;
 
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 561a424d19800f4540a97ce92312c04ff0f0bc12..a68508c4fec862b325c6a5015b9493871f166e08 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
 extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
 extern int iscsit_check_session_usage_count(struct iscsi_session *);
 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index fadad7c5f635f01496acdc79d56a678a4e4fb1a6..c886ad1c39fb357ddb6e72a384474bdc89b01278 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -212,6 +212,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
 		se_cmd->se_cmd_flags |= SCF_BIDI;
 
 	}
+
+	if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+		se_cmd->prot_pto = true;
+
 	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
 			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
@@ -915,6 +919,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 	wake_up(&tl_tmr->tl_tmr_wait);
 }
 
+static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 {
 	switch (tl_hba->tl_proto_id) {
@@ -1009,7 +1018,7 @@ static int tcm_loop_make_nexus(
 	/*
 	 * Initialize the struct se_session pointer
 	 */
-	tl_nexus->se_sess = transport_init_session();
+	tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
 	if (IS_ERR(tl_nexus->se_sess)) {
 		ret = PTR_ERR(tl_nexus->se_sess);
 		goto out;
@@ -1483,6 +1492,7 @@ static int tcm_loop_register_configfs(void)
 	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
 	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
 	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
+	fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
 
 	/*
 	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
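
The prot_pto hunk above flags a command as protection pass-through-only: the SCSI midlayer reports a protection operation for the command yet hands over no protection scatterlist, so the target core must not expect separate PI buffers from the fabric. The predicate, pulled out into a hypothetical helper for illustration:

/* Illustrative helper (not in the patch): true when PI is in effect
 * for the scsi_cmnd but no PI scatterlist accompanies it.
 */
static bool tcm_loop_pi_passthrough_only(struct scsi_cmnd *sc)
{
	return !scsi_prot_sg_count(sc) &&
	       scsi_get_prot_op(sc) != SCSI_PROT_NORMAL;
}
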
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 24884cac19ced8f9e6c0cae03787f7d8cb771880..e7e93727553cef8d3503201f381bd64b3f02d6b3 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -210,7 +210,7 @@ static struct sbp_session *sbp_session_create(
 		return ERR_PTR(-ENOMEM);
 	}
 
-	sess->se_sess = transport_init_session();
+	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(sess->se_sess)) {
 		pr_err("failed to init se_session\n");
 
@@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
 {
 }
 
+static void sbp_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static int sbp_check_stop_free(struct se_cmd *se_cmd)
 {
 	struct sbp_target_request *req = container_of(se_cmd,
@@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = {
 	.queue_data_in			= sbp_queue_data_in,
 	.queue_status			= sbp_queue_status,
 	.queue_tm_rsp			= sbp_queue_tm_rsp,
+	.aborted_task			= sbp_aborted_task,
 	.check_stop_free		= sbp_check_stop_free,
 
 	.fabric_make_wwn		= sbp_make_tport,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index c3d9df6aaf5f35bc4665355bf7cfe18c9e522aa7..fcbe6125b73ee51289ffe93f9d7951d3ff4485e2 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -455,11 +455,26 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 	return rc;
 }
 
-static inline int core_alua_state_nonoptimized(
+static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
+{
+	/*
+	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+	 * The ALUA additional sense code qualifier (ASCQ) is determined
+	 * by the ALUA primary or secondary access state.
+	 */
+	pr_debug("[%s]: ALUA TG Port not available, "
+		"SenseKey: NOT_READY, ASC/ASCQ: "
+		"0x04/0x%02x\n",
+		cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+	cmd->scsi_asc = 0x04;
+	cmd->scsi_ascq = alua_ascq;
+}
+
+static inline void core_alua_state_nonoptimized(
 	struct se_cmd *cmd,
 	unsigned char *cdb,
-	int nonop_delay_msecs,
-	u8 *alua_ascq)
+	int nonop_delay_msecs)
 {
 	/*
 	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
@@ -468,13 +483,11 @@ static inline int core_alua_state_nonoptimized(
 	 */
 	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
 	cmd->alua_nonop_delay = nonop_delay_msecs;
-	return 0;
 }
 
 static inline int core_alua_state_lba_dependent(
 	struct se_cmd *cmd,
-	struct t10_alua_tg_pt_gp *tg_pt_gp,
-	u8 *alua_ascq)
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
 	struct se_device *dev = cmd->se_dev;
 	u64 segment_size, segment_mult, sectors, lba;
@@ -520,7 +533,7 @@ static inline int core_alua_state_lba_dependent(
 		}
 		if (!cur_map) {
 			spin_unlock(&dev->t10_alua.lba_map_lock);
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 			return 1;
 		}
 		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
@@ -531,11 +544,11 @@ static inline int core_alua_state_lba_dependent(
 			switch(map_mem->lba_map_mem_alua_state) {
 			case ALUA_ACCESS_STATE_STANDBY:
 				spin_unlock(&dev->t10_alua.lba_map_lock);
-				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 				return 1;
 			case ALUA_ACCESS_STATE_UNAVAILABLE:
 				spin_unlock(&dev->t10_alua.lba_map_lock);
-				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 				return 1;
 			default:
 				break;
@@ -548,8 +561,7 @@ static inline int core_alua_state_lba_dependent(
 
 static inline int core_alua_state_standby(
 	struct se_cmd *cmd,
-	unsigned char *cdb,
-	u8 *alua_ascq)
+	unsigned char *cdb)
 {
 	/*
 	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
@@ -570,7 +582,7 @@ static inline int core_alua_state_standby(
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 			return 1;
 		}
 	case MAINTENANCE_OUT:
@@ -578,7 +590,7 @@ static inline int core_alua_state_standby(
 		case MO_SET_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 			return 1;
 		}
 	case REQUEST_SENSE:
@@ -588,7 +600,7 @@ static inline int core_alua_state_standby(
 	case WRITE_BUFFER:
 		return 0;
 	default:
-		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 		return 1;
 	}
 
@@ -597,8 +609,7 @@ static inline int core_alua_state_standby(
 
 static inline int core_alua_state_unavailable(
 	struct se_cmd *cmd,
-	unsigned char *cdb,
-	u8 *alua_ascq)
+	unsigned char *cdb)
 {
 	/*
 	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
@@ -613,7 +624,7 @@ static inline int core_alua_state_unavailable(
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 			return 1;
 		}
 	case MAINTENANCE_OUT:
@@ -621,7 +632,7 @@ static inline int core_alua_state_unavailable(
 		case MO_SET_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 			return 1;
 		}
 	case REQUEST_SENSE:
@@ -629,7 +640,7 @@ static inline int core_alua_state_unavailable(
 	case WRITE_BUFFER:
 		return 0;
 	default:
-		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 		return 1;
 	}
 
@@ -638,8 +649,7 @@ static inline int core_alua_state_unavailable(
 
 static inline int core_alua_state_transition(
 	struct se_cmd *cmd,
-	unsigned char *cdb,
-	u8 *alua_ascq)
+	unsigned char *cdb)
 {
 	/*
 	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
@@ -654,7 +664,7 @@ static inline int core_alua_state_transition(
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 			return 1;
 		}
 	case REQUEST_SENSE:
@@ -662,7 +672,7 @@ static inline int core_alua_state_transition(
 	case WRITE_BUFFER:
 		return 0;
 	default:
-		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 		return 1;
 	}
 
@@ -684,8 +694,6 @@ target_alua_state_check(struct se_cmd *cmd)
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	int out_alua_state, nonop_delay_msecs;
-	u8 alua_ascq;
-	int ret;
 
 	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
 		return 0;
@@ -701,9 +709,8 @@ target_alua_state_check(struct se_cmd *cmd)
 	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
 		pr_debug("ALUA: Got secondary offline status for local"
 				" target port\n");
-		alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-		ret = 1;
-		goto out;
+		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
+		return TCM_CHECK_CONDITION_NOT_READY;
 	}
 	 /*
 	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -731,20 +738,23 @@ target_alua_state_check(struct se_cmd *cmd)
 
 	switch (out_alua_state) {
 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
-		ret = core_alua_state_nonoptimized(cmd, cdb,
-					nonop_delay_msecs, &alua_ascq);
+		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
 		break;
 	case ALUA_ACCESS_STATE_STANDBY:
-		ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+		if (core_alua_state_standby(cmd, cdb))
+			return TCM_CHECK_CONDITION_NOT_READY;
 		break;
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
-		ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+		if (core_alua_state_unavailable(cmd, cdb))
+			return TCM_CHECK_CONDITION_NOT_READY;
 		break;
 	case ALUA_ACCESS_STATE_TRANSITION:
-		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+		if (core_alua_state_transition(cmd, cdb))
+			return TCM_CHECK_CONDITION_NOT_READY;
 		break;
 	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
-		ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
+			return TCM_CHECK_CONDITION_NOT_READY;
 		break;
 	/*
 	 * OFFLINE is a secondary ALUA target port group access state, that is
@@ -757,23 +767,6 @@ target_alua_state_check(struct se_cmd *cmd)
 		return TCM_INVALID_CDB_FIELD;
 	}
 
-out:
-	if (ret > 0) {
-		/*
-		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
-		 * The ALUA additional sense code qualifier (ASCQ) is determined
-		 * by the ALUA primary or secondary access state..
-		 */
-		pr_debug("[%s]: ALUA TG Port not available, "
-			"SenseKey: NOT_READY, ASC/ASCQ: "
-			"0x04/0x%02x\n",
-			cmd->se_tfo->get_fabric_name(), alua_ascq);
-
-		cmd->scsi_asc = 0x04;
-		cmd->scsi_ascq = alua_ascq;
-		return TCM_CHECK_CONDITION_NOT_READY;
-	}
-
 	return 0;
 }
 
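
set_ascq() only records the ASC/ASCQ pair on the descriptor; returning TCM_CHECK_CONDITION_NOT_READY makes the core build NOT READY sense around it later. For reference, a sketch of how those two bytes land in fixed-format sense data per SPC (hypothetical helper, not part of the patch):

static void alua_fill_not_ready_sense(u8 *sense, u8 ascq)
{
	sense[0] = 0x70;	/* current error, fixed format */
	sense[2] = 0x02;	/* sense key: NOT READY */
	sense[7] = 0x0a;	/* additional sense length */
	sense[12] = 0x04;	/* ASC: LOGICAL UNIT NOT ACCESSIBLE */
	sense[13] = ascq;	/* e.g. ASCQ_04H_ALUA_TG_PT_STANDBY */
}
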
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f0e85b1196926383149854c8e373104f1344ea59..60a9ae6df763d4928c4077ee210db39480872daa 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check(
 		pr_err("Missing tfo->queue_tm_rsp()\n");
 		return -EINVAL;
 	}
+	if (!tfo->aborted_task) {
+		pr_err("Missing tfo->aborted_task()\n");
+		return -EINVAL;
+	}
 	/*
 	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
 	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index cf991a91a8a9699f655fd325f654cb16ed3046d5..7d6cddaec5250cf28239c49b22eeb269a30e69d0 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -854,25 +854,6 @@ static int fd_init_prot(struct se_device *dev)
 	return 0;
 }
 
-static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
-			       u32 unit_size, u32 *ref_tag, u16 app_tag,
-			       bool inc_reftag)
-{
-	unsigned char *p = buf;
-	int i;
-
-	for (i = 0; i < unit_size; i += dev->prot_length) {
-		*((u16 *)&p[0]) = 0xffff;
-		*((__be16 *)&p[2]) = cpu_to_be16(app_tag);
-		*((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
-
-		if (inc_reftag)
-			(*ref_tag)++;
-
-		p += dev->prot_length;
-	}
-}
-
 static int fd_format_prot(struct se_device *dev)
 {
 	struct fd_dev *fd_dev = FD_DEV(dev);
@@ -880,10 +861,8 @@ static int fd_format_prot(struct se_device *dev)
 	sector_t prot_length, prot;
 	unsigned char *buf;
 	loff_t pos = 0;
-	u32 ref_tag = 0;
 	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
 	int rc, ret = 0, size, len;
-	bool inc_reftag = false;
 
 	if (!dev->dev_attrib.pi_prot_type) {
 		pr_err("Unable to format_prot while pi_prot_type == 0\n");
@@ -894,37 +873,20 @@ static int fd_format_prot(struct se_device *dev)
 		return -ENODEV;
 	}
 
-	switch (dev->dev_attrib.pi_prot_type) {
-	case TARGET_DIF_TYPE3_PROT:
-		ref_tag = 0xffffffff;
-		break;
-	case TARGET_DIF_TYPE2_PROT:
-	case TARGET_DIF_TYPE1_PROT:
-		inc_reftag = true;
-		break;
-	default:
-		break;
-	}
-
 	buf = vzalloc(unit_size);
 	if (!buf) {
 		pr_err("Unable to allocate FILEIO prot buf\n");
 		return -ENOMEM;
 	}
-
 	prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
 	size = prot_length;
 
 	pr_debug("Using FILEIO prot_length: %llu\n",
 		 (unsigned long long)prot_length);
 
+	memset(buf, 0xff, unit_size);
 	for (prot = 0; prot < prot_length; prot += unit_size) {
-
-		fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
-				   inc_reftag);
-
 		len = min(unit_size, size);
-
 		rc = kernel_write(prot_fd, buf, len, pos);
 		if (rc != len) {
 			pr_err("vfs_write to prot file failed: %d\n", rc);
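
The rewritten format loop drops the per-type reference-tag seeding and simply fills the protection file with 0xff. In DIF terms that is the escape pattern: an application tag of 0xffff (and, for Type 3, a reference tag of 0xffffffff) tells a checker to skip the block, so freshly formatted blocks verify cleanly as unwritten. A sketch of testing a tuple for that fill (hypothetical helper):

/* True when a PI tuple still carries the 0xff format fill. */
static bool pi_tuple_is_unwritten(const struct se_dif_v1_tuple *sdt)
{
	return sdt->app_tag == cpu_to_be16(0xffff);
}
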
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 554d4f75a75a6263ac7fb731804f787af64731ac..9e0232cca92e03356c768e3dada2a27f53427452 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -203,10 +203,9 @@ static void iblock_free_device(struct se_device *dev)
 
 	if (ib_dev->ibd_bd != NULL)
 		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-	if (ib_dev->ibd_bio_set != NULL) {
-		bioset_integrity_free(ib_dev->ibd_bio_set);
+	if (ib_dev->ibd_bio_set != NULL)
 		bioset_free(ib_dev->ibd_bio_set);
-	}
+
 	kfree(ib_dev);
 }
 
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 66a5aba5a0d9c6a6708582a026b25b0f6a17075e..b920db3388cdc19d4bef721a6c6d55680cfd2d8c 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -242,7 +242,7 @@ static void rd_release_prot_space(struct rd_dev *rd_dev)
 	rd_dev->sg_prot_count = 0;
 }
 
-static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
 {
 	struct rd_dev_sg_table *sg_table;
 	u32 total_sg_needed, sg_tables;
@@ -252,8 +252,13 @@ static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
 
 	if (rd_dev->rd_flags & RDF_NULLIO)
 		return 0;
-
-	total_sg_needed = rd_dev->rd_page_count / prot_length;
+	/*
+	 * prot_length: 8 bytes of DIF metadata per logical block.
+	 * tot sg needed = rd_page_count * (PGSZ / block_size) *
+	 *		   prot_length / PGSZ + pad;
+	 * the PGSZ factors cancel each other out.
+	 */
+	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
 
 	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
 
@@ -606,7 +611,8 @@ static int rd_init_prot(struct se_device *dev)
         if (!dev->dev_attrib.pi_prot_type)
 		return 0;
 
-	return rd_build_prot_space(rd_dev, dev->prot_length);
+	return rd_build_prot_space(rd_dev, dev->prot_length,
+				   dev->dev_attrib.block_size);
 }
 
 static void rd_free_prot(struct se_device *dev)
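
The sizing in rd_build_prot_space() is easiest to see with numbers. Assuming rd_page_count = 1024, PAGE_SIZE = 4096, block_size = 512 and prot_length = 8 (all illustrative):

/*
 * blocks   = 1024 pages * (4096 / 512)  = 8192 logical blocks
 * PI bytes = 8192 * 8                   = 65536 bytes = 16 pages,
 * which is exactly rd_page_count * prot_length / block_size
 *          = 1024 * 8 / 512             = 16, plus one for padding.
 */
u32 total_sg_needed = (1024 * 8 / 512) + 1;	/* = 17 entries */
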
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 77e6531fb0a1c0a25ed16b71d1936ec37c2ed50a..e0229592ec5509656aed292970af719be1f9111d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -89,6 +89,7 @@ static sense_reason_t
 sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
 	unsigned char *rbuf;
 	unsigned char buf[32];
 	unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -109,8 +110,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 	/*
 	 * Set P_TYPE and PROT_EN bits for DIF support
 	 */
-	if (dev->dev_attrib.pi_prot_type)
-		buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type)
+			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+	}
 
 	if (dev->transport->get_lbppbe)
 		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
@@ -425,13 +428,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
 		goto out;
 	}
 
-	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
 			   GFP_KERNEL);
 	if (!write_sg) {
 		pr_err("Unable to allocate compare_and_write sg\n");
 		ret = TCM_OUT_OF_RESOURCES;
 		goto out;
 	}
+	sg_init_table(write_sg, cmd->t_data_nents);
 	/*
 	 * Setup verify and write data payloads from total NumberLBAs.
 	 */
@@ -569,30 +573,85 @@ sbc_compare_and_write(struct se_cmd *cmd)
 	return TCM_NO_SENSE;
 }
 
+static int
+sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
+		       bool is_write, struct se_cmd *cmd)
+{
+	if (is_write) {
+		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
+					 TARGET_PROT_DOUT_INSERT;
+		switch (protect) {
+		case 0x0:
+		case 0x3:
+			cmd->prot_checks = 0;
+			break;
+		case 0x1:
+		case 0x5:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x2:
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x4:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			break;
+		default:
+			pr_err("Unsupported protect field %d\n", protect);
+			return -EINVAL;
+		}
+	} else {
+		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
+					 TARGET_PROT_DIN_STRIP;
+		switch (protect) {
+		case 0x0:
+		case 0x1:
+		case 0x5:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x2:
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x3:
+			cmd->prot_checks = 0;
+			break;
+		case 0x4:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			break;
+		default:
+			pr_err("Unsupported protect field %d\n", protect);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static bool
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
-	       u32 sectors)
+	       u32 sectors, bool is_write)
 {
-	if (!cmd->t_prot_sg || !cmd->t_prot_nents)
+	u8 protect = cdb[1] >> 5;
+
+	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
 		return true;
 
 	switch (dev->dev_attrib.pi_prot_type) {
 	case TARGET_DIF_TYPE3_PROT:
-		if (!(cdb[1] & 0xe0))
-			return true;
-
 		cmd->reftag_seed = 0xffffffff;
 		break;
 	case TARGET_DIF_TYPE2_PROT:
-		if (cdb[1] & 0xe0)
+		if (protect)
 			return false;
 
 		cmd->reftag_seed = cmd->t_task_lba;
 		break;
 	case TARGET_DIF_TYPE1_PROT:
-		if (!(cdb[1] & 0xe0))
-			return true;
-
 		cmd->reftag_seed = cmd->t_task_lba;
 		break;
 	case TARGET_DIF_TYPE0_PROT:
@@ -600,9 +659,15 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 		return true;
 	}
 
+	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
+				   is_write, cmd))
+		return false;
+
 	cmd->prot_type = dev->dev_attrib.pi_prot_type;
 	cmd->prot_length = dev->prot_length * sectors;
-	cmd->prot_handover = PROT_SEPERATED;
+	pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
+		 __func__, cmd->prot_type, cmd->prot_length,
+		 cmd->prot_op, cmd->prot_checks);
 
 	return true;
 }
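
The protect value consumed above is the RDPROTECT/WRPROTECT field from byte 1 of the CDB; SBC-3 defines which DIF checks each value requests, and sbc_set_prot_op_checks() translates that into TARGET_DIF_CHECK_* bits. A decode sketch for a WRITE(10):

/* WRPROTECT lives in the top three bits of CDB byte 1.  protect == 0
 * means the initiator sent no PI (target must INSERT it); non-zero
 * means PI travels with the data and is PASSed with selected checks.
 */
unsigned char cdb[10] = { [1] = 0x20 };	/* WRPROTECT = 001b */
u8 protect = cdb[1] >> 5;	/* = 0x1: check guard (+ ref tag on Type 1) */
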
@@ -628,7 +693,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -639,7 +704,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -650,7 +715,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -669,7 +734,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		if (cdb[1] & 0x8)
@@ -682,7 +747,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		if (cdb[1] & 0x8)
@@ -695,7 +760,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		if (cdb[1] & 0x8)
@@ -1031,6 +1096,50 @@ sbc_execute_unmap(struct se_cmd *cmd,
 }
 EXPORT_SYMBOL(sbc_execute_unmap);
 
+void
+sbc_dif_generate(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dif_v1_tuple *sdt;
+	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+	sector_t sector = cmd->t_task_lba;
+	void *daddr, *paddr;
+	int i, j, offset = 0;
+
+	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+			if (offset >= psg->length) {
+				kunmap_atomic(paddr);
+				psg = sg_next(psg);
+				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+				offset = 0;
+			}
+
+			sdt = paddr + offset;
+			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
+						dev->dev_attrib.block_size));
+			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+			sdt->app_tag = 0;
+
+			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			sector++;
+			offset += sizeof(struct se_dif_v1_tuple);
+		}
+
+		kunmap_atomic(paddr);
+		kunmap_atomic(daddr);
+	}
+}
+
 static sense_reason_t
 sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
 		  const void *p, sector_t sector, unsigned int ei_lba)
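
sbc_dif_generate() emits one 8-byte tuple per logical block: the guard tag is the CRC16-T10DIF of the block payload, the reference tag (Type 1 only) is the low 32 bits of the LBA, and the application tag is left zero. Condensed to a single block, assuming 512-byte blocks:

#include <linux/crc-t10dif.h>

/* Sketch: build one Type 1 DIF tuple for the block at 'sector'. */
static void dif_generate_one(struct se_dif_v1_tuple *sdt,
			     const void *buf, sector_t sector)
{
	sdt->guard_tag = cpu_to_be16(crc_t10dif(buf, 512));
	sdt->app_tag   = 0;
	sdt->ref_tag   = cpu_to_be32(sector & 0xffffffff);
}
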
@@ -1162,9 +1271,9 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
 }
 EXPORT_SYMBOL(sbc_dif_verify_write);
 
-sense_reason_t
-sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
-		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+static sense_reason_t
+__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_dif_v1_tuple *sdt;
@@ -1217,8 +1326,31 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
 		kunmap_atomic(paddr);
 		kunmap_atomic(daddr);
 	}
-	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
 
 	return 0;
 }
+
+sense_reason_t
+sbc_dif_read_strip(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	u32 sectors = cmd->prot_length / dev->prot_length;
+
+	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+				     cmd->t_prot_sg, 0);
+}
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+	sense_reason_t rc;
+
+	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
+	if (rc)
+		return rc;
+
+	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+	return 0;
+}
 EXPORT_SYMBOL(sbc_dif_verify_read);
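
The split keeps one verification engine for both session types: DIN_PASS sessions go through sbc_dif_verify_read(), which verifies and then copies the PI back out to the fabric, while DIN_STRIP sessions use sbc_dif_read_strip(), which verifies and drops it. The sector count there falls straight out of the lengths set in sbc_check_prot():

/* prot_length = dev->prot_length * sectors, so e.g. for 128 blocks:
 * prot_length = 8 * 128 = 1024  =>  sectors = 1024 / 8 = 128
 */
u32 sectors = cmd->prot_length / dev->prot_length;
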
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 3bebc71ea033908e8e64843ba98b295b030a78ad..8653666612a802f5cbfcde2c4d82ac7d20ca9294 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -71,6 +71,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
 
 	/* Set RMB (removable media) for tape devices */
 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
@@ -101,10 +102,13 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
 	if (dev->dev_attrib.emulate_3pc)
 		buf[5] |= 0x8;
 	/*
-	 * Set Protection (PROTECT) bit when DIF has been enabled.
+	 * Set Protection (PROTECT) bit when DIF has been enabled on the
+	 * device, and the transport supports VERIFY + PASS.
 	 */
-	if (dev->dev_attrib.pi_prot_type)
-		buf[5] |= 0x1;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type)
+			buf[5] |= 0x1;
+	}
 
 	buf[7] = 0x2; /* CmdQue=1 */
 
@@ -473,16 +477,19 @@ static sense_reason_t
 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
 
 	buf[3] = 0x3c;
 	/*
 	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
 	 * only for TYPE3 protection.
 	 */
-	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
-		buf[4] = 0x5;
-	else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
-		buf[4] = 0x4;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+			buf[4] = 0x5;
+		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+			buf[4] = 0x4;
+	}
 
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
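
Both INQUIRY changes in this file follow the same rule: PI is only advertised when the session can actually pass it through. In standard INQUIRY data that is the PROTECT bit (0x1) in byte 5; here, in the Extended INQUIRY Data VPD page (0x86), byte 4 carries the per-field check bits, so 0x5 means guard plus reference tag (Type 1) and 0x4 guard only (Type 3, which has no fixed reference tag). Per SPC-4, with hypothetical mask names:

/* VPD 0x86 byte 4 check bits (names illustrative, not kernel ones) */
#define EVPD86_GRD_CHK	0x4	/* guard tag is checked */
#define EVPD86_APP_CHK	0x2	/* application tag is checked */
#define EVPD86_REF_CHK	0x1	/* reference tag is checked */
/* TYPE1 -> GRD_CHK | REF_CHK = 0x5;  TYPE3 -> GRD_CHK = 0x4 */
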
@@ -762,7 +769,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 	return ret;
 }
 
-static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
 {
 	p[0] = 0x01;
 	p[1] = 0x0a;
@@ -775,8 +782,11 @@ static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
 	return 12;
 }
 
-static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
 {
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+
 	p[0] = 0x0a;
 	p[1] = 0x0a;
 
@@ -868,8 +878,10 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
 	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
 	 * TAG field.
 	 */
-	if (dev->dev_attrib.pi_prot_type)
-		p[5] |= 0x80;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type)
+			p[5] |= 0x80;
+	}
 
 	p[8] = 0xff;
 	p[9] = 0xff;
@@ -879,8 +891,10 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
 	return 12;
 }
 
-static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
 {
+	struct se_device *dev = cmd->se_dev;
+
 	p[0] = 0x08;
 	p[1] = 0x12;
 
@@ -896,7 +910,7 @@ static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
 	return 20;
 }
 
-static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
 {
 	p[0] = 0x1c;
 	p[1] = 0x0a;
@@ -912,7 +926,7 @@ static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc,
 static struct {
 	uint8_t		page;
 	uint8_t		subpage;
-	int		(*emulate)(struct se_device *, u8, unsigned char *);
+	int		(*emulate)(struct se_cmd *, u8, unsigned char *);
 } modesense_handlers[] = {
 	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
 	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
@@ -1050,7 +1064,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 			 * the only two possibilities).
 			 */
 			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
-				ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
 				if (!ten && length + ret >= 255)
 					break;
 				length += ret;
@@ -1063,7 +1077,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
 		if (modesense_handlers[i].page == page &&
 		    modesense_handlers[i].subpage == subpage) {
-			length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
 			goto set_length;
 		}
 
@@ -1095,7 +1109,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 
 static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_dev;
 	char *cdb = cmd->t_task_cdb;
 	bool ten = cdb[0] == MODE_SELECT_10;
 	int off = ten ? 8 : 4;
@@ -1131,7 +1144,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 		if (modesense_handlers[i].page == page &&
 		    modesense_handlers[i].subpage == subpage) {
 			memset(tbuf, 0, SE_MODE_PAGE_BUF);
-			length = modesense_handlers[i].emulate(dev, 0, tbuf);
+			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
 			goto check_contents;
 		}
 
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 70c638f730af078e25e4ca75b45372e3d16ac24a..f7cd95e8111a7e496cf198c9f6a7ec18f854942b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -87,14 +87,17 @@ static void core_tmr_handle_tas_abort(
 	struct se_cmd *cmd,
 	int tas)
 {
+	bool remove = true;
 	/*
 	 * TASK ABORTED status (TAS) bit support
 	*/
 	if ((tmr_nacl &&
-	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+	     (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+		remove = false;
 		transport_send_task_abort(cmd);
+	}
 
-	transport_cmd_finish_abort(cmd, 0);
+	transport_cmd_finish_abort(cmd, remove);
 }
 
 static int target_check_cdb_and_preempt(struct list_head *list,
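
The rewritten condition matches SAM's TASK ABORTED status rules: TAS is owed only when the aborted command arrived on a different I_T nexus than the one issuing the TMR and the TAS bit is set; in every other case the command is torn down silently with remove left true. As a truth table:

/*
 * tmr_nacl != cmd nexus | tas | action
 * ----------------------+-----+------------------------------------
 *          yes          |  1  | send SAM_STAT_TASK_ABORTED status,
 *                       |     | remove = false (status path finishes)
 *          yes          |  0  | finish abort with remove = true
 *          no / no nacl |  x  | finish abort with remove = true
 */
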
@@ -127,6 +130,11 @@ void core_tmr_abort_task(
 
 		if (dev != se_cmd->se_dev)
 			continue;
+
+		/* skip se_cmd associated with tmr */
+		if (tmr->task_cmd == se_cmd)
+			continue;
+
 		ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
@@ -150,18 +158,9 @@ void core_tmr_abort_task(
 
 		cancel_work_sync(&se_cmd->work);
 		transport_wait_for_tasks(se_cmd);
-		/*
-		 * Now send SAM_STAT_TASK_ABORTED status for the referenced
-		 * se_cmd descriptor..
-		 */
-		transport_send_task_abort(se_cmd);
-		/*
-		 * Also deal with possible extra acknowledge reference..
-		 */
-		if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
-			target_put_sess_cmd(se_sess, se_cmd);
 
 		target_put_sess_cmd(se_sess, se_cmd);
+		transport_cmd_finish_abort(se_cmd, true);
 
 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
 				" ref_tag: %d\n", ref_tag);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2956250b7225c99d77a7475b27d1bf881231539f..d4b98690a73680244676b6e608ede6c85ff724cb 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -235,7 +235,7 @@ void transport_subsystem_check_init(void)
 	sub_api_initialized = 1;
 }
 
-struct se_session *transport_init_session(void)
+struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
 
@@ -251,6 +251,7 @@ struct se_session *transport_init_session(void)
 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
 	spin_lock_init(&se_sess->sess_cmd_lock);
 	kref_init(&se_sess->sess_kref);
+	se_sess->sup_prot_ops = sup_prot_ops;
 
 	return se_sess;
 }
@@ -288,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess,
 EXPORT_SYMBOL(transport_alloc_session_tags);
 
 struct se_session *transport_init_session_tags(unsigned int tag_num,
-					       unsigned int tag_size)
+					       unsigned int tag_size,
+					       enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
 	int rc;
 
-	se_sess = transport_init_session();
+	se_sess = transport_init_session(sup_prot_ops);
 	if (IS_ERR(se_sess))
 		return se_sess;
 
@@ -603,6 +605,15 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
+	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+		transport_lun_remove_cmd(cmd);
+	/*
+	 * Allow the fabric driver to unmap any resources before
+	 * releasing the descriptor via TFO->release_cmd()
+	 */
+	if (remove)
+		cmd->se_tfo->aborted_task(cmd);
+
 	if (transport_cmd_check_stop_to_fabric(cmd))
 		return;
 	if (remove)
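
The new aborted_task() hook runs before TFO->release_cmd() so a fabric can unmap or quiesce hardware resources still tied to the descriptor. The fabrics touched by this series stub it out; a hedged sketch of a non-trivial one for a hypothetical DMA-capable fabric (all names illustrative):

static void example_fabric_aborted_task(struct se_cmd *se_cmd)
{
	struct example_cmd *cmd =
		container_of(se_cmd, struct example_cmd, se_cmd);

	/* hypothetical: cancel outstanding hardware work, then unmap */
	example_hw_cancel(cmd);
	example_unmap_sgl(cmd);
}
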
@@ -1365,6 +1376,13 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
 		target_put_sess_cmd(se_sess, se_cmd);
 		return 0;
 	}
+
+	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+	if (rc != 0) {
+		transport_generic_request_failure(se_cmd, rc);
+		return 0;
+	}
+
 	/*
 	 * Save pointers for SGLs containing protection information,
 	 * if present.
@@ -1374,11 +1392,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
 		se_cmd->t_prot_nents = sgl_prot_count;
 	}
 
-	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
-	if (rc != 0) {
-		transport_generic_request_failure(se_cmd, rc);
-		return 0;
-	}
 	/*
 	 * When a non zero sgl_count has been passed perform SGL passthrough
 	 * mapping for pre-allocated fabric memory instead of having target
@@ -1754,6 +1767,15 @@ void target_execute_cmd(struct se_cmd *cmd)
 	cmd->t_state = TRANSPORT_PROCESSING;
 	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
+	/*
+	 * Perform WRITE_INSERT of PI using software emulation when the
+	 * backend device has PI enabled, if the transport has not already
+	 * generated PI using hardware WRITE_INSERT offload.
+	 */
+	if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
+		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+			sbc_dif_generate(cmd);
+	}
 
 	if (target_handle_task_attr(cmd)) {
 		spin_lock_irq(&cmd->t_state_lock);
@@ -1883,6 +1905,21 @@ static void transport_handle_queue_full(
 	schedule_work(&cmd->se_dev->qf_work_queue);
 }
 
+static bool target_check_read_strip(struct se_cmd *cmd)
+{
+	sense_reason_t rc;
+
+	if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+		rc = sbc_dif_read_strip(cmd);
+		if (rc) {
+			cmd->pi_err = rc;
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static void target_complete_ok_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -1947,6 +1984,22 @@ static void target_complete_ok_work(struct work_struct *work)
 					cmd->data_length;
 		}
 		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * Perform READ_STRIP of PI using software emulation when
+		 * the backend has PI enabled, if the transport will not be
+		 * performing hardware READ_STRIP offload.
+		 */
+		if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
+		    target_check_read_strip(cmd)) {
+			ret = transport_send_check_condition_and_sense(cmd,
+						cmd->pi_err, 0);
+			if (ret == -EAGAIN || ret == -ENOMEM)
+				goto queue_full;
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
 
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
@@ -2039,6 +2092,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
 	cmd->t_bidi_data_sg = NULL;
 	cmd->t_bidi_data_nents = 0;
+
+	transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+	cmd->t_prot_sg = NULL;
+	cmd->t_prot_nents = 0;
 }
 
 /**
@@ -2202,6 +2259,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		}
 
+		if (cmd->prot_op != TARGET_PROT_NORMAL) {
+			ret = target_alloc_sgl(&cmd->t_prot_sg,
+					       &cmd->t_prot_nents,
+					       cmd->prot_length, true);
+			if (ret < 0)
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+
 		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
 				       cmd->data_length, zero_flag);
 		if (ret < 0)
@@ -2770,13 +2835,17 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	if (!(cmd->transport_state & CMD_T_ABORTED))
 		return 0;
 
-	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+	/*
+	 * If cmd has been aborted but either no status is to be sent or it has
+	 * already been sent, just return
+	 */
+	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
 		return 1;
 
 	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
 		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
 
-	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 	trace_target_cmd_complete(cmd);
 	cmd->se_tfo->queue_status(cmd);
@@ -2790,7 +2859,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
@@ -2805,6 +2874,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
 			cmd->transport_state |= CMD_T_ABORTED;
+			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
 			smp_mb__after_atomic_inc();
 			return;
 		}
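
After the rename, SCF_SEND_DELAYED_TAS reads as "a TASK ABORTED status is owed but not yet sent": transport_send_task_abort() sets it when write data is still pending, and transport_check_aborted_status() clears it when the status finally goes out, so the status is delivered exactly once. The lifecycle:

/*
 * 1) abort arrives while DMA_TO_DEVICE data is outstanding:
 *      set CMD_T_ABORTED, set SCF_SEND_DELAYED_TAS, return
 * 2) later, transport_check_aborted_status(cmd, 1):
 *      flag set   -> clear it, queue SAM_STAT_TASK_ABORTED once
 *      flag clear -> nothing owed, just return 1
 */
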
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 752863acecb8ce5716d99fc81172f52d8568937a..a0bcfd3e7e7d5924b06c56928d8c51989c6c66da 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -94,20 +94,19 @@ struct ft_lun {
  */
 struct ft_tpg {
 	u32 index;
-	struct ft_lport_acl *lport_acl;
+	struct ft_lport_wwn *lport_wwn;
 	struct ft_tport *tport;		/* active tport or NULL */
-	struct list_head list;		/* linkage in ft_lport_acl tpg_list */
 	struct list_head lun_list;	/* head of LUNs */
 	struct se_portal_group se_tpg;
 	struct workqueue_struct *workqueue;
 };
 
-struct ft_lport_acl {
+struct ft_lport_wwn {
 	u64 wwpn;
 	char name[FT_NAMELEN];
-	struct list_head list;
-	struct list_head tpg_list;
-	struct se_wwn fc_lport_wwn;
+	struct list_head ft_wwn_node;
+	struct ft_tpg *tpg;
+	struct se_wwn se_wwn;
 };
 
 /*
@@ -128,7 +127,6 @@ struct ft_cmd {
 	u32 sg_cnt;			/* No. of item in scatterlist */
 };
 
-extern struct list_head ft_lport_list;
 extern struct mutex ft_lport_lock;
 extern struct fc4_prov ft_prov;
 extern struct target_fabric_configfs *ft_configfs;
@@ -163,6 +161,7 @@ int ft_write_pending_status(struct se_cmd *);
 u32 ft_get_task_tag(struct se_cmd *);
 int ft_get_cmd_state(struct se_cmd *);
 void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
 
 /*
  * other internal functions.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 8b2c1aaf81dede06ebe9263f5c2d010ad4f8a997..01cf37f212c30724ed6a0addbe8c7cbe69dfd6a3 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
 	ft_send_resp_code(cmd, code);
 }
 
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static void ft_send_work(struct work_struct *work);
 
 /*
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index e879da81ad9303c2ad1d0d4d0487662523bc1b1c..efdcb9663a1a699520d3543b544ebac5c34ad860 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -50,7 +50,7 @@
 
 struct target_fabric_configfs *ft_configfs;
 
-LIST_HEAD(ft_lport_list);
+static LIST_HEAD(ft_wwn_list);
 DEFINE_MUTEX(ft_lport_lock);
 
 unsigned int ft_debug_logging;
@@ -298,7 +298,7 @@ static struct se_portal_group *ft_add_tpg(
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
+	struct ft_lport_wwn *ft_wwn;
 	struct ft_tpg *tpg;
 	struct workqueue_struct *wq;
 	unsigned long index;
@@ -318,12 +318,17 @@ static struct se_portal_group *ft_add_tpg(
 	if (index > UINT_MAX)
 		return NULL;
 
-	lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+	if (index != 1) {
+		pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 	if (!tpg)
 		return NULL;
 	tpg->index = index;
-	tpg->lport_acl = lacl;
+	tpg->lport_wwn = ft_wwn;
 	INIT_LIST_HEAD(&tpg->lun_list);
 
 	wq = alloc_workqueue("tcm_fc", 0, 1);
@@ -342,7 +347,7 @@ static struct se_portal_group *ft_add_tpg(
 	tpg->workqueue = wq;
 
 	mutex_lock(&ft_lport_lock);
-	list_add_tail(&tpg->list, &lacl->tpg_list);
+	ft_wwn->tpg = tpg;
 	mutex_unlock(&ft_lport_lock);
 
 	return &tpg->se_tpg;
@@ -351,6 +356,7 @@ static struct se_portal_group *ft_add_tpg(
 static void ft_del_tpg(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+	struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
 
 	pr_debug("del tpg %s\n",
 		    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -361,7 +367,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
 	synchronize_rcu();
 
 	mutex_lock(&ft_lport_lock);
-	list_del(&tpg->list);
+	ft_wwn->tpg = NULL;
 	if (tpg->tport) {
 		tpg->tport->tpg = NULL;
 		tpg->tport = NULL;
@@ -380,15 +386,11 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
  */
 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_tpg *tpg;
+	struct ft_lport_wwn *ft_wwn;
 
-	list_for_each_entry(lacl, &ft_lport_list, list) {
-		if (lacl->wwpn == lport->wwpn) {
-			list_for_each_entry(tpg, &lacl->tpg_list, list)
-				return tpg; /* XXX for now return first entry */
-			return NULL;
-		}
+	list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (ft_wwn->wwpn == lport->wwpn)
+			return ft_wwn->tpg;
 	}
 	return NULL;
 }
@@ -401,50 +403,49 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
  * Add lport to allowed config.
  * The name is the WWPN in lower-case ASCII, colon-separated bytes.
  */
-static struct se_wwn *ft_add_lport(
+static struct se_wwn *ft_add_wwn(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_lport_acl *old_lacl;
+	struct ft_lport_wwn *ft_wwn;
+	struct ft_lport_wwn *old_ft_wwn;
 	u64 wwpn;
 
-	pr_debug("add lport %s\n", name);
+	pr_debug("add wwn %s\n", name);
 	if (ft_parse_wwn(name, &wwpn, 1) < 0)
 		return NULL;
-	lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
-	if (!lacl)
+	ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+	if (!ft_wwn)
 		return NULL;
-	lacl->wwpn = wwpn;
-	INIT_LIST_HEAD(&lacl->tpg_list);
+	ft_wwn->wwpn = wwpn;
 
 	mutex_lock(&ft_lport_lock);
-	list_for_each_entry(old_lacl, &ft_lport_list, list) {
-		if (old_lacl->wwpn == wwpn) {
+	list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (old_ft_wwn->wwpn == wwpn) {
 			mutex_unlock(&ft_lport_lock);
-			kfree(lacl);
+			kfree(ft_wwn);
 			return NULL;
 		}
 	}
-	list_add_tail(&lacl->list, &ft_lport_list);
-	ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+	list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+	ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
 	mutex_unlock(&ft_lport_lock);
 
-	return &lacl->fc_lport_wwn;
+	return &ft_wwn->se_wwn;
 }
 
-static void ft_del_lport(struct se_wwn *wwn)
+static void ft_del_wwn(struct se_wwn *wwn)
 {
-	struct ft_lport_acl *lacl = container_of(wwn,
-				struct ft_lport_acl, fc_lport_wwn);
+	struct ft_lport_wwn *ft_wwn = container_of(wwn,
+				struct ft_lport_wwn, se_wwn);
 
-	pr_debug("del lport %s\n", lacl->name);
+	pr_debug("del wwn %s\n", ft_wwn->name);
 	mutex_lock(&ft_lport_lock);
-	list_del(&lacl->list);
+	list_del(&ft_wwn->ft_wwn_node);
 	mutex_unlock(&ft_lport_lock);
 
-	kfree(lacl);
+	kfree(ft_wwn);
 }
 
 static ssize_t ft_wwn_show_attr_version(
@@ -471,7 +472,7 @@ static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return tpg->lport_acl->name;
+	return tpg->lport_wwn->name;
 }
 
 static u16 ft_get_tag(struct se_portal_group *se_tpg)
@@ -536,12 +537,13 @@ static struct target_core_fabric_ops ft_fabric_ops = {
 	.queue_data_in =		ft_queue_data_in,
 	.queue_status =			ft_queue_status,
 	.queue_tm_rsp =			ft_queue_tm_resp,
+	.aborted_task =			ft_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
 	 */
-	.fabric_make_wwn =		&ft_add_lport,
-	.fabric_drop_wwn =		&ft_del_lport,
+	.fabric_make_wwn =		&ft_add_wwn,
+	.fabric_drop_wwn =		&ft_del_wwn,
 	.fabric_make_tpg =		&ft_add_tpg,
 	.fabric_drop_tpg =		&ft_del_tpg,
 	.fabric_post_link =		NULL,
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index ae52c08dad09071114e730c44b142f49bde1924e..21ce50880c79d1480166b08b52b4bb81cc85d4d6 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -51,7 +51,7 @@ static void ft_sess_delete_all(struct ft_tport *);
  * Lookup or allocate target local port.
  * Caller holds ft_lport_lock.
  */
-static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
 {
 	struct ft_tpg *tpg;
 	struct ft_tport *tport;
@@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
 
 	if (tport) {
 		tport->tpg = tpg;
+		tpg->tport = tport;
 		return tport;
 	}
 
@@ -114,7 +115,7 @@ static void ft_tport_delete(struct ft_tport *tport)
 void ft_lport_add(struct fc_lport *lport, void *arg)
 {
 	mutex_lock(&ft_lport_lock);
-	ft_tport_create(lport);
+	ft_tport_get(lport);
 	mutex_unlock(&ft_lport_lock);
 }
 
@@ -211,7 +212,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 		return NULL;
 
 	sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
-						    sizeof(struct ft_cmd));
+						    sizeof(struct ft_cmd),
+						    TARGET_PROT_NORMAL);
 	if (IS_ERR(sess->se_sess)) {
 		kfree(sess);
 		return NULL;
@@ -350,7 +352,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
 	struct ft_node_acl *acl;
 	u32 fcp_parm;
 
-	tport = ft_tport_create(rdata->local_port);
+	tport = ft_tport_get(rdata->local_port);
 	if (!tport)
 		goto not_target;	/* not a target for this local port */
 
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 460c266b8e24bf2bf3c4b8459f2cb5443b9694e2..f058c0368d61a4b4663bca3bd58a3558e6c5ec04 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
 {
 }
 
+static void usbg_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static const char *usbg_check_wwn(const char *name)
 {
 	const char *n;
@@ -1726,7 +1731,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
 		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
 		goto err_unlock;
 	}
-	tv_nexus->tvn_se_sess = transport_init_session();
+	tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(tv_nexus->tvn_se_sess))
 		goto err_free;
 
@@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = {
 	.queue_data_in			= usbg_send_read_response,
 	.queue_status			= usbg_send_status_response,
 	.queue_tm_rsp			= usbg_queue_tm_rsp,
+	.aborted_task			= usbg_aborted_task,
 	.check_stop_free		= usbg_check_stop_free,
 
 	.fabric_make_wwn		= usbg_make_tport,
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e48d4a672580cd5eefaf741946435073a4e58e8c..cf50ce93975bcddb240c3356b4326aac0fc8c3cb 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
 	return;
 }
 
+static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
 {
 	vs->vs_events_nr--;
@@ -1740,7 +1745,8 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
 	 */
 	tv_nexus->tvn_se_sess = transport_init_session_tags(
 					TCM_VHOST_DEFAULT_TAGS,
-					sizeof(struct tcm_vhost_cmd));
+					sizeof(struct tcm_vhost_cmd),
+					TARGET_PROT_NORMAL);
 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
 		mutex_unlock(&tpg->tv_tpg_mutex);
 		kfree(tv_nexus);
@@ -2131,6 +2137,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = {
 	.queue_data_in			= tcm_vhost_queue_data_in,
 	.queue_status			= tcm_vhost_queue_status,
 	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
+	.aborted_task			= tcm_vhost_aborted_task,
 	/*
 	 * Setup callers for generic logic in target_core_fabric_configfs.c
 	 */
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 4483fadfa68d8fdc744743d6bc5bcd52de301608..33b487b5da92dc76f549df5d09a3852518c14159 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -21,6 +21,8 @@ struct iscsit_transport {
 	int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
 	int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
 	int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
+	void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
+	enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
 };
 
 static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 7020e33e742e595c50d51aa57e83f546c5e57a50..3a1c1eea1fffcaee767d4bef1a8ea2700f1884e2 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -73,10 +73,12 @@ sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
 	sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
 				      sector_t lba, sector_t nolb),
 	void *priv);
+void	sbc_dif_generate(struct se_cmd *);
 sense_reason_t	sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int,
 				     unsigned int, struct scatterlist *, int);
 sense_reason_t	sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int,
 				    unsigned int, struct scatterlist *, int);
+sense_reason_t	sbc_dif_read_strip(struct se_cmd *);
 
 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1772fadcff62aaa8ab1e3ab2b4a7aef15de87fa7..9ec9864ecf38629469ef0823baf153c33fb09472 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -162,7 +162,7 @@ enum se_cmd_flags_table {
 	SCF_SENT_CHECK_CONDITION	= 0x00000800,
 	SCF_OVERFLOW_BIT		= 0x00001000,
 	SCF_UNDERFLOW_BIT		= 0x00002000,
-	SCF_SENT_DELAYED_TAS		= 0x00004000,
+	SCF_SEND_DELAYED_TAS		= 0x00004000,
 	SCF_ALUA_NON_OPTIMIZED		= 0x00008000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
 	SCF_ACK_KREF			= 0x00040000,
@@ -442,19 +442,18 @@ struct se_tmr_req {
 };
 
 enum target_prot_op {
-	TARGET_PROT_NORMAL = 0,
-	TARGET_PROT_DIN_INSERT,
-	TARGET_PROT_DOUT_INSERT,
-	TARGET_PROT_DIN_STRIP,
-	TARGET_PROT_DOUT_STRIP,
-	TARGET_PROT_DIN_PASS,
-	TARGET_PROT_DOUT_PASS,
+	TARGET_PROT_NORMAL	= 0,
+	TARGET_PROT_DIN_INSERT	= (1 << 0),
+	TARGET_PROT_DOUT_INSERT	= (1 << 1),
+	TARGET_PROT_DIN_STRIP	= (1 << 2),
+	TARGET_PROT_DOUT_STRIP	= (1 << 3),
+	TARGET_PROT_DIN_PASS	= (1 << 4),
+	TARGET_PROT_DOUT_PASS	= (1 << 5),
 };
 
-enum target_prot_ho {
-	PROT_SEPERATED,
-	PROT_INTERLEAVED,
-};
+#define TARGET_PROT_ALL	(TARGET_PROT_DIN_INSERT | TARGET_PROT_DOUT_INSERT | \
+			 TARGET_PROT_DIN_STRIP | TARGET_PROT_DOUT_STRIP | \
+			 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)
 
 enum target_prot_type {
 	TARGET_DIF_TYPE0_PROT,
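
With target_prot_op now a bit mask, a session can advertise several protection capabilities at once and callers test them with plain masking, e.g.:

/* capability tests against se_sess->sup_prot_ops: */
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS))
	pi_passthrough_supported = true;

/* a fully PI-capable fabric registers everything at session init: */
se_sess = transport_init_session(TARGET_PROT_ALL);
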
@@ -463,6 +462,12 @@ enum target_prot_type {
 	TARGET_DIF_TYPE3_PROT,
 };
 
+enum target_core_dif_check {
+	TARGET_DIF_CHECK_GUARD  = 0x1 << 0,
+	TARGET_DIF_CHECK_APPTAG = 0x1 << 1,
+	TARGET_DIF_CHECK_REFTAG = 0x1 << 2,
+};
+
 struct se_dif_v1_tuple {
 	__be16			guard_tag;
 	__be16			app_tag;
@@ -556,13 +561,14 @@ struct se_cmd {
 	/* DIF related members */
 	enum target_prot_op	prot_op;
 	enum target_prot_type	prot_type;
+	u8			prot_checks;
 	u32			prot_length;
 	u32			reftag_seed;
 	struct scatterlist	*t_prot_sg;
 	unsigned int		t_prot_nents;
-	enum target_prot_ho	prot_handover;
 	sense_reason_t		pi_err;
 	sector_t		bad_sector;
+	bool			prot_pto;
 };
 
 struct se_ua {
@@ -603,6 +609,7 @@ struct se_node_acl {
 struct se_session {
 	unsigned		sess_tearing_down:1;
 	u64			sess_bin_isid;
+	enum target_prot_op	sup_prot_ops;
 	struct se_node_acl	*se_node_acl;
 	struct se_portal_group *se_tpg;
 	void			*fabric_sess_ptr;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 0218d689b3d787d9d98ecf507de7b4caba6a3146..22a4e98eec807ecaa93a735ad63fd05ef26809a9 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -62,6 +62,7 @@ struct target_core_fabric_ops {
 	int (*queue_data_in)(struct se_cmd *);
 	int (*queue_status)(struct se_cmd *);
 	void (*queue_tm_rsp)(struct se_cmd *);
+	void (*aborted_task)(struct se_cmd *);
 	/*
 	 * fabric module calls for target_core_fabric_configfs.c
 	 */
@@ -83,10 +84,11 @@ struct target_core_fabric_ops {
 	void (*fabric_drop_nodeacl)(struct se_node_acl *);
 };
 
-struct se_session *transport_init_session(void);
+struct se_session *transport_init_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
 		unsigned int);
-struct se_session *transport_init_session_tags(unsigned int, unsigned int);
+struct se_session *transport_init_session_tags(unsigned int, unsigned int,
+		enum target_prot_op);
 void	__transport_register_session(struct se_portal_group *,
 		struct se_node_acl *, struct se_session *, void *);
 void	transport_register_session(struct se_portal_group *,