sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ if (hisi_hba->prot_mask) {
+ dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
+ prot_mask);
+ scsi_host_set_prot(hisi_hba->shost, prot_mask);
++ if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
++ scsi_host_set_guard(hisi_hba->shost,
++ SHOST_DIX_GUARD_CRC);
+ }
+
+ if (hisi_sas_debugfs_enable)
+ hisi_sas_debugfs_init(hisi_hba);
+
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_ha;
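For context, a minimal sketch of how an LLDD advertises DIF/DIX capability to the SCSI midlayer via scsi_host_set_prot()/scsi_host_set_guard(); prot_mask and HISI_SAS_DIX_PROT_MASK above are driver-specific, and the mask chosen below is illustrative, not hisi_sas's actual default:

  #include <scsi/scsi_host.h>

  /* Illustrative only: advertise T10 PI (DIF) types 1-3 plus DIX type 1. */
  static void example_register_prot(struct Scsi_Host *shost)
  {
          unsigned int prot_mask = SHOST_DIF_TYPE1_PROTECTION |
                                   SHOST_DIF_TYPE2_PROTECTION |
                                   SHOST_DIF_TYPE3_PROTECTION |
                                   SHOST_DIX_TYPE1_PROTECTION;

          scsi_host_set_prot(shost, prot_mask);
          /* DIX transfers carry a CRC guard tag between the HBA and host memory. */
          scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
  }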
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_ctrl_stat *cstat;
int ret;
+ DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
if (vport->nvmei_support == 0)
return;
localport = vport->localport;
- vport->localport = NULL;
lport = (struct lpfc_nvme_lport *)localport->private;
- cstat = lport->cstat;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
/* Wait for completion. This either blocks
* indefinitely or succeeds
*/
- lpfc_nvme_lport_unreg_wait(vport, lport);
+ lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
+ vport->localport = NULL;
- kfree(cstat);
/* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
- struct completion lport_unreg_done;
+ struct completion *lport_unreg_cmp;
/* Add stats counters here */
- struct lpfc_nvme_ctrl_stat *cstat;
atomic_t fc4NvmeLsRequests;
atomic_t fc4NvmeLsCmpls;
atomic_t xmt_fcp_noxri;
return;
if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
- wq = phba->sli4_hba.nvme_wq[qidx];
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
- init_completion(&tgtp->tport_unreg_done);
+ tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
- wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+ wait_for_completion_timeout(&tport_unreg_cmp, 5);
lpfc_nvmet_cleanup_io_context(phba);
}
phba->targetport = NULL;
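Both the localport and targetport hunks above switch to the same pattern: the completion now lives on the waiting thread's stack and the private structure only keeps a pointer to it, so the fabric layer's unregister-done callback can still signal the waiter without touching memory that is about to be freed. A minimal sketch of that pattern, with example_* names standing in for the lpfc structures:

  #include <linux/completion.h>
  #include <linux/jiffies.h>

  struct example_port {
          struct completion *unreg_cmp;   /* points at the waiter's stack */
  };

  /* Called from the fabric driver's unregister-done callback. */
  static void example_unreg_done(struct example_port *port)
  {
          if (port->unreg_cmp)
                  complete(port->unreg_cmp);
  }

  static void example_destroy_port(struct example_port *port)
  {
          DECLARE_COMPLETION_ONSTACK(unreg_cmp);

          port->unreg_cmp = &unreg_cmp;
          /* ... start the asynchronous unregister here ... */
          wait_for_completion_timeout(&unreg_cmp, msecs_to_jiffies(5000));
  }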
lun = (uint16_t)tm_iocb->u.tmf.lun;
/* Issue Marker IOCB */
- qla2x00_marker(vha, vha->hw->req_q_map[0],
- vha->hw->rsp_q_map[0], fcport->loop_id, lun,
+ qla2x00_marker(vha, vha->hw->base_qpair,
- sp->fcport->loop_id, lun,
++ fcport->loop_id, lun,
flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
}
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
- if (USER_CTRL_IRQ(vha->hw))
+ if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
rc = blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
+ rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
return rc;
}
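The map_queues change above falls back to the generic CPU-to-queue mapping when interrupts are user-controlled or the multiqueue I/O base is absent, and otherwise derives the mapping from the PCI device's MSI-X affinity, with irq_offset skipping vectors that are not tied to I/O queues. A hedged sketch of that logic as a standalone helper, assuming the v5.0-era block layer signatures; example_map_queues() is an illustrative name, and the real driver's .map_queues callback receives only the Scsi_Host:

  #include <linux/blk-mq.h>
  #include <linux/blk-mq-pci.h>
  #include <scsi/scsi_host.h>

  static int example_map_queues(struct Scsi_Host *shost, struct pci_dev *pdev,
                                int irq_offset)
  {
          struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];

          if (!pdev)
                  return blk_mq_map_queues(qmap);       /* generic CPU mapping */

          /* Map hw queues to the device's MSI-X affinity, skipping the first
           * irq_offset vectors (e.g. one reserved for a control queue). */
          return blk_mq_pci_map_queues(qmap, pdev, irq_offset);
  }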
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
+ int ret;
unsigned long iflags;
unsigned long long i;
- int ret;
- u64 lba_off;
+ u32 lb_size = sdebug_sector_size;
+ u64 block, lbaa;
+ u8 *fs1p;
- ret = check_device_access_params(scp, lba, num);
+ ret = check_device_access_params(scp, lba, num, true);
if (ret)
return ret;
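The remainder of resp_write_same() (not shown here) uses fs1p/lbaa to fill the range: the first logical block is zero-filled when the NDOB bit is set (or taken from the data-out buffer otherwise) and then replicated across the remaining blocks. A rough sketch of that fill step over a flat backing store; example_write_same_fill() and its store/lb_size parameters are stand-ins for scsi_debug's internals, not its actual helpers:

  #include <linux/string.h>
  #include <linux/types.h>

  /* Illustrative: replicate the block at 'lba' across 'num' blocks. */
  static void example_write_same_fill(u8 *store, u32 lb_size, u64 lba, u32 num,
                                      bool ndob)
  {
          u8 *fs1p = store + lba * lb_size;     /* first sector of the range */
          u64 i;

          if (ndob)                             /* No Data-Out Buffer: zeroes */
                  memset(fs1p, 0, lb_size);
          /* otherwise fs1p was already filled from the data-out buffer */

          for (i = 1; i < num; i++)
                  memcpy(store + (lba + i) * lb_size, fs1p, lb_size);
  }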
u64 write_hints[BLK_MAX_WRITE_HINTS];
};
-#define QUEUE_FLAG_STOPPED 1 /* queue is stopped */
-#define QUEUE_FLAG_DYING 2 /* queue being torn down */
-#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */
-#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
-#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
-#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 10 /* do disk/partitions IO accounting */
-#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 14 /* supports secure erase */
-#define QUEUE_FLAG_SAME_FORCE 15 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 16 /* queue tear-down finished */
-#define QUEUE_FLAG_INIT_DONE 17 /* queue is initialized */
-#define QUEUE_FLAG_NO_SG_MERGE 18 /* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_POLL 19 /* IO polling enabled if set */
-#define QUEUE_FLAG_WC 20 /* Write back caching */
-#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */
-#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */
-#define QUEUE_FLAG_DAX 23 /* device supports DAX */
-#define QUEUE_FLAG_STATS 24 /* track IO start and completion times */
-#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
-#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
-#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
-#define QUEUE_FLAG_PCI_P2PDMA 29 /* device supports PCI p2p requests */
-
-#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_ADD_RANDOM))
+#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
+#define QUEUE_FLAG_DYING 1 /* queue being torn down */
- #define QUEUE_FLAG_BIDI 2 /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
+#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
+#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
+#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
+#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */
+#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
+#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
+#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
+#define QUEUE_FLAG_WC 17 /* Write back caching */
+#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
+#define QUEUE_FLAG_DAX 19 /* device supports DAX */
+#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
+#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
+#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
+#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */
+#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
+#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP))
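The renumbering is safe because the flag values are never stored or exported; code outside blkdev.h refers to them only by name through the queue-flag helpers. A brief sketch of typical usage (example_queue_flags is an illustrative name):

  #include <linux/blkdev.h>

  static void example_queue_flags(struct request_queue *q)
  {
          /* Set or clear a capability by name; the bit position is internal. */
          blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
          blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

          /* Queries use test_bit on q->queue_flags, which is what helpers
           * such as blk_queue_nonrot() expand to. */
          if (test_bit(QUEUE_FLAG_NONROT, &q->queue_flags))
                  pr_info("queue is non-rotational\n");
  }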