#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
-static inline void lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
-{
- fcmd->fcpLunLsl = 0;
- fcmd->fcpLunMsl = swab16((uint16_t)lun);
-}
/*
* This routine allocates a scsi buffer, which contains all the necessary
struct ulp_bde64 *bpl;
IOCB_t *iocb;
dma_addr_t pdma_phys;
+ uint16_t iotag;
psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
if (!psb)
/* Initialize virtual ptrs to dma_buf region. */
memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+ /* Allocate iotag for psb->cur_iocbq. */
+ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+ if (iotag == 0) {
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ psb->data, psb->dma_handle);
+ kfree(psb);
+ return NULL;
+ }
+
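A hedged sketch of what the new iotag step is for: lpfc_sli_next_iotag() hands out a unique tag and binds the iocbq into a per-HBA lookup table, so a completion can be mapped straight back to its command without walking lists. The field names below (last_iotag, iocbq_lookup, iocbq_lookup_len) are assumptions about the SLI layer, and a real implementation would grow the table rather than simply fail:

	static uint16_t example_next_iotag(struct lpfc_sli *psli,
					   struct lpfc_iocbq *iocbq)
	{
		uint16_t iotag = psli->last_iotag + 1;

		if (iotag >= psli->iocbq_lookup_len)
			return 0;	/* 0 doubles as the failure sentinel above */
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;	/* tag -> iocbq binding */
		iocbq->iotag = iotag;
		return iotag;
	}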
psb->fcp_cmnd = psb->data;
psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
if (datadir == DMA_TO_DEVICE)
bpl->tus.f.bdeFlags = 0;
+ else
+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
num_bde = 1;
bpl++;
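The single byte-swap of tus.w works because bdeSize and bdeFlags are bitfields packed into one 32-bit word: both fields are set first, then the whole word is swapped once. A rough picture of the layout assumed here (the actual bitfield order in lpfc_hw.h depends on host endianness):

	union example_bde_word {
		struct {
			uint32_t bdeSize:24;	/* segment length in bytes */
			uint32_t bdeFlags:8;	/* 0 = transmit, BUFF_USE_RCV = receive */
		} f;
		uint32_t w;			/* swapped as one unit */
	};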
/*
* Finish initializing those IOCB fields that are dependent on the
- * scsi_cmnd request_buffer
+ * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
+ * reinitialized since all iocb memory resources are used many times
+ * for transmit, receive, and continuation BPLs.
*/
+ iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
iocb_cmd->un.fcpi64.bdl.bdeSize +=
(num_bde * sizeof (struct ulp_bde64));
iocb_cmd->ulpBdeCount = 1;
break;
}
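A worked example of the size computation above, assuming sizeof(struct ulp_bde64) is 12 bytes (two address words plus the tus word): with num_bde == 3 data segments, bdeSize starts at 2 * 12 = 24 for the FCP_CMND and FCP_RSP entries, then grows to 24 + 3 * 12 = 60. Resetting to 2 * sizeof() first matters because the recycled iocb may still carry the size of a previous, larger request.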
- if (pnode) {
- if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
- cmd->result = ScsiResult(DID_BUS_BUSY,
- SAM_STAT_BUSY);
- }
- else {
- cmd->result = ScsiResult(DID_NO_CONNECT, 0);
- }
+ if ((pnode == NULL) ||
+     (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
} else {
cmd->result = ScsiResult(DID_OK, 0);
}
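For reference, ScsiResult() packs the midlayer host byte and the SCSI status byte into cmd->result. A minimal sketch of the macro as this driver is assumed to define it (the host code lands in bits 16-23, where the midlayer's host_byte() looks for it):

	#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | (scsi_code))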
int datadir = scsi_cmnd->sc_data_direction;
lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+ /* clear task management bits */
+ lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
- lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
+ int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+ &lpfc_cmd->fcp_cmnd->fcp_lun);
memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
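int_to_scsilun() is the SCSI midlayer helper that replaces the removed lpfc_put_lun(). The old inline only swabbed the low 16 bits into fcpLunMsl; the helper fills the whole 8-byte SAM LUN structure, which is what the FCP_CMND actually carries. Approximately (a sketch of its behavior, not a verbatim copy):

	void example_int_to_scsilun(unsigned int lun, struct scsi_lun *scsilun)
	{
		int i;

		memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));
		/* pack 16 bits of the LUN into each address level, big-endian */
		for (i = 0; i < sizeof(lun); i += 2) {
			scsilun->scsi_lun[i] = (lun >> 8) & 0xFF;
			scsilun->scsi_lun[i + 1] = lun & 0xFF;
			lun >>= 16;
		}
	}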
struct lpfc_rport_data *rdata = scsi_dev->hostdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
- if ((ndlp == 0) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+ if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
return 0;
}
piocb = &piocbq->iocb;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
- lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
+ int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+ &lpfc_cmd->fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
if (!iocbqrsp)
return FAILED;
- memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
-
- iocbq->iocb_flag |= LPFC_IO_POLL;
- ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
- &phba->sli.ring[phba->sli.fcp_ring],
- iocbq, SLI_IOCB_HIGH_PRIORITY,
- iocbqrsp,
- lpfc_cmd->timeout);
+
+ ret = lpfc_sli_issue_iocb_wait(phba,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ iocbq, iocbqrsp, lpfc_cmd->timeout);
if (ret != IOCB_SUCCESS) {
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
ret = FAILED;
lpfc_cmd->pCmd->device->id,
lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);
- /* Return response IOCB to free list. */
- list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
+ lpfc_sli_release_iocbq(phba, iocbqrsp);
return ret;
}
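Every raw list_add_tail() return of an iocbq is replaced by lpfc_sli_release_iocbq() in this patch. The contract that matters: a released iocbq comes back zeroed but keeps its iotag binding, which is also why the explicit memset() before each reuse can go away. A minimal sketch of that contract, under the structure names used above:

	static void example_release_iocbq(struct lpfc_hba *phba,
					  struct lpfc_iocbq *iocbq)
	{
		uint16_t iotag = iocbq->iotag;	/* the tag stays with the iocbq */

		memset(iocbq, 0, sizeof(*iocbq));
		iocbq->iotag = iotag;
		list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
	}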
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct lpfc_scsi_buf *lpfc_cmd = NULL;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
- int err = 0;
+ int err;
- /*
- * The target pointer is guaranteed not to be NULL because the driver
- * only clears the device->hostdata field in lpfc_slave_destroy. This
- * approach guarantees no further IO calls on this target.
- */
- if (!ndlp) {
- cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+ err = fc_remote_port_chkready(rport);
+ if (err) {
+ cmnd->result = err;
goto out_fail_command;
}
/*
- * A Fibre Channel target is present and functioning only when the node
- * state is MAPPED. Any other state is a failure.
+ * Catch race where our node has transitioned, but the
+ * transport is still transitioning.
*/
- if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
- if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
- (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
- cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
- goto out_fail_command;
- }
- /*
- * The device is most likely recovered and the driver
- * needs a bit more time to finish. Ask the midlayer
- * to retry.
- */
- goto out_host_busy;
+ if (!ndlp) {
+ cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
+ goto out_fail_command;
}
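fc_remote_port_chkready() centralizes the port-state ladder that the removed node-state checks did by hand: it returns 0 when the rport is usable and otherwise a ready-made cmnd->result value. Its approximate behavior, sketched here (the FC transport class owns the real definition):

	int example_remote_port_chkready(struct fc_rport *rport)
	{
		switch (rport->port_state) {
		case FC_PORTSTATE_ONLINE:
			return 0;			/* issue the I/O */
		case FC_PORTSTATE_BLOCKED:
			return DID_IMM_RETRY << 16;	/* transient; midlayer retries */
		default:
			return DID_NO_CONNECT << 16;	/* gone; fail the command */
		}
	}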
list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
list_del_init(&iocb->list);
pring->txq_cnt--;
- if (!iocb->iocb_cmpl) {
- list_add_tail(&iocb->list, lpfc_iocb_list);
- }
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
if (abtsiocb == NULL)
return FAILED;
- memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
-
/*
* The scsi command was not in the txq. Check the txcmplq and if it is
* found, send an abort to the FW.
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
IOCB_ERROR) {
- list_add_tail(&abtsiocb->list, lpfc_iocb_list);
+ lpfc_sli_release_iocbq(phba, abtsiocb);
ret = IOCB_ERROR;
break;
}
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
- struct lpfc_sli *psli = &phba->sli;
struct lpfc_scsi_buf *lpfc_cmd = NULL;
struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
if (iocbqrsp == NULL)
goto out_free_scsi_buf;
- memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
-
- iocbq->iocb_flag |= LPFC_IO_POLL;
- iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
-
- ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
- &phba->sli.ring[psli->fcp_ring],
- iocbq, 0, iocbqrsp, 60);
+ ret = lpfc_sli_issue_iocb_wait(phba,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ iocbq, iocbqrsp, lpfc_cmd->timeout);
if (ret == IOCB_SUCCESS)
ret = SUCCESS;
phba->brd_no, cnt);
}
- list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
+ lpfc_sli_release_iocbq(phba, iocbqrsp);
out_free_scsi_buf:
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
lpfc_slave_alloc(struct scsi_device *sdev)
{
struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
- struct lpfc_nodelist *ndlp = NULL;
- int match = 0;
struct lpfc_scsi_buf *scsi_buf = NULL;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
uint32_t total = 0, i;
uint32_t num_to_alloc = 0;
unsigned long flags;
- struct list_head *listp;
- struct list_head *node_list[6];
-
- /*
- * Store the target pointer in the scsi_device hostdata pointer provided
- * the driver has already discovered the target id.
- */
- /* Search the nlp lists other than unmap_list for this target ID */
- node_list[0] = &phba->fc_npr_list;
- node_list[1] = &phba->fc_nlpmap_list;
- node_list[2] = &phba->fc_prli_list;
- node_list[3] = &phba->fc_reglogin_list;
- node_list[4] = &phba->fc_adisc_list;
- node_list[5] = &phba->fc_plogi_list;
-
- for (i = 0; i < 6 && !match; i++) {
- listp = node_list[i];
- if (list_empty(listp))
- continue;
- list_for_each_entry(ndlp, listp, nlp_listp) {
- if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
- match = 1;
- break;
- }
- }
- }
-
- if (!match)
+ if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
- sdev->hostdata = ndlp->rport->dd_data;
+ sdev->hostdata = rport->dd_data;
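With the FC transport owning discovery, the per-rport private area replaces the old six-list scan: the driver's lpfc_rport_data (sized via the fc_function_template's dd_fcrport_size, an assumption about the registration side) hangs off rport->dd_data, so any sdev maps back to its node in O(1). Usage mirrors lpfc_queuecommand above:

	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct lpfc_rport_data *rdata = rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;	/* may be NULL mid-transition */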
/*
* Populate the cmds_per_lun count scsi_bufs into this host's globally
* available list of scsi buffers. Don't allocate more than the
- * HBA limit conveyed to the midlayer via the host structure. Note
- * that this list of scsi bufs exists for the lifetime of the driver.
+ * HBA limit conveyed to the midlayer via the host structure. The
+ * formula accounts for the lun_queue_depth + error handlers + 1
+ * extra. This list of scsi bufs exists for the lifetime of the driver.
*/
total = phba->total_scsi_bufs;
- num_to_alloc = LPFC_CMD_PER_LUN;
+ num_to_alloc = phba->cfg_lun_queue_depth + 2;
if (total >= phba->cfg_hba_queue_depth) {
- printk(KERN_WARNING "%s, At config limitation of "
- "%d allocated scsi_bufs\n", __FUNCTION__, total);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+ "%d:0704 At limitation of %d preallocated "
+ "command buffers\n", phba->brd_no, total);
return 0;
} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+ "%d:0705 Allocation request of %d command "
+ "buffers will exceed max of %d. Reducing "
+ "allocation request to %d.\n", phba->brd_no,
+ num_to_alloc, phba->cfg_hba_queue_depth,
+ (phba->cfg_hba_queue_depth - total));
num_to_alloc = phba->cfg_hba_queue_depth - total;
}
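A quick worked example of the clamp with assumed values: cfg_lun_queue_depth = 30 makes the request 30 + 2 = 32 buffers (queue depth plus the error handlers and one spare); if cfg_hba_queue_depth = 128 and total = 100 buffers are already allocated, then 100 + 32 > 128, so the request is cut to 128 - 100 = 28.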
for (i = 0; i < num_to_alloc; i++) {
scsi_buf = lpfc_get_scsi_buf(phba);
if (!scsi_buf) {
- printk(KERN_ERR "%s, failed to allocate "
- "scsi_buf\n", __FUNCTION__);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "%d:0706 Failed to allocate command "
+ "buffer\n", phba->brd_no);
break;
}