2 * iSCSI Initiator over TCP/IP Data-Path
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 Mike Christie
7 * maintained by open-iscsi@googlegroups.com
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published
11 * by the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * See the file COPYING included with this distribution for more details.
28 #include <linux/types.h>
29 #include <linux/list.h>
30 #include <linux/inet.h>
31 #include <linux/blkdev.h>
32 #include <linux/crypto.h>
33 #include <linux/delay.h>
34 #include <linux/kfifo.h>
35 #include <linux/scatterlist.h>
36 #include <linux/mutex.h>
38 #include <scsi/scsi_cmnd.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_eh.h>
41 #include <scsi/scsi_request.h>
42 #include <scsi/scsi_tcq.h>
43 #include <scsi/scsi_host.h>
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_transport_iscsi.h>
47 #include "iscsi_tcp.h"
/* Module identity. */
MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
              "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
MODULE_VERSION("0:4.445");
/* #define DEBUG_TCP */
/* #define DEBUG_SCSI */

/*
 * debug_tcp()/debug_scsi() expand to printk() only when DEBUG_TCP /
 * DEBUG_SCSI are defined; the surrounding #ifdef/#else/#endif lines are
 * elided in this extract, which is why each macro appears twice below.
 */
#define debug_tcp(fmt...) printk(KERN_DEBUG "tcp: " fmt)
#define debug_tcp(fmt...)
#define debug_scsi(fmt...) printk(KERN_DEBUG "scsi: " fmt)
#define debug_scsi(fmt...)

/* window for sanity-checking wrapping serial numbers (RFC 3720 SN rules) */
#define INVALID_SN_DELTA 0xffff

/* highest LUN number exported to the SCSI midlayer; read-only module param */
static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);

/* slab cache — presumably for per-task allocations; created at module init
 * (init code not visible in this extract) */
static kmem_cache_t *taskcache;
/*
 * iscsi_buf_init_virt - wrap a contiguous kernel-virtual buffer in iscsi_buf
 * @ibuf: buffer descriptor to initialize
 * @vbuf: kernel-virtual address of the data
 * @size: length in bytes
 *
 * use_sendmsg = 0: this memory may be transmitted via the sendpage() path.
 * NOTE(review): storage class/return type line and braces are elided in
 * this extract; visible statements are unchanged.
 */
iscsi_buf_init_virt(struct iscsi_buf *ibuf, char *vbuf, int size)
        sg_init_one(&ibuf->sg, (u8 *)vbuf, size);
        ibuf->use_sendmsg = 0;
/*
 * iscsi_buf_init_iov - wrap a virtual buffer, forcing the sendmsg path
 * @ibuf: buffer descriptor to initialize
 * @vbuf: kernel-virtual address of the data
 * @size: length in bytes
 *
 * Fills the single scatterlist entry by hand; use_sendmsg = 1 routes this
 * buffer through sock_no_sendpage() in iscsi_send() (see that function) —
 * presumably because sendpage() is not safe for this memory.
 */
iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
        ibuf->sg.page = virt_to_page(vbuf);
        ibuf->sg.offset = offset_in_page(vbuf);
        ibuf->sg.length = size;
        ibuf->use_sendmsg = 1;
/*
 * iscsi_buf_init_sg - initialize an iscsi_buf from a scatterlist element
 * @ibuf: buffer descriptor to initialize
 * @sg: source scatterlist entry (page/offset/length copied verbatim)
 */
iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
        ibuf->sg.page = sg->page;
        ibuf->sg.offset = sg->offset;
        ibuf->sg.length = sg->length;
        /*
         * Fastpath: sg element fits into single page
         */
        if (sg->length + sg->offset <= PAGE_SIZE && page_count(sg->page) >= 2)
                ibuf->use_sendmsg = 0;
        /* else branch (line elided in extract): multi-page or low-refcount
         * page cannot go through sendpage() */
                ibuf->use_sendmsg = 1;
/*
 * iscsi_buf_left - bytes of this buffer not yet transmitted
 * @ibuf: buffer descriptor
 *
 * NOTE(review): the declaration of `rc` and the return statement are
 * elided in this extract.
 */
iscsi_buf_left(struct iscsi_buf *ibuf)
        rc = ibuf->sg.length - ibuf->sent;
/*
 * iscsi_hdr_digest - compute the PDU header digest (HeaderDigest)
 * @conn: iscsi connection (tx_tfm is the transmit digest transform)
 * @buf: header buffer; sg.length is grown by 4 so the digest that is
 *       appended after the header gets transmitted with it
 *
 * NOTE(review): the trailing `u8 *crc` parameter line is elided in this
 * extract.
 */
iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
        crypto_digest_digest(conn->tx_tfm, &buf->sg, 1, crc);
        buf->sg.length += sizeof(uint32_t);
/*
 * iscsi_conn_failure - mark the connection failed and notify userspace
 * @conn: iscsi connection
 * @err: iSCSI error code to report
 *
 * Marks the whole session failed when this is the only or leading
 * connection, suspends both directions of I/O, then reports the error
 * through the transport class (iscsi_conn_error).
 */
iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
        struct iscsi_session *session = conn->session;

        spin_lock_irqsave(&session->lock, flags);
        if (session->conn_cnt == 1 || session->leadconn == conn)
                session->state = ISCSI_STATE_FAILED;
        spin_unlock_irqrestore(&session->lock, flags);
        /* stop the xmit and recv paths; cleared again on recovery */
        set_bit(SUSPEND_BIT, &conn->suspend_tx);
        set_bit(SUSPEND_BIT, &conn->suspend_rx);
        iscsi_conn_error(iscsi_handle(conn), err);
/*
 * iscsi_check_assign_cmdsn - validate and latch ExpCmdSN/MaxCmdSN
 * @session: iscsi session whose window is updated
 * @hdr: any PDU header carrying exp_cmdsn/max_cmdsn (cast to nopin layout)
 *
 * Serial-number arithmetic: values may wrap, so "newer" is detected either
 * by simple greater-than or by a wrap larger than INVALID_SN_DELTA.
 * Returns ISCSI_ERR_MAX_CMDSN when MaxCmdSN is implausibly behind ExpCmdSN.
 */
iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
        uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
        uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);

        if (max_cmdsn < exp_cmdsn -1 &&
            max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
                return ISCSI_ERR_MAX_CMDSN;
        if (max_cmdsn > session->max_cmdsn ||
            max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
                session->max_cmdsn = max_cmdsn;
        if (exp_cmdsn > session->exp_cmdsn ||
            exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
                session->exp_cmdsn = exp_cmdsn;
/*
 * iscsi_hdr_extract - locate or assemble the PDU header from the rx skb
 * @conn: iscsi connection (conn->in.* holds per-skb receive state)
 *
 * Fast path: when the whole header is present in a linear skb, point
 * conn->in.hdr straight into skb->data (zero copy).  Otherwise copy the
 * header (possibly over several calls / skbs) into conn->hdr, tracking
 * progress in conn->in.hdr_offset and conn->in_progress.
 */
iscsi_hdr_extract(struct iscsi_conn *conn)
        struct sk_buff *skb = conn->in.skb;

        if (conn->in.copy >= conn->hdr_size &&
            conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
                /*
                 * Zero-copy PDU Header: using connection context
                 * to store header pointer.
                 */
                if (skb_shinfo(skb)->frag_list == NULL &&
                    !skb_shinfo(skb)->nr_frags)
                        conn->in.hdr = (struct iscsi_hdr *)
                                ((char*)skb->data + conn->in.offset);
                /* else branch (line elided in extract): non-linear skb ->
                 * copy the header out instead of aliasing skb->data.
                 * Ignoring the skb_copy_bits() return code since in.copy
                 * was checked above. */
                        skb_copy_bits(skb, conn->in.offset,
                                &conn->hdr, conn->hdr_size);
                        conn->in.hdr = &conn->hdr;
                conn->in.offset += conn->hdr_size;
                conn->in.copy -= conn->hdr_size;
        /* else branch (lines elided in extract): not enough bytes in this
         * skb -> gather the header piecewise below. */
                /*
                 * PDU header scattered across SKB's,
                 * copying it... This'll happen quite rarely.
                 */
                if (conn->in_progress == IN_PROGRESS_WAIT_HEADER)
                        conn->in.hdr_offset = 0;

                hdr_remains = conn->hdr_size - conn->in.hdr_offset;
                BUG_ON(hdr_remains <= 0);

                copylen = min(conn->in.copy, hdr_remains);
                skb_copy_bits(skb, conn->in.offset,
                        (char*)&conn->hdr + conn->in.hdr_offset, copylen);

                debug_tcp("PDU gather offset %d bytes %d in.offset %d "
                       "in.copy %d\n", conn->in.hdr_offset, copylen,
                       conn->in.offset, conn->in.copy);

                conn->in.offset += copylen;
                conn->in.copy -= copylen;
                if (copylen < hdr_remains) {
                        /* still short: remember progress, wait for more data */
                        conn->in_progress = IN_PROGRESS_HEADER_GATHER;
                        conn->in.hdr_offset += copylen;
                conn->in.hdr = &conn->hdr;
                conn->discontiguous_hdr_cnt++;
                conn->in_progress = IN_PROGRESS_WAIT_HEADER;
/*
 * iscsi_ctask_cleanup - release a command task after response completion
 * @conn: iscsi connection
 * @ctask: command task to recycle
 *
 * Frees any queued Data-Out descriptors for WRITEs, resets the xmit state
 * machine and returns the task to the session command pool.
 * NOTE(review): some lines between the two session->lock sections are
 * elided in this extract.
 */
iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
        struct scsi_cmnd *sc = ctask->sc;
        struct iscsi_session *session = conn->session;

        spin_lock(&session->lock);
        spin_unlock(&session->lock);

        if (sc->sc_data_direction == DMA_TO_DEVICE) {
                struct iscsi_data_task *dtask, *n;

                /* WRITE: cleanup Data-Out's if any */
                spin_lock(&conn->lock);
                list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
                        list_del(&dtask->item);
                        mempool_free(dtask, ctask->datapool);
                spin_unlock(&conn->lock);
        ctask->xmstate = XMSTATE_IDLE;

        /* return the task to the pool (session->lock taken again; the
         * re-acquire line is elided in this extract) */
        __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
        spin_unlock(&session->lock);
/**
 * iscsi_cmd_rsp - SCSI Command Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 *
 * Translates an iSCSI SCSI-Response PDU into midlayer terms: sets
 * sc->result / sc->resid, copies sense data when present, bumps the
 * response counter and completes the command via iscsi_ctask_cleanup().
 */
iscsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
        struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)conn->in.hdr;
        struct iscsi_session *session = conn->session;
        struct scsi_cmnd *sc = ctask->sc;

        /* validate/advance the command window before touching the command */
        rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
                sc->result = (DID_ERROR << 16);

        conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;

        sc->result = (DID_OK << 16) | rhdr->cmd_status;

        if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
                sc->result = (DID_ERROR << 16);

        if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION && conn->senselen) {
                /* sense data: first 2 bytes of the data segment carry the
                 * sense length, sense itself follows at conn->data + 2 */
                int sensecopy = min(conn->senselen, SCSI_SENSE_BUFFERSIZE);

                memcpy(sc->sense_buffer, conn->data + 2, sensecopy);
                debug_scsi("copied %d bytes of sense\n", sensecopy);

        if (sc->sc_data_direction == DMA_TO_DEVICE)

        if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
                int res_count = be32_to_cpu(rhdr->residual_count);

                if (res_count > 0 && res_count <= sc->request_bufflen)
                        sc->resid = res_count;
                /* else branch (line elided in extract): bogus residual */
                        sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
        } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
                sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
        else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
                sc->resid = be32_to_cpu(rhdr->residual_count);

        debug_scsi("done [sc %lx res %d itt 0x%x]\n",
                   (long)sc, sc->result, ctask->itt);
        conn->scsirsp_pdus_cnt++;
        iscsi_ctask_cleanup(conn, ctask);
/**
 * iscsi_data_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 *
 * Validates the Data-In header (DataSN ordering, buffer offset), primes
 * ctask->data_count with the segment length, and — when the PDU carries
 * piggybacked status (ISCSI_FLAG_DATA_STATUS) — fills in sc->result and
 * residuals just like a SCSI Response would.
 */
iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
        struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)conn->in.hdr;
        struct iscsi_session *session = conn->session;
        int datasn = be32_to_cpu(rhdr->datasn);

        rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
        /*
         * setup Data-In byte counter (gets decremented..)
         */
        ctask->data_count = conn->in.datalen;

        if (conn->in.datalen == 0)

        /* Data-In PDUs must arrive in DataSN order */
        if (ctask->datasn != datasn)
                return ISCSI_ERR_DATASN;

        ctask->data_offset = be32_to_cpu(rhdr->offset);
        if (ctask->data_offset + conn->in.datalen > ctask->total_length)
                return ISCSI_ERR_DATA_OFFSET;

        if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
                struct scsi_cmnd *sc = ctask->sc;

                conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
                if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
                        int res_count = be32_to_cpu(rhdr->residual_count);

                            res_count <= sc->request_bufflen) {
                                sc->resid = res_count;
                                sc->result = (DID_OK << 16) | rhdr->cmd_status;
                        /* else (elided): implausible residual -> fail */
                                sc->result = (DID_BAD_TARGET << 16) |
                } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
                        sc->resid = be32_to_cpu(rhdr->residual_count);
                        sc->result = (DID_OK << 16) | rhdr->cmd_status;
                        sc->result = (DID_OK << 16) | rhdr->cmd_status;

        conn->datain_pdus_cnt++;
/**
 * iscsi_solicit_data_init - initialize first Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @r2t: R2T descriptor being answered
 *
 * Initialize first Data-Out within this R2T sequence and finds
 * proper data_offset within this SCSI command.
 *
 * This function is called with connection lock taken.
 */
iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
                        struct iscsi_r2t_info *r2t)
        struct iscsi_data *hdr;
        struct iscsi_data_task *dtask;
        struct scsi_cmnd *sc = ctask->sc;

        dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
        /* NOTE(review): hdr assignment from dtask is elided in this extract */
        memset(hdr, 0, sizeof(struct iscsi_data));
        hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
        r2t->solicit_datasn++;
        hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
        memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
        hdr->itt = ctask->hdr.itt;
        hdr->exp_statsn = r2t->exp_statsn;
        hdr->offset = cpu_to_be32(r2t->data_offset);
        if (r2t->data_length > conn->max_xmit_dlength) {
                /* more than one Data-Out needed: cap this one at MRDSL */
                hton24(hdr->dlength, conn->max_xmit_dlength);
                r2t->data_count = conn->max_xmit_dlength;
        /* else branch (brace line elided): whole burst fits in one PDU,
         * so it is also the FINAL one */
                hton24(hdr->dlength, r2t->data_length);
                r2t->data_count = r2t->data_length;
                hdr->flags = ISCSI_FLAG_CMD_FINAL;
        conn->dataout_pdus_cnt++;

        iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
                           sizeof(struct iscsi_hdr));

        /* NOTE(review): the use_sg branch header is elided here; the loop
         * below walks the scatterlist to find the element containing
         * r2t->data_offset */
                struct scatterlist *sg = sc->request_buffer;

                for (i = 0; i < sc->use_sg; i++, sg += 1) {
                        /* FIXME: prefetch ? */
                        if (sg_count + sg->length > r2t->data_offset) {
                                /* offset within this page */
                                page_offset = r2t->data_offset - sg_count;

                                /* fill in this buffer */
                                iscsi_buf_init_sg(&r2t->sendbuf, sg);
                                r2t->sendbuf.sg.offset += page_offset;
                                r2t->sendbuf.sg.length -= page_offset;

                                /* xmit logic will continue with next one */
                        sg_count += sg->length;
                BUG_ON(r2t->sg == NULL);
        /* non-sg case (else elided): single flat buffer */
                iscsi_buf_init_iov(&ctask->sendbuf,
                            (char*)sc->request_buffer + r2t->data_offset,

        /* keep the Data-Out task so iscsi_ctask_cleanup() can free it */
        list_add(&dtask->item, &ctask->dataqueue);
/**
 * iscsi_r2t_rsp - iSCSI R2T Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 *
 * Validates the R2T PDU (no AHS, no data segment, in-order R2TSN and a
 * sane offset/length), fills a free r2t descriptor, primes the first
 * Data-Out via iscsi_solicit_data_init() and queues the task for xmit.
 */
iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
        struct iscsi_r2t_info *r2t;
        struct iscsi_session *session = conn->session;
        struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)conn->in.hdr;
        int r2tsn = be32_to_cpu(rhdr->r2tsn);

                return ISCSI_ERR_AHSLEN;

        if (conn->in.datalen)
                return ISCSI_ERR_DATALEN;

        if (ctask->exp_r2tsn && ctask->exp_r2tsn != r2tsn)
                return ISCSI_ERR_R2TSN;

        rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);

        /* FIXME: use R2TSN to detect missing R2T */

        /* fill-in new R2T associated with the task */
        spin_lock(&session->lock);
        if (!ctask->sc || ctask->mtask ||
            session->state != ISCSI_STATE_LOGGED_IN) {
                printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
                       "recovery...\n", ctask->itt);
                spin_unlock(&session->lock);

        rc = __kfifo_get(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));

        r2t->exp_statsn = rhdr->statsn;
        r2t->data_length = be32_to_cpu(rhdr->data_length);
        if (r2t->data_length == 0 ||
            r2t->data_length > session->max_burst) {
                spin_unlock(&session->lock);
                return ISCSI_ERR_DATALEN;

        r2t->data_offset = be32_to_cpu(rhdr->data_offset);
        if (r2t->data_offset + r2t->data_length > ctask->total_length) {
                spin_unlock(&session->lock);
                return ISCSI_ERR_DATALEN;

        r2t->ttt = rhdr->ttt; /* no flip */
        r2t->solicit_datasn = 0;

        iscsi_solicit_data_init(conn, ctask, r2t);

        ctask->exp_r2tsn = r2tsn + 1;
        ctask->xmstate |= XMSTATE_SOL_HDR;
        __kfifo_put(ctask->r2tqueue, (void*)&r2t, sizeof(void*));
        __kfifo_put(conn->writequeue, (void*)&ctask, sizeof(void*));

        /* kick the xmit worker to send the solicited Data-Outs */
        schedule_work(&conn->xmitwork);
        conn->r2t_pdus_cnt++;
        spin_unlock(&session->lock);
/*
 * iscsi_hdr_recv - verify and dispatch one received PDU header
 * @conn: iscsi connection (conn->in.hdr was set by iscsi_hdr_extract)
 *
 * Checks data length, AHS length, padding and (optionally) the header
 * digest, decodes the ITT (age + cid + index), then dispatches to the
 * appropriate response handler based on opcode and ITT class:
 * command tasks, management tasks, or target-initiated (reserved-tag) PDUs.
 * Returns 0 or an ISCSI_ERR_* code.
 * NOTE(review): many brace/else/case/default lines and some expressions
 * are elided in this extract; visible statements are unchanged.
 */
iscsi_hdr_recv(struct iscsi_conn *conn)
        struct iscsi_hdr *hdr;
        struct iscsi_cmd_task *ctask;
        struct iscsi_session *session = conn->session;
        uint32_t cdgst, rdgst = 0;

        /* verify PDU length */
        conn->in.datalen = ntoh24(hdr->dlength);
        if (conn->in.datalen > conn->max_recv_dlength) {
                printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
                       conn->in.datalen, conn->max_recv_dlength);
                return ISCSI_ERR_DATALEN;
        conn->data_copied = 0;

        /* AHS is skipped, not interpreted: advance past it */
        conn->in.ahslen = hdr->hlength * 4;
        conn->in.offset += conn->in.ahslen;
        conn->in.copy -= conn->in.ahslen;
        if (conn->in.copy < 0) {
                printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
                       "%d bytes\n", conn->in.ahslen);
                return ISCSI_ERR_AHSLEN;

        /* calculate read padding */
        conn->in.padding = conn->in.datalen & (ISCSI_PAD_LEN-1);
        if (conn->in.padding) {
                conn->in.padding = ISCSI_PAD_LEN - conn->in.padding;
                debug_scsi("read padding %d bytes\n", conn->in.padding);

        if (conn->hdrdgst_en) {
                struct scatterlist sg;

                /* recompute the digest over header+AHS and compare with the
                 * received one that immediately follows them */
                sg_init_one(&sg, (u8 *)hdr,
                            sizeof(struct iscsi_hdr) + conn->in.ahslen);
                crypto_digest_digest(conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
                rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
                if (cdgst != rdgst) {
                        printk(KERN_ERR "iscsi_tcp: itt %x: hdrdgst error "
                               "recv 0x%x calc 0x%x\n", conn->in.itt, rdgst,
                        return ISCSI_ERR_HDR_DGST;

        /* save opcode for later */
        conn->in.opcode = hdr->opcode & ISCSI_OPCODE_MASK;

        /* verify itt (itt encoding: age+cid+itt) */
        if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
                if ((hdr->itt & AGE_MASK) !=
                    (session->age << AGE_SHIFT)) {
                        printk(KERN_ERR "iscsi_tcp: received itt %x expected "
                               "session age (%x)\n", hdr->itt,
                               session->age & AGE_MASK);
                        return ISCSI_ERR_BAD_ITT;

                if ((hdr->itt & CID_MASK) != (conn->id << CID_SHIFT)) {
                        printk(KERN_ERR "iscsi_tcp: received itt %x, expected "
                               "CID (%x)\n", hdr->itt, conn->id);
                        return ISCSI_ERR_BAD_ITT;
                conn->in.itt = hdr->itt & ITT_MASK;
        /* else (elided): reserved tag -> keep itt as-is */
                conn->in.itt = hdr->itt;

        debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
                  hdr->opcode, conn->in.offset, conn->in.copy,
                  conn->in.ahslen, conn->in.datalen);

        /* --- command-task ITT range: responses for SCSI commands --- */
        if (conn->in.itt < session->cmds_max) {
                ctask = (struct iscsi_cmd_task *)session->cmds[conn->in.itt];

                /* (guard elided): task without a scsi command is dropped */
                        printk(KERN_INFO "iscsi_tcp: dropping ctask with "
                               "itt 0x%x\n", ctask->itt);
                        conn->in.datalen = 0; /* force drop */

                if (ctask->sc->SCp.phase != session->age) {
                        printk(KERN_ERR "iscsi_tcp: ctask's session age %d, "
                               "expected %d\n", ctask->sc->SCp.phase,
                        return ISCSI_ERR_SESSION_FAILED;

                conn->in.ctask = ctask;

                debug_scsi("rsp [op 0x%x cid %d sc %lx itt 0x%x len %d]\n",
                           hdr->opcode, conn->id, (long)ctask->sc,
                           ctask->itt, conn->in.datalen);

                switch(conn->in.opcode) {
                case ISCSI_OP_SCSI_CMD_RSP:
                        BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
                        if (!conn->in.datalen)
                                rc = iscsi_cmd_rsp(conn, ctask);
                        /* else (elided): defer to iscsi_data_recv() */
                                /*
                                 * got sense or response data; copying PDU
                                 * Header to the connection's header
                                 */
                                memcpy(&conn->hdr, hdr,
                                       sizeof(struct iscsi_hdr));
                case ISCSI_OP_SCSI_DATA_IN:
                        BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
                        /* save flags for non-exceptional status */
                        conn->in.flags = hdr->flags;
                        /* save cmd_status for sense data */
                        conn->in.cmd_status =
                                ((struct iscsi_data_rsp*)hdr)->cmd_status;
                        rc = iscsi_data_rsp(conn, ctask);
                /* (case label elided — presumably ISCSI_OP_R2T) */
                        BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
                        if (ctask->sc->sc_data_direction == DMA_TO_DEVICE)
                                rc = iscsi_r2t_rsp(conn, ctask);
                        /* else (elided): R2T for a non-WRITE is a protocol
                         * violation */
                                rc = ISCSI_ERR_PROTO;
                /* (default label elided) */
                        rc = ISCSI_ERR_BAD_OPCODE;

        /* --- management-task ITT range --- */
        } else if (conn->in.itt >= ISCSI_MGMT_ITT_OFFSET &&
                   conn->in.itt < ISCSI_MGMT_ITT_OFFSET +
                                        session->mgmtpool_max) {
                struct iscsi_mgmt_task *mtask = (struct iscsi_mgmt_task *)
                                        session->mgmt_cmds[conn->in.itt -
                                                ISCSI_MGMT_ITT_OFFSET];

                debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
                           conn->in.opcode, conn->id, mtask->itt,

                switch(conn->in.opcode) {
                case ISCSI_OP_LOGIN_RSP:
                case ISCSI_OP_TEXT_RSP:
                case ISCSI_OP_LOGOUT_RSP:
                        rc = iscsi_check_assign_cmdsn(session,
                                                 (struct iscsi_nopin*)hdr);

                        if (!conn->in.datalen) {
                                rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
                                /* recycle the mtask unless it is the
                                 * pre-allocated login task */
                                if (conn->login_mtask != mtask) {
                                        spin_lock(&session->lock);
                                        __kfifo_put(session->mgmtpool.queue,
                                            (void*)&mtask, sizeof(void*));
                                        spin_unlock(&session->lock);
                case ISCSI_OP_SCSI_TMFUNC_RSP:
                        rc = iscsi_check_assign_cmdsn(session,
                                                 (struct iscsi_nopin*)hdr);

                        if (conn->in.datalen || conn->in.ahslen) {
                                rc = ISCSI_ERR_PROTO;

                        conn->tmfrsp_pdus_cnt++;
                        spin_lock(&session->lock);
                        if (conn->tmabort_state == TMABORT_INITIAL) {
                                __kfifo_put(session->mgmtpool.queue,
                                            (void*)&mtask, sizeof(void*));
                                conn->tmabort_state =
                                        ((struct iscsi_tm_rsp *)hdr)->
                                        response == ISCSI_TMF_RSP_COMPLETE ?
                                                TMABORT_SUCCESS:TMABORT_FAILED;
                                /* unblock eh_abort() */
                                wake_up(&conn->ehwait);
                        spin_unlock(&session->lock);
                case ISCSI_OP_NOOP_IN:
                        /* a NOP-In answering our NOP-Out must not carry a
                         * valid TTT */
                        if (hdr->ttt != ISCSI_RESERVED_TAG) {
                                rc = ISCSI_ERR_PROTO;
                        rc = iscsi_check_assign_cmdsn(session,
                                                (struct iscsi_nopin*)hdr);

                        conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;

                        if (!conn->in.datalen) {
                                struct iscsi_mgmt_task *mtask;

                                rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
                                mtask = (struct iscsi_mgmt_task *)
                                        session->mgmt_cmds[conn->in.itt -
                                                ISCSI_MGMT_ITT_OFFSET];
                                if (conn->login_mtask != mtask) {
                                        spin_lock(&session->lock);
                                        __kfifo_put(session->mgmtpool.queue,
                                            (void*)&mtask, sizeof(void*));
                                        spin_unlock(&session->lock);
                /* (default label elided) */
                        rc = ISCSI_ERR_BAD_OPCODE;

        /* --- target-initiated PDUs (reserved tag) --- */
        } else if (conn->in.itt == ISCSI_RESERVED_TAG) {
                switch(conn->in.opcode) {
                case ISCSI_OP_NOOP_IN:
                        if (!conn->in.datalen) {
                                rc = iscsi_check_assign_cmdsn(session,
                                                (struct iscsi_nopin*)hdr);
                                if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
                                        rc = iscsi_recv_pdu(iscsi_handle(conn),
                        /* else (elided): NOP-In with data not supported */
                                rc = ISCSI_ERR_PROTO;
                case ISCSI_OP_REJECT:
                        /* we need sth like iscsi_reject_rsp()*/
                case ISCSI_OP_ASYNC_EVENT:
                        /* we need sth like iscsi_async_event_rsp() */
                        rc = ISCSI_ERR_BAD_OPCODE;
                /* (default label elided) */
                        rc = ISCSI_ERR_BAD_OPCODE;
        /* else (elided): ITT in no valid range */
                rc = ISCSI_ERR_BAD_ITT;
/**
 * iscsi_ctask_copy - copy skb bits to the destination cmd task
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @buf: buffer to copy to
 * @buf_size: size of buffer
 * @offset: offset within the buffer
 *
 * The function calls skb_copy_bits() and updates per-connection and
 * per-cmd byte counters.
 *
 * Read counters (in bytes):
 *
 *	conn->in.offset		offset within in progress SKB
 *	conn->in.copy		left to copy from in progress SKB
 *	conn->in.copied		copied already from in progress SKB
 *	conn->data_copied	copied already from in progress buffer
 *	ctask->sent		total bytes sent up to the MidLayer
 *	ctask->data_count	left to copy from in progress Data-In
 *	buf_left		left to copy from in progress buffer
 *
 * Return (from the visible tail): remaining bytes of this PDU when the
 * buffer is exhausted but the PDU is not; 0-ish completion paths are
 * partially elided in this extract.
 */
iscsi_ctask_copy(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
                 void *buf, int buf_size, int offset)
        int buf_left = buf_size - (conn->data_copied + offset);
        int size = min(conn->in.copy, buf_left);

        /* never copy more than the PDU still owes us */
        size = min(size, ctask->data_count);

        debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
                  size, conn->in.offset, conn->in.copied);

        BUG_ON(ctask->sent + size > ctask->total_length);

        rc = skb_copy_bits(conn->in.skb, conn->in.offset,
                           (char*)buf + (offset + conn->data_copied), size);
        /* must fit into skb->len */

        conn->in.offset += size;
        conn->in.copy -= size;
        conn->in.copied += size;
        conn->data_copied += size;
        ctask->data_count -= size;

        BUG_ON(conn->in.copy < 0);
        BUG_ON(ctask->data_count < 0);

        if (buf_size != (conn->data_copied + offset)) {
                if (!ctask->data_count) {
                        BUG_ON(buf_size - conn->data_copied < 0);
                        /* done with this PDU */
                        return buf_size - conn->data_copied;

        /* done with this buffer or with both - PDU and buffer */
        conn->data_copied = 0;
/**
 * iscsi_tcp_copy - copy skb bits to the destination buffer
 * @conn: iscsi connection
 * @buf: buffer to copy to
 * @buf_size: number of bytes to copy
 *
 * The function calls skb_copy_bits() and updates per-connection
 * byte counters.  Non-zero return (elided here) signals that more
 * skb data is needed to fill @buf.
 */
iscsi_tcp_copy(struct iscsi_conn *conn, void *buf, int buf_size)
        int buf_left = buf_size - conn->data_copied;
        int size = min(conn->in.copy, buf_left);

        debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
                  size, conn->in.offset, conn->data_copied);

        rc = skb_copy_bits(conn->in.skb, conn->in.offset,
                           (char*)buf + conn->data_copied, size);

        conn->in.offset += size;
        conn->in.copy -= size;
        conn->in.copied += size;
        conn->data_copied += size;

        if (buf_size != conn->data_copied)
/*
 * partial_sg_digest_update - feed part of one sg element into the rx digest
 * @conn: iscsi connection (data_rx_tfm is the receive digest transform)
 * @sg: scatterlist element the partial range lives in
 * @offset: byte offset of the range (relative to the page, matching the
 *          callers' usage: they pass sg->offset + delta)
 * @length: number of bytes to digest
 *
 * Builds a temporary sg entry so crypto_digest_update() only sees the
 * wanted sub-range.
 */
partial_sg_digest_update(struct iscsi_conn *conn, struct scatterlist *sg,
                         int offset, int length)
        struct scatterlist temp;

        memcpy(&temp, sg, sizeof(struct scatterlist));
        temp.offset = offset;
        temp.length = length;
        crypto_digest_update(conn->data_rx_tfm, &temp, 1);
/*
 * iscsi_recv_digest_update - feed a flat buffer into the rx data digest
 * @conn: iscsi connection
 * @buf: kernel-virtual buffer that was just received into
 * @len: number of bytes to digest
 */
iscsi_recv_digest_update(struct iscsi_conn *conn, char* buf, int len)
        struct scatterlist tmp;

        sg_init_one(&tmp, buf, len);
        crypto_digest_update(conn->data_rx_tfm, &tmp, 1);
/*
 * iscsi_scsi_data_in - copy a Data-In segment into the Scsi_Cmnd buffer
 * @conn: iscsi connection (conn->in.ctask is the owning command task)
 *
 * Handles both flat buffers and scatter-gather lists: skips already-filled
 * sg elements based on ctask->data_offset, kmaps each target page and
 * copies via iscsi_ctask_copy(), updating the rx data digest as it goes.
 * When the PDU carried piggybacked status, completes the command.
 * NOTE(review): several branch/brace lines are elided in this extract.
 */
static int iscsi_scsi_data_in(struct iscsi_conn *conn)
        struct iscsi_cmd_task *ctask = conn->in.ctask;
        struct scsi_cmnd *sc = ctask->sc;
        struct scatterlist *sg;
        int i, offset, rc = 0;

        BUG_ON((void*)ctask != sc->SCp.ptr);

        /*
         * copying Data-In into the Scsi_Cmnd
         */
        /* flat-buffer case (branch header elided): */
                i = ctask->data_count;
                rc = iscsi_ctask_copy(conn, ctask, sc->request_buffer,
                                      sc->request_bufflen, ctask->data_offset);
                if (conn->datadgst_en)
                        iscsi_recv_digest_update(conn, sc->request_buffer, i);

        /* scatter-gather case: */
        offset = ctask->data_offset;
        sg = sc->request_buffer;

        /* skip sg elements wholly before data_offset */
        if (ctask->data_offset)
                for (i = 0; i < ctask->sg_count; i++)
                        offset -= sg[i].length;
        /* we've passed through partial sg*/

        for (i = ctask->sg_count; i < sc->use_sg; i++) {
                dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
                rc = iscsi_ctask_copy(conn, ctask, dest + sg[i].offset,
                                      sg[i].length, offset);
                kunmap_atomic(dest, KM_SOFTIRQ0);

                        /* continue with the next SKB/PDU */

                if (conn->datadgst_en) {
                        /* full element digested directly (condition elided) */
                                crypto_digest_update(conn->data_rx_tfm,
                        /* partial element (else elided) */
                                partial_sg_digest_update(conn, &sg[i],
                                                sg[i].offset + offset,
                                                sg[i].length - offset);

                if (!ctask->data_count) {
                        if (rc && conn->datadgst_en)
                                /*
                                 * data-in is complete, but buffer not...
                                 */
                                partial_sg_digest_update(conn, &sg[i],
                                                sg[i].offset, sg[i].length-rc);

        BUG_ON(ctask->data_count);

        /* check for non-exceptional status */
        if (conn->in.flags & ISCSI_FLAG_DATA_STATUS) {
                debug_scsi("done [sc %lx res %d itt 0x%x]\n",
                           (long)sc, sc->result, ctask->itt);
                conn->scsirsp_pdus_cnt++;
                iscsi_ctask_cleanup(conn, ctask);
/*
 * iscsi_data_recv - dispatch the data segment of the current PDU
 * @conn: iscsi connection
 *
 * Data-In goes to the SCSI buffers via iscsi_scsi_data_in(); response /
 * text / login / nop data is collected into conn->data and forwarded to
 * userspace via iscsi_recv_pdu().  Management tasks are recycled to the
 * pool once their data is delivered.
 */
iscsi_data_recv(struct iscsi_conn *conn)
        struct iscsi_session *session = conn->session;

        switch(conn->in.opcode) {
        case ISCSI_OP_SCSI_DATA_IN:
                rc = iscsi_scsi_data_in(conn);
        case ISCSI_OP_SCSI_CMD_RSP: {
                /*
                 * copying the entire Data Segment.
                 */
                if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {

                /* sense data: bytes 0-1 of the segment are the length;
                 * re-run the response handler now that it is complete */
                conn->in.hdr = &conn->hdr;
                conn->senselen = (conn->data[0] << 8) | conn->data[1];
                rc = iscsi_cmd_rsp(conn, conn->in.ctask);
                if (!rc && conn->datadgst_en)
                        iscsi_recv_digest_update(conn, conn->data,
        case ISCSI_OP_TEXT_RSP:
        case ISCSI_OP_LOGIN_RSP:
        case ISCSI_OP_NOOP_IN: {
                struct iscsi_mgmt_task *mtask = NULL;

                if (conn->in.itt != ISCSI_RESERVED_TAG)
                        mtask = (struct iscsi_mgmt_task *)
                                session->mgmt_cmds[conn->in.itt -
                                        ISCSI_MGMT_ITT_OFFSET];

                /*
                 * Collect data segment to the connection's data
                 * placeholder
                 */
                if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {

                rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr,
                                    conn->data, conn->in.datalen);

                /* login data is digested elsewhere — skip it here */
                if (!rc && conn->datadgst_en &&
                    conn->in.opcode != ISCSI_OP_LOGIN_RSP)
                        iscsi_recv_digest_update(conn, conn->data,

                if (mtask && conn->login_mtask != mtask) {
                        spin_lock(&session->lock);
                        __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
                        spin_unlock(&session->lock);
        case ISCSI_OP_ASYNC_EVENT:
        case ISCSI_OP_REJECT:
/**
 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 *
 * Main receive state machine, driven from tcp_read_sock(): extracts the
 * PDU header (IN_PROGRESS_WAIT_HEADER / HEADER_GATHER), processes it,
 * consumes the data segment (IN_PROGRESS_DATA_RECV), then padding and the
 * optional data digest (IN_PROGRESS_DDIGEST_RECV).  Returns the number of
 * bytes consumed from @skb.
 * NOTE(review): the top-level `more:` loop label and several branch lines
 * are elided in this extract; visible statements are unchanged.
 */
iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
                    unsigned int offset, size_t len)
        struct iscsi_conn *conn = rd_desc->arg.data;
        char pad[ISCSI_PAD_LEN];
        struct scatterlist sg;

        /*
         * Save current SKB and its offset in the corresponding
         * connection context.
         */
        conn->in.copy = skb->len - offset;
        conn->in.offset = offset;
        conn->in.len = conn->in.copy;
        BUG_ON(conn->in.copy <= 0);
        debug_tcp("in %d bytes\n", conn->in.copy);

        conn->in.copied = 0;

        /* receive side suspended (e.g. by iscsi_conn_failure): drop */
        if (unlikely(conn->suspend_rx)) {
                debug_tcp("conn %d Rx suspended!\n", conn->id);

        if (conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
            conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
                rc = iscsi_hdr_extract(conn);
                        iscsi_conn_failure(conn, rc);

                /*
                 * Verify and process incoming PDU header.
                 */
                rc = iscsi_hdr_recv(conn);
                if (!rc && conn->in.datalen) {
                        if (conn->datadgst_en) {
                                BUG_ON(!conn->data_rx_tfm);
                                crypto_digest_init(conn->data_rx_tfm);
                        conn->in_progress = IN_PROGRESS_DATA_RECV;
                /* error path (else-if elided): */
                        iscsi_conn_failure(conn, rc);

        if (conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
                uint32_t recv_digest;

                debug_tcp("extra data_recv offset %d copy %d\n",
                          conn->in.offset, conn->in.copy);
                /* pull the 4-byte data digest off the wire and compare */
                skb_copy_bits(conn->in.skb, conn->in.offset,
                conn->in.offset += 4;
                if (recv_digest != conn->in.datadgst) {
                        debug_tcp("iscsi_tcp: data digest error!"
                                  "0x%x != 0x%x\n", recv_digest,
                        iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
                /* else (elided): digest OK */
                        debug_tcp("iscsi_tcp: data digest match!"
                                  "0x%x == 0x%x\n", recv_digest,
                        conn->in_progress = IN_PROGRESS_WAIT_HEADER;

        if (conn->in_progress == IN_PROGRESS_DATA_RECV && conn->in.copy) {
                debug_tcp("data_recv offset %d copy %d\n",
                          conn->in.offset, conn->in.copy);

                rc = iscsi_data_recv(conn);
                if (rc == -EAGAIN) {
                        /* partial segment: tell tcp_read_sock how much of
                         * this PDU is still outstanding */
                        rd_desc->count = conn->in.datalen -
                                         conn->in.ctask->data_count;
                        iscsi_conn_failure(conn, rc);

                /* swallow read padding; it is digested too when enabled */
                conn->in.copy -= conn->in.padding;
                conn->in.offset += conn->in.padding;
                if (conn->datadgst_en) {
                        if (conn->in.padding) {
                                debug_tcp("padding -> %d\n", conn->in.padding);
                                memset(pad, 0, conn->in.padding);
                                sg_init_one(&sg, pad, conn->in.padding);
                                crypto_digest_update(conn->data_rx_tfm, &sg, 1);
                        crypto_digest_final(conn->data_rx_tfm,
                                            (u8 *) & conn->in.datadgst);
                        debug_tcp("rx digest 0x%x\n", conn->in.datadgst);
                        conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
                /* else (elided): no digest -> next PDU header */
                        conn->in_progress = IN_PROGRESS_WAIT_HEADER;

        debug_tcp("f, processed %d from out of %d padding %d\n",
                  conn->in.offset - offset, (int)len, conn->in.padding);
        BUG_ON(conn->in.offset - offset > len);

        if (conn->in.offset - offset != len) {
                debug_tcp("continue to process %d bytes\n",
                          (int)len - (conn->in.offset - offset));
                /* loop back to process the rest of the skb (goto elided) */

        processed = conn->in.offset - offset;
        BUG_ON(processed == 0);

        processed = conn->in.offset - offset;
        debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
                  processed, (int)len, (int)rd_desc->count);
        BUG_ON(processed == 0);
        BUG_ON(processed > len);

        conn->rxdata_octets += processed;
/*
 * iscsi_tcp_data_ready - sk->sk_data_ready callback
 * @sk: socket with new data
 * @flag: unused here (signature dictated by the socket layer)
 *
 * Pumps the socket through tcp_read_sock(), which invokes
 * iscsi_tcp_data_recv() for each contiguous chunk.
 */
iscsi_tcp_data_ready(struct sock *sk, int flag)
        struct iscsi_conn *conn = sk->sk_user_data;
        read_descriptor_t rd_desc;

        read_lock(&sk->sk_callback_lock);

        /* use rd_desc to pass 'conn' to iscsi_tcp_data_recv */
        rd_desc.arg.data = conn;
        tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);

        read_unlock(&sk->sk_callback_lock);
/*
 * iscsi_tcp_state_change - sk->sk_state_change callback
 * @sk: socket whose TCP state changed
 *
 * Treats a transition to CLOSE/CLOSE_WAIT with no pending receive data as
 * a connection failure, then chains to the original callback saved in
 * iscsi_conn_set_callbacks().
 */
iscsi_tcp_state_change(struct sock *sk)
        struct iscsi_conn *conn;
        struct iscsi_session *session;
        void (*old_state_change)(struct sock *);

        read_lock(&sk->sk_callback_lock);

        conn = (struct iscsi_conn*)sk->sk_user_data;
        session = conn->session;

        if ((sk->sk_state == TCP_CLOSE_WAIT ||
             sk->sk_state == TCP_CLOSE) &&
            !atomic_read(&sk->sk_rmem_alloc)) {
                debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);

        /* snapshot under the lock, call outside it */
        old_state_change = conn->old_state_change;

        read_unlock(&sk->sk_callback_lock);

        old_state_change(sk);
/**
 * iscsi_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 *
 * Chains to the saved callback, lifts the tx suspension set by a failed
 * send, and reschedules the xmit worker.
 */
iscsi_write_space(struct sock *sk)
        struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;

        conn->old_write_space(sk);
        debug_tcp("iscsi_write_space: cid %d\n", conn->id);
        clear_bit(SUSPEND_BIT, &conn->suspend_tx);
        schedule_work(&conn->xmitwork);
/*
 * iscsi_conn_set_callbacks - hook this connection into the socket
 * @conn: iscsi connection owning conn->sock
 *
 * Saves the socket's original data_ready/state_change/write_space
 * callbacks (restored later by iscsi_conn_restore_callbacks) and installs
 * the iSCSI ones, under sk_callback_lock.
 */
iscsi_conn_set_callbacks(struct iscsi_conn *conn)
        struct sock *sk = conn->sock->sk;

        /* assign new callbacks */
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_user_data = conn;
        conn->old_data_ready = sk->sk_data_ready;
        conn->old_state_change = sk->sk_state_change;
        conn->old_write_space = sk->sk_write_space;
        sk->sk_data_ready = iscsi_tcp_data_ready;
        sk->sk_state_change = iscsi_tcp_state_change;
        sk->sk_write_space = iscsi_write_space;
        write_unlock_bh(&sk->sk_callback_lock);
/*
 * iscsi_conn_restore_callbacks - detach this connection from the socket
 * @conn: iscsi connection owning conn->sock
 *
 * Puts back the callbacks saved by iscsi_conn_set_callbacks() and clears
 * sk_user_data so late-arriving events no longer reach the freed conn.
 */
iscsi_conn_restore_callbacks(struct iscsi_conn *conn)
        struct sock *sk = conn->sock->sk;

        /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_user_data = NULL;
        sk->sk_data_ready = conn->old_data_ready;
        sk->sk_state_change = conn->old_state_change;
        sk->sk_write_space = conn->old_write_space;
        sk->sk_no_check = 0;
        write_unlock_bh(&sk->sk_callback_lock);
/**
 * iscsi_send - generic send routine
 * @sk: kernel's socket
 * @buf: buffer to write from
 * @size: actual size to write
 * @flags: socket's flags
 *
 * Chooses between the zero-copy sendpage path and sock_no_sendpage()
 * (which internally falls back to sendmsg) based on buf->use_sendmsg,
 * set by the iscsi_buf_init_* helpers.
 */
iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
        struct socket *sk = conn->sock;
        int offset = buf->sg.offset + buf->sent;

        /*
         * if we got use_sg=0 or are sending something we kmallocd
         * then we did not have to do kmap (kmap returns page_address)
         *
         * if we got use_sg > 0, but had to drop down, we do not
         * set clustering so this should only happen for that
         * slab case.
         */
        if (buf->use_sendmsg)
                return sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
        /* else (line elided in extract) */
                return conn->sendpage(sk, buf->sg.page, offset, size, flags);
/**
 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @datalen: length of data to be sent after the header
 *
 * Sends the remaining portion of the header buffer; uses MSG_MORE (setting
 * line elided in this extract) when a data segment follows.  -EAGAIN
 * suspends the tx side until iscsi_write_space(); -EPIPE fails the
 * connection.
 */
iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
        int flags = 0; /* MSG_DONTWAIT; */

        size = buf->sg.length - buf->sent;
        BUG_ON(buf->sent + size > buf->sg.length);
        if (buf->sent + size != buf->sg.length || datalen)

        res = iscsi_send(conn, buf, size, flags);
        debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
        /* success branch (condition elided): account transmitted bytes */
                conn->txdata_octets += res;
        } else if (res == -EAGAIN) {
                conn->sendpage_failures_cnt++;
                set_bit(SUSPEND_BIT, &conn->suspend_tx);
        } else if (res == -EPIPE)
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1370 * iscsi_sendpage - send one page of iSCSI Data-Out.
1371 * @conn: iscsi connection
1372 * @buf: buffer to write from
1373 * @count: remaining data
1374 * @sent: number of bytes sent
1380 iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
1381 int *count, int *sent)
1383 int flags = 0; /* MSG_DONTWAIT; */
/* bytes of this buffer still to transmit */
1386 size = buf->sg.length - buf->sent;
1387 BUG_ON(buf->sent + size > buf->sg.length);
/* more data follows this chunk: let TCP coalesce */
1390 if (buf->sent + size != buf->sg.length || *count != size)
1393 res = iscsi_send(conn, buf, size, flags);
1394 debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
1395 size, buf->sent, *count, *sent, res);
1397 conn->txdata_octets += res;
/* -EAGAIN: socket buffer full; retry when write space opens up */
1404 } else if (res == -EAGAIN) {
1405 conn->sendpage_failures_cnt++;
1406 set_bit(SUSPEND_BIT, &conn->suspend_tx);
1407 } else if (res == -EPIPE)
1408 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
/*
 * iscsi_data_digest_init - start a fresh CRC32C data-digest computation
 * for the Data-Out payload of @ctask.  digest_count is the digest's
 * on-wire size (4 bytes), consumed by iscsi_digest_final_send().
 */
1414 iscsi_data_digest_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1416 BUG_ON(!conn->data_tx_tfm);
1417 crypto_digest_init(conn->data_tx_tfm);
1418 ctask->digest_count = 4;
/*
 * iscsi_digest_final_send - finalize the running data digest and
 * transmit it on the wire.
 *
 * On partial send the digest value is stashed in ctask->datadigest and
 * XMSTATE_DATA_DIGEST is set so the xmit state machine retries later.
 */
1422 iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1423 struct iscsi_buf *buf, uint32_t *digest, int final)
1429 crypto_digest_final(conn->data_tx_tfm, (u8*)digest);
1431 iscsi_buf_init_virt(buf, (char*)digest, 4);
1432 rc = iscsi_sendpage(conn, buf, &ctask->digest_count, &sent);
/* send failed part-way: remember digest for the retry path */
1434 ctask->datadigest = *digest;
1435 ctask->xmstate |= XMSTATE_DATA_DIGEST;
/* reset for the next digest transmission */
1437 ctask->digest_count = 4;
1442 * iscsi_solicit_data_cont - initialize next Data-Out
1443 * @conn: iscsi connection
1444 * @ctask: scsi command task
1446 * @left: bytes left to transfer
1449 * Initialize next Data-Out within this R2T sequence and continue
1450 * to process next Scatter-Gather element(if any) of this SCSI command.
1452 * Called under connection lock.
1455 iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1456 struct iscsi_r2t_info *r2t, int left)
1458 struct iscsi_data *hdr;
1459 struct iscsi_data_task *dtask;
1460 struct scsi_cmnd *sc = ctask->sc;
/* NOTE(review): mempool_alloc with GFP_ATOMIC can still fail for
 * allocations beyond the pool reserve; no NULL check is visible here */
1463 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1466 memset(hdr, 0, sizeof(struct iscsi_data));
/* echo the target transfer tag and continue the R2T's DataSN series */
1467 hdr->ttt = r2t->ttt;
1468 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1469 r2t->solicit_datasn++;
1470 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1471 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1472 hdr->itt = ctask->hdr.itt;
1473 hdr->exp_statsn = r2t->exp_statsn;
/* buffer offset advances by what this R2T already sent */
1474 new_offset = r2t->data_offset + r2t->sent;
1475 hdr->offset = cpu_to_be32(new_offset);
/* clamp each Data-Out to the negotiated MaxRecvDataSegmentLength;
 * only the final PDU of the sequence carries the F bit */
1476 if (left > conn->max_xmit_dlength) {
1477 hton24(hdr->dlength, conn->max_xmit_dlength);
1478 r2t->data_count = conn->max_xmit_dlength;
1480 hton24(hdr->dlength, left);
1481 r2t->data_count = left;
1482 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1484 conn->dataout_pdus_cnt++;
1486 iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
1487 sizeof(struct iscsi_hdr));
/* advance to the next SG element once the current one is drained */
1491 if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) {
1492 BUG_ON(ctask->bad_sg == r2t->sg);
1493 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1496 iscsi_buf_init_iov(&ctask->sendbuf,
1497 (char*)sc->request_buffer + new_offset,
/* track the dtask so it can be freed when the command completes */
1500 list_add(&dtask->item, &ctask->dataqueue);
/*
 * iscsi_unsolicit_data_init - build the header for the next
 * unsolicited Data-Out PDU of @ctask (data sent without waiting for an
 * R2T, within the negotiated FirstBurstLength).
 */
1504 iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1506 struct iscsi_data *hdr;
1507 struct iscsi_data_task *dtask;
/* NOTE(review): allocation result is not visibly NULL-checked */
1509 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1512 memset(hdr, 0, sizeof(struct iscsi_data));
/* unsolicited data carries the reserved TTT */
1513 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
1514 hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
1515 ctask->unsol_datasn++;
1516 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1517 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1518 hdr->itt = ctask->hdr.itt;
1519 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
/* offset = bytes already covered by immediate data and prior
 * unsolicited Data-Outs */
1520 hdr->offset = cpu_to_be32(ctask->total_length -
1521 ctask->r2t_data_count -
1522 ctask->unsol_count);
/* clamp to MaxRecvDataSegmentLength; F bit only on the last PDU */
1523 if (ctask->unsol_count > conn->max_xmit_dlength) {
1524 hton24(hdr->dlength, conn->max_xmit_dlength);
1525 ctask->data_count = conn->max_xmit_dlength;
1528 hton24(hdr->dlength, ctask->unsol_count);
1529 ctask->data_count = ctask->unsol_count;
1530 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1533 iscsi_buf_init_virt(&ctask->headbuf, (char*)hdr,
1534 sizeof(struct iscsi_hdr));
/* keep the dtask for later cleanup and for the xmit state machine */
1536 list_add(&dtask->item, &ctask->dataqueue);
1538 ctask->dtask = dtask;
1542 * iscsi_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1543 * @conn: iscsi connection
1544 * @ctask: scsi command task
1548 iscsi_cmd_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1549 struct scsi_cmnd *sc)
1551 struct iscsi_session *session = conn->session;
/* task must not carry stale R2Ts from a previous use */
1553 BUG_ON(__kfifo_len(ctask->r2tqueue));
1557 ctask->hdr.opcode = ISCSI_OP_SCSI_CMD;
1558 ctask->hdr.flags = ISCSI_ATTR_SIMPLE;
1559 int_to_scsilun(sc->device->lun, (struct scsi_lun *)ctask->hdr.lun);
/* ITT encodes task slot, connection id and session age so stale
 * responses after recovery can be detected */
1560 ctask->hdr.itt = ctask->itt | (conn->id << CID_SHIFT) |
1561 (session->age << AGE_SHIFT);
1562 ctask->hdr.data_length = cpu_to_be32(sc->request_bufflen);
1563 ctask->hdr.cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++;
1564 ctask->hdr.exp_statsn = cpu_to_be32(conn->exp_statsn);
1565 memcpy(ctask->hdr.cdb, sc->cmnd, sc->cmd_len);
/* zero-pad the CDB field to its full fixed size */
1566 memset(&ctask->hdr.cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
1568 ctask->mtask = NULL;
1570 ctask->sg_count = 0;
1572 ctask->total_length = sc->request_bufflen;
1574 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1575 ctask->exp_r2tsn = 0;
1576 ctask->hdr.flags |= ISCSI_FLAG_CMD_WRITE;
1577 BUG_ON(ctask->total_length == 0);
/* scatter-gather request: start at the first SG element */
1579 struct scatterlist *sg = sc->request_buffer;
1581 iscsi_buf_init_sg(&ctask->sendbuf,
1582 &sg[ctask->sg_count++]);
/* sentinel: one past the last valid SG element */
1584 ctask->bad_sg = sg + sc->use_sg;
1586 iscsi_buf_init_iov(&ctask->sendbuf, sc->request_buffer,
1587 sc->request_bufflen);
/* Write path accounting:
 * imm_count bytes to be sent right after
1596 * unsol_count bytes(as Data-Out) to be sent
1597 * without R2T ack right after
1600 * r2t_data_count bytes to be sent via R2T ack's
1602 * pad_count bytes to be sent as zero-padding
 */
1604 ctask->imm_count = 0;
1605 ctask->unsol_count = 0;
1606 ctask->unsol_datasn = 0;
1607 ctask->xmstate = XMSTATE_W_HDR;
1608 /* calculate write padding (payload padded to ISCSI_PAD_LEN) */
1609 ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1);
1610 if (ctask->pad_count) {
1611 ctask->pad_count = ISCSI_PAD_LEN - ctask->pad_count;
1612 debug_scsi("write padding %d bytes\n",
1614 ctask->xmstate |= XMSTATE_W_PAD;
/* ImmediateData negotiated: ship up to min(FirstBurstLength,
 * MaxRecvDataSegmentLength) inside the command PDU itself */
1616 if (session->imm_data_en) {
1617 if (ctask->total_length >= session->first_burst)
1618 ctask->imm_count = min(session->first_burst,
1619 conn->max_xmit_dlength);
1621 ctask->imm_count = min(ctask->total_length,
1622 conn->max_xmit_dlength);
1623 hton24(ctask->hdr.dlength, ctask->imm_count);
1624 ctask->xmstate |= XMSTATE_IMM_DATA;
1626 zero_data(ctask->hdr.dlength);
/* InitialR2T=No: remainder of the first burst may go out
 * unsolicited, without waiting for an R2T */
1628 if (!session->initial_r2t_en)
1629 ctask->unsol_count = min(session->first_burst,
1630 ctask->total_length) - ctask->imm_count;
1631 if (!ctask->unsol_count)
1632 /* No unsolicited Data-Out's */
1633 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1635 ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
/* everything beyond the first burst is solicited via R2Ts */
1637 ctask->r2t_data_count = ctask->total_length -
1641 debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
1643 ctask->itt, ctask->total_length, ctask->imm_count,
1644 ctask->unsol_count, ctask->r2t_data_count);
/* non-write: single PDU, F bit always set */
1646 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1647 if (sc->sc_data_direction == DMA_FROM_DEVICE)
1648 ctask->hdr.flags |= ISCSI_FLAG_CMD_READ;
1650 ctask->xmstate = XMSTATE_R_HDR;
1651 zero_data(ctask->hdr.dlength);
1654 iscsi_buf_init_virt(&ctask->headbuf, (char*)&ctask->hdr,
1655 sizeof(struct iscsi_hdr));
1656 conn->scsicmd_pdus_cnt++;
1660 * iscsi_mtask_xmit - xmit management(immediate) task
1661 * @conn: iscsi connection
1662 * @mtask: task management task
1665 * The function can return -EAGAIN in which case caller must
1666 * call it again later, or recover. '0' return code means successful
1669 * Management xmit state machine consists of two states:
1670 * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
1671 * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
1674 iscsi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1677 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
1678 conn->id, mtask->xmstate, mtask->itt);
/* Phase 1: PDU header (clear state first, restore it on failure) */
1680 if (mtask->xmstate & XMSTATE_IMM_HDR) {
1681 mtask->xmstate &= ~XMSTATE_IMM_HDR;
1682 if (mtask->data_count)
1683 mtask->xmstate |= XMSTATE_IMM_DATA;
/* header digest only once the connection is past login and not
 * in recovery */
1684 if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
1685 conn->stop_stage != STOP_CONN_RECOVER &&
1687 iscsi_hdr_digest(conn, &mtask->headbuf,
1688 (u8*)mtask->hdrext);
1689 if (iscsi_sendhdr(conn, &mtask->headbuf, mtask->data_count)) {
1690 mtask->xmstate |= XMSTATE_IMM_HDR;
1691 if (mtask->data_count)
1692 mtask->xmstate &= ~XMSTATE_IMM_DATA;
/* Phase 2: PDU payload */
1697 if (mtask->xmstate & XMSTATE_IMM_DATA) {
1698 BUG_ON(!mtask->data_count);
1699 mtask->xmstate &= ~XMSTATE_IMM_DATA;
1700 /* FIXME: implement.
1701 * Virtual buffer could be spread across multiple pages...
1704 if (iscsi_sendpage(conn, &mtask->sendbuf,
1705 &mtask->data_count, &mtask->sent)) {
1706 mtask->xmstate |= XMSTATE_IMM_DATA;
1709 } while (mtask->data_count);
/* both phases done: state machine must be idle again */
1712 BUG_ON(mtask->xmstate != XMSTATE_IDLE);
/*
 * handle_xmstate_r_hdr - transmit the command PDU of a READ.
 *
 * Returns 0 on success (nothing more to send; Data-In arrives via the
 * rx path); on partial send the R_HDR bit is restored so the caller
 * retries later.
 */
1717 handle_xmstate_r_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1719 ctask->xmstate &= ~XMSTATE_R_HDR;
1720 if (conn->hdrdgst_en)
1721 iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
1722 if (!iscsi_sendhdr(conn, &ctask->headbuf, 0)) {
1723 BUG_ON(ctask->xmstate != XMSTATE_IDLE);
1724 return 0; /* wait for Data-In */
1726 ctask->xmstate |= XMSTATE_R_HDR;
/*
 * handle_xmstate_w_hdr - transmit the command PDU of a WRITE, telling
 * iscsi_sendhdr() how much immediate data follows.  On partial send
 * the W_HDR bit is restored so the state machine retries later.
 */
1731 handle_xmstate_w_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1733 ctask->xmstate &= ~XMSTATE_W_HDR;
1734 if (conn->hdrdgst_en)
1735 iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
1736 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->imm_count)) {
1737 ctask->xmstate |= XMSTATE_W_HDR;
/*
 * handle_xmstate_data_digest - retransmit a data digest whose earlier
 * send came up short (digest value was saved in ctask->datadigest).
 */
1744 handle_xmstate_data_digest(struct iscsi_conn *conn,
1745 struct iscsi_cmd_task *ctask)
1747 ctask->xmstate &= ~XMSTATE_DATA_DIGEST;
1748 debug_tcp("resent data digest 0x%x\n", ctask->datadigest);
1749 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1750 &ctask->datadigest, 0)) {
1751 ctask->xmstate |= XMSTATE_DATA_DIGEST;
1752 debug_tcp("resent data digest 0x%x fail!\n",
/*
 * handle_xmstate_imm_data - transmit the immediate-data segment of a
 * WRITE command, page by page, updating the running data digest as
 * pages go out.  On a partial send the IMM_DATA bit is restored and
 * the partial digest is finalized for the retry path.
 */
1760 handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1762 BUG_ON(!ctask->imm_count);
1763 ctask->xmstate &= ~XMSTATE_IMM_DATA;
1765 if (conn->datadgst_en) {
1766 iscsi_data_digest_init(conn, ctask);
1767 ctask->immdigest = 0;
1771 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->imm_count,
1773 ctask->xmstate |= XMSTATE_IMM_DATA;
/* snapshot the partial digest so recovery can resend it */
1774 if (conn->datadgst_en) {
1775 crypto_digest_final(conn->data_tx_tfm,
1776 (u8*)&ctask->immdigest);
1777 debug_tcp("tx imm sendpage fail 0x%x\n",
/* fold the bytes just sent into the running digest */
1782 if (conn->datadgst_en)
1783 crypto_digest_update(conn->data_tx_tfm,
1784 &ctask->sendbuf.sg, 1);
/* advance to the next scatter-gather element if data remains */
1786 if (!ctask->imm_count)
1788 iscsi_buf_init_sg(&ctask->sendbuf,
1789 &ctask->sg[ctask->sg_count++]);
/* no padding pending: close out and send the immediate-data digest */
1792 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1793 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1794 &ctask->immdigest, 1)) {
1795 debug_tcp("sending imm digest 0x%x fail!\n",
1799 debug_tcp("sending imm digest 0x%x\n", ctask->immdigest);
/*
 * handle_xmstate_uns_hdr - build (on first entry, UNS_INIT set) and
 * transmit the header of the next unsolicited Data-Out PDU.  On a
 * partial send the state flips back from UNS_DATA to UNS_HDR so the
 * header is retried before any payload goes out.
 */
1806 handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1808 struct iscsi_data_task *dtask;
1810 ctask->xmstate |= XMSTATE_UNS_DATA;
1811 if (ctask->xmstate & XMSTATE_UNS_INIT) {
1812 iscsi_unsolicit_data_init(conn, ctask);
1813 BUG_ON(!ctask->dtask);
1814 dtask = ctask->dtask;
1815 if (conn->hdrdgst_en)
1816 iscsi_hdr_digest(conn, &ctask->headbuf,
1817 (u8*)dtask->hdrext);
1818 ctask->xmstate &= ~XMSTATE_UNS_INIT;
1820 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->data_count)) {
1821 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1822 ctask->xmstate |= XMSTATE_UNS_HDR;
1826 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1827 ctask->itt, ctask->unsol_count, ctask->sent);
/*
 * handle_xmstate_uns_data - transmit the payload of the current
 * unsolicited Data-Out PDU, maintaining the data digest and the
 * remaining unsol_count.  When a full PDU completes, either another
 * unsolicited Data-Out is queued (UNS_INIT set again) or the
 * unsolicited phase is done.
 */
1832 handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1834 struct iscsi_data_task *dtask = ctask->dtask;
1836 BUG_ON(!ctask->data_count);
1837 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1839 if (conn->datadgst_en) {
1840 iscsi_data_digest_init(conn, ctask);
/* remember progress so unsol_count can be adjusted on failure */
1845 int start = ctask->sent;
1847 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->data_count,
1849 ctask->unsol_count -= ctask->sent - start;
1850 ctask->xmstate |= XMSTATE_UNS_DATA;
1851 /* will continue with this ctask later.. */
1852 if (conn->datadgst_en) {
1853 crypto_digest_final(conn->data_tx_tfm,
1854 (u8 *)&dtask->digest);
1855 debug_tcp("tx uns data fail 0x%x\n",
1861 BUG_ON(ctask->sent > ctask->total_length);
1862 ctask->unsol_count -= ctask->sent - start;
1865 * XXX:we may run here with un-initialized sendbuf.
/* only fold in bytes that actually went out this pass */
1868 if (conn->datadgst_en && ctask->sent - start > 0)
1869 crypto_digest_update(conn->data_tx_tfm,
1870 &ctask->sendbuf.sg, 1);
/* move to the next scatter-gather element if the PDU isn't done */
1872 if (!ctask->data_count)
1874 iscsi_buf_init_sg(&ctask->sendbuf,
1875 &ctask->sg[ctask->sg_count++]);
/* NOTE(review): BUG_ON(< 0) assumes unsol_count is signed */
1877 BUG_ON(ctask->unsol_count < 0);
1880 * Done with the Data-Out. Next, check if we need
1881 * to send another unsolicited Data-Out.
1883 if (ctask->unsol_count) {
1884 if (conn->datadgst_en) {
1885 if (iscsi_digest_final_send(conn, ctask,
1887 &dtask->digest, 1)) {
1888 debug_tcp("send uns digest 0x%x fail\n",
1892 debug_tcp("sending uns digest 0x%x, more uns\n",
1895 ctask->xmstate |= XMSTATE_UNS_INIT;
/* last unsolicited PDU: send its digest unless padding still pends */
1899 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1900 if (iscsi_digest_final_send(conn, ctask,
1902 &dtask->digest, 1)) {
1903 debug_tcp("send last uns digest 0x%x fail\n",
1907 debug_tcp("sending uns digest 0x%x\n",dtask->digest);
/*
 * handle_xmstate_sol_data - transmit the payload of the current
 * solicited (R2T-driven) Data-Out.  When one Data-Out completes it
 * either continues the same R2T sequence (iscsi_solicit_data_cont) or
 * recycles the R2T descriptor and dequeues the next pending R2T.
 */
1914 handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1916 struct iscsi_session *session = conn->session;
1917 struct iscsi_r2t_info *r2t = ctask->r2t;
1918 struct iscsi_data_task *dtask = r2t->dtask;
1921 ctask->xmstate &= ~XMSTATE_SOL_DATA;
1922 ctask->dtask = dtask;
1924 if (conn->datadgst_en) {
1925 iscsi_data_digest_init(conn, ctask);
1930 * send Data-Out within this R2T sequence.
1932 if (!r2t->data_count)
1935 if (iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent)) {
1936 ctask->xmstate |= XMSTATE_SOL_DATA;
1937 /* will continue with this ctask later.. */
/* snapshot the partial digest for the retry path */
1938 if (conn->datadgst_en) {
1939 crypto_digest_final(conn->data_tx_tfm,
1940 (u8 *)&dtask->digest);
1941 debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
1946 BUG_ON(r2t->data_count < 0);
1947 if (conn->datadgst_en)
1948 crypto_digest_update(conn->data_tx_tfm, &r2t->sendbuf.sg, 1);
/* PDU not finished: advance to the next SG element and loop */
1950 if (r2t->data_count) {
1951 BUG_ON(ctask->sc->use_sg == 0);
1952 if (!iscsi_buf_left(&r2t->sendbuf)) {
1953 BUG_ON(ctask->bad_sg == r2t->sg);
1954 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1962 * Done with this Data-Out. Next, check if we have
1963 * to send another Data-Out for this R2T.
1965 BUG_ON(r2t->data_length - r2t->sent < 0);
1966 left = r2t->data_length - r2t->sent;
1968 if (conn->datadgst_en) {
1969 if (iscsi_digest_final_send(conn, ctask,
1971 &dtask->digest, 1)) {
1972 debug_tcp("send r2t data digest 0x%x"
1973 "fail\n", dtask->digest);
1976 debug_tcp("r2t data send digest 0x%x\n",
/* more data owed for this R2T: set up the next Data-Out */
1979 iscsi_solicit_data_cont(conn, ctask, r2t, left);
1980 ctask->xmstate |= XMSTATE_SOL_DATA;
1981 ctask->xmstate &= ~XMSTATE_SOL_HDR;
1986 * Done with this R2T. Check if there are more
1987 * outstanding R2Ts ready to be processed.
1989 BUG_ON(ctask->r2t_data_count - r2t->data_length < 0);
1990 if (conn->datadgst_en) {
1991 if (iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
1992 &dtask->digest, 1)) {
1993 debug_tcp("send last r2t data digest 0x%x"
1994 "fail\n", dtask->digest);
1997 debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
2000 ctask->r2t_data_count -= r2t->data_length;
/* return the spent R2T descriptor to the pool under the session
 * lock, then pull the next queued R2T if the target sent one */
2002 spin_lock_bh(&session->lock);
2003 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
2004 spin_unlock_bh(&session->lock);
2005 if (__kfifo_get(ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
2007 ctask->xmstate |= XMSTATE_SOL_DATA;
2008 ctask->xmstate &= ~XMSTATE_SOL_HDR;
/*
 * handle_xmstate_w_pad - send the zero padding that rounds the write
 * payload up to ISCSI_PAD_LEN, then finalize and send the data digest
 * (immediate-data digest or Data-Out digest, depending on which path
 * produced the payload).
 */
2016 handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2018 struct iscsi_data_task *dtask = ctask->dtask;
2021 ctask->xmstate &= ~XMSTATE_W_PAD;
2022 iscsi_buf_init_virt(&ctask->sendbuf, (char*)&ctask->pad,
2024 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->pad_count, &sent)) {
2025 ctask->xmstate |= XMSTATE_W_PAD;
/* padding bytes are part of the digested payload */
2029 if (conn->datadgst_en) {
2030 crypto_digest_update(conn->data_tx_tfm, &ctask->sendbuf.sg, 1);
/* immediate-data-only write: digest lives in ctask->immdigest */
2033 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
2034 &ctask->immdigest, 1)) {
2035 debug_tcp("send padding digest 0x%x"
2036 "fail!\n", ctask->immdigest);
2039 debug_tcp("done with padding, digest 0x%x\n",
/* Data-Out write: digest belongs to the data task */
2042 if (iscsi_digest_final_send(conn, ctask,
2044 &dtask->digest, 1)) {
2045 debug_tcp("send padding digest 0x%x"
2046 "fail\n", dtask->digest);
2049 debug_tcp("done with padding, digest 0x%x\n",
/*
 * iscsi_ctask_xmit - drive the per-command transmit state machine.
 *
 * Walks the XMSTATE_* bits in protocol order: command header (read or
 * write), retried data digest, immediate data, unsolicited Data-Outs,
 * solicited (R2T) Data-Outs, and finally write padding.  Each handler
 * returns non-zero (-EAGAIN style) to indicate a partial send, in
 * which case this function bails out and the caller reschedules.
 */
2058 iscsi_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2062 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
2063 conn->id, ctask->xmstate, ctask->itt);
2066 * serialize with TMF AbortTask
2071 if (ctask->xmstate & XMSTATE_R_HDR) {
2072 rc = handle_xmstate_r_hdr(conn, ctask);
2076 if (ctask->xmstate & XMSTATE_W_HDR) {
2077 rc = handle_xmstate_w_hdr(conn, ctask);
2082 /* XXX: for data digest xmit recover */
2083 if (ctask->xmstate & XMSTATE_DATA_DIGEST) {
2084 rc = handle_xmstate_data_digest(conn, ctask);
2089 if (ctask->xmstate & XMSTATE_IMM_DATA) {
2090 rc = handle_xmstate_imm_data(conn, ctask);
2095 if (ctask->xmstate & XMSTATE_UNS_HDR) {
2096 BUG_ON(!ctask->unsol_count);
2097 ctask->xmstate &= ~XMSTATE_UNS_HDR;
2098 unsolicit_head_again:
2099 rc = handle_xmstate_uns_hdr(conn, ctask);
/* header+data alternate until unsol_count is exhausted */
2104 if (ctask->xmstate & XMSTATE_UNS_DATA) {
2105 rc = handle_xmstate_uns_data(conn, ctask);
2107 goto unsolicit_head_again;
2113 if (ctask->xmstate & XMSTATE_SOL_HDR) {
2114 struct iscsi_r2t_info *r2t;
2116 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2117 ctask->xmstate |= XMSTATE_SOL_DATA;
/* no R2T in flight yet: dequeue the next one from the target */
2119 __kfifo_get(ctask->r2tqueue, (void*)&ctask->r2t,
2123 if (conn->hdrdgst_en)
2124 iscsi_hdr_digest(conn, &r2t->headbuf,
2125 (u8*)r2t->dtask->hdrext);
2126 if (iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count)) {
2127 ctask->xmstate &= ~XMSTATE_SOL_DATA;
2128 ctask->xmstate |= XMSTATE_SOL_HDR;
2132 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
2133 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
2137 if (ctask->xmstate & XMSTATE_SOL_DATA) {
2138 rc = handle_xmstate_sol_data(conn, ctask);
2140 goto solicit_head_again;
2147 * Last thing to check is whether we need to send write
2148 * padding. Note that we check for xmstate equality, not just the bit.
2150 if (ctask->xmstate == XMSTATE_W_PAD)
2151 rc = handle_xmstate_w_pad(conn, ctask);
2157 * iscsi_data_xmit - xmit any command into the scheduled connection
2158 * @conn: iscsi connection
2161 * The function can return -EAGAIN in which case the caller must
2162 * re-schedule it again later or recover. '0' return code means
2166 iscsi_data_xmit(struct iscsi_conn *conn)
2168 if (unlikely(conn->suspend_tx)) {
2169 debug_tcp("conn %d Tx suspended!\n", conn->id);
2174 * Transmit in the following order:
2176 * 1) un-finished xmit (ctask or mtask)
2177 * 2) immediate control PDUs
2180 * 5) non-immediate control PDUs
2182 * No need to lock around __kfifo_get as long as
2183 * there's one producer and one consumer.
/* at most one kind of task may be in progress at a time */
2186 BUG_ON(conn->ctask && conn->mtask);
2189 if (iscsi_ctask_xmit(conn, conn->ctask))
2191 /* done with this in-progress ctask */
2195 if (iscsi_mtask_xmit(conn, conn->mtask))
2197 /* done with this in-progress mtask */
2201 /* process immediate first */
2202 if (unlikely(__kfifo_len(conn->immqueue))) {
2203 struct iscsi_session *session = conn->session;
2204 while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
2206 if (iscsi_mtask_xmit(conn, conn->mtask))
/* mtasks with the reserved ITT expect no response PDU, so
 * release them back to the pool as soon as they are sent */
2209 if (conn->mtask->hdr.itt ==
2210 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2211 spin_lock_bh(&session->lock);
2212 __kfifo_put(session->mgmtpool.queue,
2213 (void*)&conn->mtask, sizeof(void*));
2214 spin_unlock_bh(&session->lock);
2217 /* done with this mtask */
2221 /* process write queue */
2222 while (__kfifo_get(conn->writequeue, (void*)&conn->ctask,
2224 if (iscsi_ctask_xmit(conn, conn->ctask))
2228 /* process command queue */
2229 while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
2231 if (iscsi_ctask_xmit(conn, conn->ctask))
2234 /* done with this ctask */
2237 /* process the rest control plane PDUs, if any */
2238 if (unlikely(__kfifo_len(conn->mgmtqueue))) {
2239 struct iscsi_session *session = conn->session;
2241 while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
2243 if (iscsi_mtask_xmit(conn, conn->mtask))
2246 if (conn->mtask->hdr.itt ==
2247 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2248 spin_lock_bh(&session->lock);
2249 __kfifo_put(session->mgmtpool.queue,
2250 (void*)&conn->mtask,
2252 spin_unlock_bh(&session->lock);
2255 /* done with this mtask */
/* tx may have been suspended while we were sending */
2262 if (unlikely(conn->suspend_tx))
/*
 * iscsi_xmitworker - workqueue entry point for the transmit path.
 * Runs iscsi_data_xmit() under the per-connection xmitmutex and
 * reschedules itself when the xmit returned "try again".
 */
2269 iscsi_xmitworker(void *data)
2271 struct iscsi_conn *conn = data;
2274 * serialize Xmit worker on a per-connection basis.
2276 mutex_lock(&conn->xmitmutex);
2277 if (iscsi_data_xmit(conn))
2278 schedule_work(&conn->xmitwork);
2279 mutex_unlock(&conn->xmitmutex);
/* Reasons iscsi_queuecommand() rejects or fails a command; used only
 * for diagnostics in its reject/fault printk paths. */
2282 #define FAILURE_BAD_HOST 1
2283 #define FAILURE_SESSION_FAILED 2
2284 #define FAILURE_SESSION_FREED 3
2285 #define FAILURE_WINDOW_CLOSED 4
2286 #define FAILURE_SESSION_TERMINATE 5
/*
 * iscsi_queuecommand - SCSI midlayer queuecommand entry point.
 *
 * Validates session state and the CmdSN window, allocates a command
 * task from the session pool, initializes the iSCSI PDU and queues the
 * task on the connection's xmitqueue.  If possible the xmit is kicked
 * inline (dropping host_lock around it); otherwise xmitwork is
 * scheduled.  Called with host_lock held by the midlayer.
 */
2289 iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
2291 struct Scsi_Host *host;
2293 struct iscsi_session *session;
2294 struct iscsi_conn *conn = NULL;
2295 struct iscsi_cmd_task *ctask = NULL;
2297 sc->scsi_done = done;
2300 host = sc->device->host;
2301 session = iscsi_hostdata(host->hostdata);
2302 BUG_ON(host != session->host);
2304 spin_lock(&session->lock);
2306 if (session->state != ISCSI_STATE_LOGGED_IN) {
2307 if (session->state == ISCSI_STATE_FAILED) {
2308 reason = FAILURE_SESSION_FAILED;
2310 } else if (session->state == ISCSI_STATE_TERMINATE) {
2311 reason = FAILURE_SESSION_TERMINATE;
2314 reason = FAILURE_SESSION_FREED;
2319 * Check for iSCSI window and take care of CmdSN wrap-around
2321 if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
2322 reason = FAILURE_WINDOW_CLOSED;
2326 conn = session->leadconn;
2328 __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
/* stash session age + ctask in the scsi_cmnd so completions and
 * aborts can find/validate the task */
2331 sc->SCp.phase = session->age;
2332 sc->SCp.ptr = (char*)ctask;
2333 iscsi_cmd_init(conn, ctask, sc);
2335 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
2337 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
2338 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
2339 conn->id, (long)sc, ctask->itt, sc->request_bufflen,
2340 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
2341 spin_unlock(&session->lock);
/* fast path: try xmit inline when not in interrupt context and the
 * xmitmutex is free; host_lock must be dropped for the duration */
2343 if (!in_interrupt() && mutex_trylock(&conn->xmitmutex)) {
2344 spin_unlock_irq(host->host_lock);
2345 if (iscsi_data_xmit(conn))
2346 schedule_work(&conn->xmitwork);
2347 mutex_unlock(&conn->xmitmutex);
2348 spin_lock_irq(host->host_lock);
2350 schedule_work(&conn->xmitwork);
/* reject path: midlayer will retry the command later */
2355 spin_unlock(&session->lock);
2356 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
2357 return SCSI_MLQUEUE_HOST_BUSY;
/* fault path: fail the command with fabricated NOT_READY sense */
2360 spin_unlock(&session->lock);
2361 printk(KERN_ERR "iscsi_tcp: cmd 0x%x is not queued (%d)\n",
2362 sc->cmnd[0], reason);
2363 sc->sense_buffer[0] = 0x70;
2364 sc->sense_buffer[2] = NOT_READY;
2365 sc->sense_buffer[7] = 0x6;
2366 sc->sense_buffer[12] = 0x08;
2367 sc->sense_buffer[13] = 0x00;
2368 sc->result = (DID_NO_CONNECT << 16);
2369 sc->resid = sc->request_bufflen;
/*
 * iscsi_change_queue_depth - sysfs/midlayer hook to adjust a device's
 * queue depth, clamped to ISCSI_MAX_CMD_PER_LUN.  Returns the depth
 * actually in effect.
 */
2375 iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
2377 if (depth > ISCSI_MAX_CMD_PER_LUN)
2378 depth = ISCSI_MAX_CMD_PER_LUN;
2379 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
2380 return sdev->queue_depth;
/*
 * iscsi_pool_init - allocate a fixed-size object pool fronted by a
 * kfifo free-list.  @items receives an array of pointers to all pool
 * objects (for direct indexing); the kfifo holds the free entries.
 * On item allocation failure, previously allocated items and the
 * kfifo are torn down.
 */
2384 iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
2388 *items = kmalloc(max * sizeof(void*), GFP_KERNEL);
2393 q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
2394 if (q->pool == NULL) {
2399 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
/* NOTE(review): kfifo_init can return other ERR_PTR values; comparing
 * only against ERR_PTR(-ENOMEM) would miss them — confirm against the
 * kfifo API of this kernel version */
2401 if (q->queue == ERR_PTR(-ENOMEM)) {
2407 for (i = 0; i < max; i++) {
2408 q->pool[i] = kmalloc(item_size, GFP_KERNEL);
2409 if (q->pool[i] == NULL) {
/* unwind: free the items allocated so far, then the kfifo */
2412 for (j = 0; j < i; j++)
2415 kfifo_free(q->queue);
2420 memset(q->pool[i], 0, item_size);
2421 (*items)[i] = q->pool[i];
2422 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
/*
 * iscsi_pool_free - release every object of a pool created by
 * iscsi_pool_init() along with its bookkeeping arrays.
 */
2428 iscsi_pool_free(struct iscsi_queue *q, void **items)
2432 for (i = 0; i < q->max; i++)
/*
 * iscsi_conn_create - allocate and initialize a new iSCSI connection
 * for @shost.
 *
 * Sets initial operational parameters, allocates the four per-conn
 * kfifos (xmit, write, immediate, mgmt), reserves the login mtask from
 * the session's mgmt pool, and allocates the initial PDU receive
 * buffer.  All failure paths unwind in reverse order via gotos.
 */
2438 static struct iscsi_cls_conn *
2439 iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx)
2441 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2442 struct iscsi_conn *conn;
2443 struct iscsi_cls_conn *cls_conn;
2445 cls_conn = iscsi_create_conn(hostdata_session(shost->hostdata),
2449 conn = cls_conn->dd_data;
2451 memset(conn, 0, sizeof(struct iscsi_conn));
2452 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2453 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2454 conn->id = conn_idx;
2455 conn->exp_statsn = 0;
2456 conn->tmabort_state = TMABORT_INITIAL;
2458 /* initial operational parameters */
2459 conn->hdr_size = sizeof(struct iscsi_hdr);
2460 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2461 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2463 spin_lock_init(&conn->lock);
2465 /* initialize general xmit PDU commands queue */
2466 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
2468 if (conn->xmitqueue == ERR_PTR(-ENOMEM))
2469 goto xmitqueue_alloc_fail;
2471 /* initialize write response PDU commands queue */
2472 conn->writequeue = kfifo_alloc(session->cmds_max * sizeof(void*),
2474 if (conn->writequeue == ERR_PTR(-ENOMEM))
2475 goto writequeue_alloc_fail;
2477 /* initialize general immediate & non-immediate PDU commands queue */
2478 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
2480 if (conn->immqueue == ERR_PTR(-ENOMEM))
2481 goto immqueue_alloc_fail;
2483 conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
2485 if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
2486 goto mgmtqueue_alloc_fail;
2488 INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);
2490 /* allocate login_mtask used for the login/text sequences */
2491 spin_lock_bh(&session->lock);
2492 if (!__kfifo_get(session->mgmtpool.queue,
2493 (void*)&conn->login_mtask,
2495 spin_unlock_bh(&session->lock);
2496 goto login_mtask_alloc_fail;
2498 spin_unlock_bh(&session->lock);
2500 /* allocate initial PDU receive place holder */
2501 if (conn->data_size <= PAGE_SIZE)
2502 conn->data = kmalloc(conn->data_size, GFP_KERNEL);
2504 conn->data = (void*)__get_free_pages(GFP_KERNEL,
2505 get_order(conn->data_size));
2507 goto max_recv_dlenght_alloc_fail;
2509 init_timer(&conn->tmabort_timer);
2510 mutex_init(&conn->xmitmutex);
2511 init_waitqueue_head(&conn->ehwait);
/* error unwind: reverse order of the allocations above */
2515 max_recv_dlenght_alloc_fail:
2516 spin_lock_bh(&session->lock);
2517 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2519 spin_unlock_bh(&session->lock);
2520 login_mtask_alloc_fail:
2521 kfifo_free(conn->mgmtqueue);
2522 mgmtqueue_alloc_fail:
2523 kfifo_free(conn->immqueue);
2524 immqueue_alloc_fail:
2525 kfifo_free(conn->writequeue);
2526 writequeue_alloc_fail:
2527 kfifo_free(conn->xmitqueue);
2528 xmitqueue_alloc_fail:
2529 iscsi_destroy_conn(cls_conn);
/*
 * iscsi_conn_destroy - tear down a connection created by
 * iscsi_conn_create().
 *
 * Suspends tx/rx, releases the socket if the connection never started,
 * waits for in-flight commands to drain, frees digest transforms, the
 * receive buffer, the login mtask and the kfifos, and finally destroys
 * the class connection.
 */
2534 iscsi_conn_destroy(struct iscsi_cls_conn *cls_conn)
2536 struct iscsi_conn *conn = cls_conn->dd_data;
2537 struct iscsi_session *session = conn->session;
2538 unsigned long flags;
2540 mutex_lock(&conn->xmitmutex);
2541 set_bit(SUSPEND_BIT, &conn->suspend_tx);
2542 if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE && conn->sock) {
2543 struct sock *sk = conn->sock->sk;
2546 * conn_start() has never been called!
2547 * need to cleanup the socket.
2549 write_lock_bh(&sk->sk_callback_lock);
2550 set_bit(SUSPEND_BIT, &conn->suspend_rx);
2551 write_unlock_bh(&sk->sk_callback_lock);
/* hold a ref across callback restore so the sock can't vanish */
2553 sock_hold(conn->sock->sk);
2554 iscsi_conn_restore_callbacks(conn);
2555 sock_put(conn->sock->sk);
2556 sock_release(conn->sock);
2560 spin_lock_bh(&session->lock);
2561 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2562 if (session->leadconn == conn) {
2564 * leading connection? then give up on recovery.
2566 session->state = ISCSI_STATE_TERMINATE;
2567 wake_up(&conn->ehwait);
2569 spin_unlock_bh(&session->lock);
2571 mutex_unlock(&conn->xmitmutex);
2574 * Block until all in-progress commands for this connection
/* poll host_busy; OK for ERL==0 since no command retains state */
2578 spin_lock_irqsave(session->host->host_lock, flags);
2579 if (!session->host->host_busy) { /* OK for ERL == 0 */
2580 spin_unlock_irqrestore(session->host->host_lock, flags);
2583 spin_unlock_irqrestore(session->host->host_lock, flags);
2584 msleep_interruptible(500);
2585 printk("conn_destroy(): host_busy %d host_failed %d\n",
2586 session->host->host_busy, session->host->host_failed);
2588 * force eh_abort() to unblock
2590 wake_up(&conn->ehwait);
2593 /* now free crypto */
2594 if (conn->hdrdgst_en || conn->datadgst_en) {
2596 crypto_free_tfm(conn->tx_tfm);
2598 crypto_free_tfm(conn->rx_tfm);
2599 if (conn->data_tx_tfm)
2600 crypto_free_tfm(conn->data_tx_tfm);
2601 if (conn->data_rx_tfm)
2602 crypto_free_tfm(conn->data_rx_tfm);
2605 /* free conn->data, size = MaxRecvDataSegmentLength */
2606 if (conn->data_size <= PAGE_SIZE)
2609 free_pages((unsigned long)conn->data,
2610 get_order(conn->data_size));
2612 spin_lock_bh(&session->lock);
2613 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2615 list_del(&conn->item);
2616 if (list_empty(&session->connections))
2617 session->leadconn = NULL;
/* promote the next connection to leader if we removed the leader */
2618 if (session->leadconn && session->leadconn == conn)
2619 session->leadconn = container_of(session->connections.next,
2620 struct iscsi_conn, item);
2622 if (session->leadconn == NULL)
2623 /* no connections exist.. reset sequencing */
2624 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
2625 spin_unlock_bh(&session->lock);
2627 kfifo_free(conn->xmitqueue);
2628 kfifo_free(conn->writequeue);
2629 kfifo_free(conn->immqueue);
2630 kfifo_free(conn->mgmtqueue);
2632 iscsi_destroy_conn(cls_conn);
/*
 * iscsi_conn_bind - bind a connection to a session and to the
 * userspace-created TCP socket @transport_fd.
 *
 * Rejects re-binding a connection that is not stopped, links the conn
 * into the session's connection list, installs the iSCSI socket
 * callbacks, caches the protocol sendpage op, and unblocks tx/rx.
 */
2636 iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh,
2637 uint32_t transport_fd, int is_leading)
2639 struct iscsi_session *session = iscsi_ptr(sessionh);
2640 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh);
2642 struct socket *sock;
2645 /* lookup for existing socket */
2646 sock = sockfd_lookup(transport_fd, &err);
2648 printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
2652 /* lookup for existing connection */
2653 spin_lock_bh(&session->lock);
2654 list_for_each_entry(tmp, &session->connections, item) {
/* connection already known: only a stopped, non-terminating
 * connection may be re-bound (recovery case) */
2656 if (conn->c_stage != ISCSI_CONN_STOPPED ||
2657 conn->stop_stage == STOP_CONN_TERM) {
2658 printk(KERN_ERR "iscsi_tcp: can't bind "
2659 "non-stopped connection (%d:%d)\n",
2660 conn->c_stage, conn->stop_stage);
2661 spin_unlock_bh(&session->lock);
2668 /* bind new iSCSI connection to session */
2669 conn->session = session;
2671 list_add(&conn->item, &session->connections);
2673 spin_unlock_bh(&session->lock);
2675 if (conn->stop_stage != STOP_CONN_SUSPEND) {
2676 /* bind iSCSI connection and socket */
2679 /* setup Socket parameters */
2682 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
2683 sk->sk_allocation = GFP_ATOMIC;
2685 /* FIXME: disable Nagle's algorithm */
2688 * Intercept TCP callbacks for sendfile like receive
2691 iscsi_conn_set_callbacks(conn);
/* cache the transport's zero-copy sendpage op for iscsi_send() */
2693 conn->sendpage = conn->sock->ops->sendpage;
2696 * set receive state machine into initial state
2698 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2702 session->leadconn = conn;
2705 * Unblock xmitworker(), Login Phase will pass through.
2707 clear_bit(SUSPEND_BIT, &conn->suspend_rx);
2708 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
/*
 * iscsi_conn_start - transition a bound connection into full-feature phase.
 *
 * Marks the connection started and the session logged in, then fixes up
 * per-stop-stage state: after recovery it wakes any eh_abort() waiter;
 * after terminate/suspend it re-arms the connection counters and suspend
 * bits.  Runs under both sk_callback_lock and the session lock.
 *
 * NOTE(review): partial listing — 'sk' declaration, case braces/breaks and
 * the return are in lines not visible here.
 */
2714 iscsi_conn_start(iscsi_connh_t connh)
2716 struct iscsi_conn *conn = iscsi_ptr(connh);
2717 struct iscsi_session *session = conn->session;
2720 /* FF phase warming up... */
2722 if (session == NULL) {
2723 printk(KERN_ERR "iscsi_tcp: can't start unbound connection\n");
2727 sk = conn->sock->sk;
2729 write_lock_bh(&sk->sk_callback_lock);
2730 spin_lock_bh(&session->lock);
2731 conn->c_stage = ISCSI_CONN_STARTED;
2732 session->state = ISCSI_STATE_LOGGED_IN;
2734 switch(conn->stop_stage) {
2735 case STOP_CONN_RECOVER:
2737 * unblock eh_abort() if it is blocked. re-try all
2738 * commands after successful recovery
2740 session->conn_cnt++;
2741 conn->stop_stage = 0;
2742 conn->tmabort_state = TMABORT_INITIAL;
2744 wake_up(&conn->ehwait);
2746 case STOP_CONN_TERM:
2747 session->conn_cnt++;
2748 conn->stop_stage = 0;
2750 case STOP_CONN_SUSPEND:
2751 conn->stop_stage = 0;
/* re-enable the rx/tx paths that iscsi_conn_stop() suspended */
2752 clear_bit(SUSPEND_BIT, &conn->suspend_rx);
2753 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
2758 spin_unlock_bh(&session->lock);
2759 write_unlock_bh(&sk->sk_callback_lock);
/*
 * iscsi_conn_stop - stop a connection for suspend, recovery or termination.
 *
 * Suspends receive (under sk_callback_lock) and transmit, marks the
 * connection stopped and, when not merely suspending, drops the session's
 * connection count and fails the session if this was the lead/last
 * connection.  For TERM/RECOVER it additionally restores the original
 * socket callbacks, flushes all per-connection xmit queues (returning
 * r2ts and mgmt tasks to their pools), releases the socket, and resets
 * the digest negotiation state for recovery.
 *
 * NOTE(review): partial listing — kfifo size arguments and loop braces are
 * in lines not visible here.
 */
2765 iscsi_conn_stop(iscsi_connh_t connh, int flag)
2767 struct iscsi_conn *conn = iscsi_ptr(connh);
2768 struct iscsi_session *session = conn->session;
2770 unsigned long flags;
2772 BUG_ON(!conn->sock);
2773 sk = conn->sock->sk;
/* stop the softirq receive path before touching connection state */
2774 write_lock_bh(&sk->sk_callback_lock);
2775 set_bit(SUSPEND_BIT, &conn->suspend_rx);
2776 write_unlock_bh(&sk->sk_callback_lock);
/* serialize against xmitworker() */
2778 mutex_lock(&conn->xmitmutex);
2780 spin_lock_irqsave(session->host->host_lock, flags);
2781 spin_lock(&session->lock);
2782 conn->stop_stage = flag;
2783 conn->c_stage = ISCSI_CONN_STOPPED;
2784 set_bit(SUSPEND_BIT, &conn->suspend_tx);
2786 if (flag != STOP_CONN_SUSPEND)
2787 session->conn_cnt--;
2789 if (session->conn_cnt == 0 || session->leadconn == conn)
2790 session->state = ISCSI_STATE_FAILED;
2792 spin_unlock(&session->lock);
2793 spin_unlock_irqrestore(session->host->host_lock, flags);
2795 if (flag == STOP_CONN_TERM || flag == STOP_CONN_RECOVER) {
2796 struct iscsi_cmd_task *ctask;
2797 struct iscsi_mgmt_task *mtask;
2800 * Socket must go now.
2802 sock_hold(conn->sock->sk);
2803 iscsi_conn_restore_callbacks(conn);
2804 sock_put(conn->sock->sk);
2807 * flush xmit queues.
2809 spin_lock_bh(&session->lock);
/* drain queued commands; cleanup is done with the lock dropped */
2810 while (__kfifo_get(conn->writequeue, (void*)&ctask,
2812 __kfifo_get(conn->xmitqueue, (void*)&ctask,
2814 struct iscsi_r2t_info *r2t;
2817 * flush ctask's r2t queues
2819 while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
2821 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
2824 spin_unlock_bh(&session->lock);
2826 iscsi_ctask_cleanup(conn, ctask);
2828 spin_lock_bh(&session->lock);
/* return queued mgmt tasks to the session pool */
2831 while (__kfifo_get(conn->immqueue, (void*)&mtask,
2833 __kfifo_get(conn->mgmtqueue, (void*)&mtask,
2835 __kfifo_put(session->mgmtpool.queue,
2836 (void*)&mtask, sizeof(void*));
2839 spin_unlock_bh(&session->lock);
2842 * release socket only after we stopped data_xmit()
2843 * activity and flushed all outstandings
2845 sock_release(conn->sock);
2849 * for connection level recovery we should not calculate
2850 * header digest. conn->hdr_size used for optimization
2851 * in hdr_extract() and will be re-negotiated at
2854 if (flag == STOP_CONN_RECOVER) {
2855 conn->hdr_size = sizeof(struct iscsi_hdr);
2856 conn->hdrdgst_en = 0;
2857 conn->datadgst_en = 0;
2860 mutex_unlock(&conn->xmitmutex);
/*
 * iscsi_conn_send_generic - queue a management PDU (Login/Text/NOP/TMF...)
 * for transmission on the given connection.
 *
 * Picks an mgmt task (the preallocated login_mtask for immediate Login and
 * Text, otherwise one from the session pool), stamps ITT/CmdSN/ExpStatSN
 * into the header, copies header and optional data into the task, queues it
 * on the immediate or mgmt kfifo, and kicks the xmit worker.
 *
 * NOTE(review): partial listing — return statements, the CmdSN advance and
 * some braces are in lines not visible here.
 */
2864 iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
2865 char *data, uint32_t data_size)
2867 struct iscsi_session *session = conn->session;
2868 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
2869 struct iscsi_mgmt_task *mtask;
2871 spin_lock_bh(&session->lock);
/* refuse new PDUs once the session is being torn down */
2872 if (session->state == ISCSI_STATE_TERMINATE) {
2873 spin_unlock_bh(&session->lock);
2876 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
2877 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
2879 * Login and Text are sent serially, in
2880 * request-followed-by-response sequence.
2881 * Same mtask can be used. Same ITT must be used.
2882 * Note that login_mtask is preallocated at conn_create().
2884 mtask = conn->login_mtask;
2886 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
2887 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
/* non-login PDUs draw a task from the session-wide mgmt pool */
2889 if (!__kfifo_get(session->mgmtpool.queue,
2890 (void*)&mtask, sizeof(void*))) {
2891 spin_unlock_bh(&session->lock);
2897 * pre-format CmdSN and ExpStatSN for outgoing PDU.
2899 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
/* encode task index, connection id and session age into the ITT */
2900 hdr->itt = mtask->itt | (conn->id << CID_SHIFT) |
2901 (session->age << AGE_SHIFT);
2902 nop->cmdsn = cpu_to_be32(session->cmdsn);
2903 if (conn->c_stage == ISCSI_CONN_STARTED &&
2904 !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2907 /* do not advance CmdSN */
2908 nop->cmdsn = cpu_to_be32(session->cmdsn);
2910 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
2912 memcpy(&mtask->hdr, hdr, sizeof(struct iscsi_hdr));
2914 iscsi_buf_init_virt(&mtask->headbuf, (char*)&mtask->hdr,
2915 sizeof(struct iscsi_hdr));
2917 spin_unlock_bh(&session->lock);
2920 memcpy(mtask->data, data, data_size);
2921 mtask->data_count = data_size;
2923 mtask->data_count = 0;
2925 mtask->xmstate = XMSTATE_IMM_HDR;
2927 if (mtask->data_count) {
2928 iscsi_buf_init_iov(&mtask->sendbuf, (char*)mtask->data,
2932 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
2933 hdr->opcode, hdr->itt, data_size);
2936 * since send_pdu() could be called at least from two contexts,
2937 * we need to serialize __kfifo_put, so we don't have to take
2938 * additional lock on fast data-path
2940 if (hdr->opcode & ISCSI_OP_IMMEDIATE)
2941 __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
2943 __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
2945 schedule_work(&conn->xmitwork);
/*
 * iscsi_eh_host_reset - SCSI error-handler "host reset" entry point.
 *
 * If the session is already terminating there is nothing to do; otherwise
 * fail the connection carrying the command, which drives session-level
 * recovery.  Return values are in lines not visible in this listing.
 */
2951 iscsi_eh_host_reset(struct scsi_cmnd *sc)
2953 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
2954 struct iscsi_conn *conn = ctask->conn;
2955 struct iscsi_session *session = conn->session;
2957 spin_lock_bh(&session->lock);
2958 if (session->state == ISCSI_STATE_TERMINATE) {
2959 debug_scsi("failing host reset: session terminated "
2960 "[CID %d age %d]", conn->id, session->age);
2961 spin_unlock_bh(&session->lock);
2964 spin_unlock_bh(&session->lock);
2966 debug_scsi("failing connection CID %d due to SCSI host reset "
2967 "[itt 0x%x age %d]", conn->id, ctask->itt,
2969 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
/*
 * iscsi_tmabort_timedout - timer callback for an unanswered TMF abort.
 *
 * If the abort is still outstanding (TMABORT_INITIAL) when the timer fires,
 * return its mgmt task to the pool, record the timeout, and wake the
 * eh_abort() thread blocked on conn->ehwait.  Runs in timer (softirq)
 * context, hence spin_lock rather than spin_lock_bh.
 */
2975 iscsi_tmabort_timedout(unsigned long data)
2977 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
2978 struct iscsi_conn *conn = ctask->conn;
2979 struct iscsi_session *session = conn->session;
2981 spin_lock(&session->lock);
2982 if (conn->tmabort_state == TMABORT_INITIAL) {
2983 __kfifo_put(session->mgmtpool.queue,
2984 (void*)&ctask->mtask, sizeof(void*));
2985 conn->tmabort_state = TMABORT_TIMEDOUT;
2986 debug_scsi("tmabort timedout [sc %lx itt 0x%x]\n",
2987 (long)ctask->sc, ctask->itt);
2988 /* unblock eh_abort() */
2989 wake_up(&conn->ehwait);
2991 spin_unlock(&session->lock);
/*
 * iscsi_eh_abort - SCSI error-handler abort for one command (ERL=0).
 *
 * While logged in, sends an immediate TMF ABORT TASK referencing the
 * command's ITT/CmdSN, arms a 3s timeout, and blocks on conn->ehwait until
 * an abort response arrives, the timer fires, the session is re-opened, or
 * the session terminates.  Handles the races where the command completes
 * before the timeout and where the session age changed underneath us.
 * Serialized against the xmit path via conn->xmitmutex.
 *
 * NOTE(review): partial listing — several returns, 'rc' declaration, goto
 * labels and braces are in lines not visible here.
 */
2995 iscsi_eh_abort(struct scsi_cmnd *sc)
2998 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
2999 struct iscsi_conn *conn = ctask->conn;
3000 struct iscsi_session *session = conn->session;
3002 conn->eh_abort_cnt++;
3003 debug_scsi("aborting [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
3006 * two cases for ERL=0 here:
3008 * 1) connection-level failure;
3009 * 2) recovery due protocol error;
3011 mutex_lock(&conn->xmitmutex);
3012 spin_lock_bh(&session->lock);
3013 if (session->state != ISCSI_STATE_LOGGED_IN) {
3014 if (session->state == ISCSI_STATE_TERMINATE) {
3015 spin_unlock_bh(&session->lock);
3016 mutex_unlock(&conn->xmitmutex);
3019 spin_unlock_bh(&session->lock);
3021 struct iscsi_tm *hdr = &conn->tmhdr;
3024 * Still LOGGED_IN...
3027 if (!ctask->sc || sc->SCp.phase != session->age) {
3029 * 1) ctask completed before time out. But session
3030 * is still ok => Happy Retry.
3031 * 2) session was re-open during time out of ctask.
3033 spin_unlock_bh(&session->lock);
3034 mutex_unlock(&conn->xmitmutex);
3037 conn->tmabort_state = TMABORT_INITIAL;
3038 spin_unlock_bh(&session->lock);
3041 * ctask timed out but session is OK
3042 * ERL=0 requires task mgmt abort to be issued on each
3043 * failed command. requests must be serialized.
3045 memset(hdr, 0, sizeof(struct iscsi_tm));
/* build the TMF ABORT TASK PDU referencing the stuck command */
3046 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
3047 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
3048 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3049 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
3050 hdr->rtt = ctask->hdr.itt;
3051 hdr->refcmdsn = ctask->hdr.cmdsn;
3053 rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
/* if we could not even queue the abort, fail the whole connection */
3056 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3057 debug_scsi("abort sent failure [itt 0x%x]", ctask->itt);
3059 struct iscsi_r2t_info *r2t;
3062 * TMF abort vs. TMF response race logic
3064 spin_lock_bh(&session->lock);
/* recover the mgmt task chosen by send_generic from its ITT */
3065 ctask->mtask = (struct iscsi_mgmt_task *)
3066 session->mgmt_cmds[(hdr->itt & ITT_MASK) -
3067 ISCSI_MGMT_ITT_OFFSET];
3069 * have to flush r2tqueue to avoid r2t leaks
3071 while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
3073 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
3076 if (conn->tmabort_state == TMABORT_INITIAL) {
3077 conn->tmfcmd_pdus_cnt++;
/* 3 second response window before iscsi_tmabort_timedout() fires */
3078 conn->tmabort_timer.expires = 3*HZ + jiffies;
3079 conn->tmabort_timer.function =
3080 iscsi_tmabort_timedout;
3081 conn->tmabort_timer.data = (unsigned long)ctask;
3082 add_timer(&conn->tmabort_timer);
3083 debug_scsi("abort sent [itt 0x%x]", ctask->itt);
3086 conn->tmabort_state == TMABORT_SUCCESS) {
3087 conn->tmabort_state = TMABORT_INITIAL;
3088 spin_unlock_bh(&session->lock);
3089 mutex_unlock(&conn->xmitmutex);
3092 conn->tmabort_state = TMABORT_INITIAL;
3093 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3095 spin_unlock_bh(&session->lock);
3098 mutex_unlock(&conn->xmitmutex);
3102 * block eh thread until:
3104 * 1) abort response;
3106 * 3) session re-opened;
3107 * 4) session terminated;
3110 int p_state = session->state;
3112 rc = wait_event_interruptible(conn->ehwait,
3113 (p_state == ISCSI_STATE_LOGGED_IN ?
3114 (session->state == ISCSI_STATE_TERMINATE ||
3115 conn->tmabort_state != TMABORT_INITIAL) :
3116 (session->state == ISCSI_STATE_TERMINATE ||
3117 session->state == ISCSI_STATE_LOGGED_IN)));
3120 session->state = ISCSI_STATE_TERMINATE;
3124 if (signal_pending(current))
3125 flush_signals(current);
3127 if (session->state == ISCSI_STATE_TERMINATE)
3130 spin_lock_bh(&session->lock);
3131 if (sc->SCp.phase == session->age &&
3132 (conn->tmabort_state == TMABORT_TIMEDOUT ||
3133 conn->tmabort_state == TMABORT_FAILED)) {
3134 conn->tmabort_state = TMABORT_INITIAL;
3137 * ctask completed before tmf abort response or
3139 * But session is still ok => Happy Retry.
3141 spin_unlock_bh(&session->lock);
3144 spin_unlock_bh(&session->lock);
3145 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3148 spin_unlock_bh(&session->lock);
3153 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
3158 debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
/* common exit: stop the abort timer and clean the command's task state */
3162 del_timer_sync(&conn->tmabort_timer);
3164 mutex_lock(&conn->xmitmutex);
3166 struct sock *sk = conn->sock->sk;
3168 write_lock_bh(&sk->sk_callback_lock);
3169 iscsi_ctask_cleanup(conn, ctask);
3170 write_unlock_bh(&sk->sk_callback_lock);
3172 mutex_unlock(&conn->xmitmutex);
/*
 * iscsi_r2tpool_alloc - allocate per-command R2T resources for a session.
 *
 * For every command task: an R2T pool sized 4x max_r2t (headroom for the
 * target ACKing DataOut faster than the xmit path replenishes), a kfifo of
 * pending R2Ts, and a mempool for Data-Out task descriptors.  On failure
 * everything allocated so far (including earlier tasks) is unwound.
 *
 * NOTE(review): partial listing — loop braces, the success return and the
 * r2t_alloc_fail label line are not visible here.
 */
3177 iscsi_r2tpool_alloc(struct iscsi_session *session)
3183 * initialize per-task: R2T pool and xmit queue
3185 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
3186 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
3189 * pre-allocated x4 as much r2ts to handle race when
3190 * target acks DataOut faster than we data_xmit() queues
3191 * could replenish r2tqueue.
3195 if (iscsi_pool_init(&ctask->r2tpool, session->max_r2t * 4,
3196 (void***)&ctask->r2ts, sizeof(struct iscsi_r2t_info))) {
3197 goto r2t_alloc_fail;
3200 /* R2T xmit queue */
3201 ctask->r2tqueue = kfifo_alloc(
3202 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
3203 if (ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
3204 iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
3205 goto r2t_alloc_fail;
3210 * Data-Out PDU's within R2T-sequence can be quite big;
3213 ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX,
3214 mempool_alloc_slab, mempool_free_slab, taskcache);
3215 if (ctask->datapool == NULL) {
3216 kfifo_free(ctask->r2tqueue);
3217 iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
3218 goto r2t_alloc_fail;
3220 INIT_LIST_HEAD(&ctask->dataqueue);
/* error unwind: free resources of all fully-initialized earlier tasks */
3226 for (i = 0; i < cmd_i; i++) {
3227 mempool_destroy(session->cmds[i]->datapool);
3228 kfifo_free(session->cmds[i]->r2tqueue);
3229 iscsi_pool_free(&session->cmds[i]->r2tpool,
3230 (void**)session->cmds[i]->r2ts);
/*
 * iscsi_r2tpool_free - release everything iscsi_r2tpool_alloc() created,
 * for all command tasks of the session (datapool, R2T kfifo, R2T pool).
 */
3236 iscsi_r2tpool_free(struct iscsi_session *session)
3240 for (i = 0; i < session->cmds_max; i++) {
3241 mempool_destroy(session->cmds[i]->datapool);
3242 kfifo_free(session->cmds[i]->r2tqueue);
3243 iscsi_pool_free(&session->cmds[i]->r2tpool,
3244 (void**)session->cmds[i]->r2ts);
/*
 * SCSI host template for the iSCSI/TCP initiator.  can_queue is one less
 * than ISCSI_XMIT_CMDS_MAX so one slot stays free for internal use.
 */
3248 static struct scsi_host_template iscsi_sht = {
3249 .name = "iSCSI Initiator over TCP/IP, v."
3251 .queuecommand = iscsi_queuecommand,
3252 .change_queue_depth = iscsi_change_queue_depth,
3253 .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
3254 .sg_tablesize = ISCSI_SG_TABLESIZE,
3255 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
3256 .eh_abort_handler = iscsi_eh_abort,
3257 .eh_host_reset_handler = iscsi_eh_host_reset,
3258 .use_clustering = DISABLE_CLUSTERING,
3259 .proc_name = "iscsi_tcp",
/* forward declaration: the transport is defined below, after the helpers
 * it points to, but iscsi_session_create() needs its address earlier. */
3263 static struct iscsi_transport iscsi_tcp_transport;
/*
 * iscsi_session_create - allocate a Scsi_Host plus iscsi_session and all
 * session-wide pools.
 *
 * Initializes sequencing from initial_cmdsn, creates the SCSI command task
 * pool (ITT == pool index) and the mgmt task pool (ITT offset by
 * ISCSI_MGMT_ITT_OFFSET, each with a receive-data buffer), then the
 * per-command R2T pools.  Failure paths unwind in reverse order.
 *
 * NOTE(review): partial listing — the success return, failure-label lines
 * and the declarations of cmd_i/j are not all visible here.
 */
3265 static struct Scsi_Host *
3266 iscsi_session_create(struct scsi_transport_template *scsit,
3267 uint32_t initial_cmdsn)
3269 struct Scsi_Host *shost;
3270 struct iscsi_session *session;
3273 shost = iscsi_transport_create_session(scsit, &iscsi_tcp_transport);
3277 session = iscsi_hostdata(shost->hostdata);
3278 memset(session, 0, sizeof(struct iscsi_session));
3279 session->host = shost;
3280 session->state = ISCSI_STATE_LOGGED_IN;
3281 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
3282 session->cmds_max = ISCSI_XMIT_CMDS_MAX;
3283 session->cmdsn = initial_cmdsn;
3284 session->exp_cmdsn = initial_cmdsn + 1;
3285 session->max_cmdsn = initial_cmdsn + 1;
3286 session->max_r2t = 1;
3288 /* initialize SCSI PDU commands pool */
3289 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
3290 (void***)&session->cmds, sizeof(struct iscsi_cmd_task)))
3291 goto cmdpool_alloc_fail;
3293 /* pre-format cmds pool with ITT */
3294 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++)
3295 session->cmds[cmd_i]->itt = cmd_i;
3297 spin_lock_init(&session->lock);
3298 INIT_LIST_HEAD(&session->connections);
3300 /* initialize immediate command pool */
3301 if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
3302 (void***)&session->mgmt_cmds, sizeof(struct iscsi_mgmt_task)))
3303 goto mgmtpool_alloc_fail;
3306 /* pre-format immediate cmds pool with ITT */
3307 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
3308 session->mgmt_cmds[cmd_i]->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
/* each mgmt task carries its own receive buffer for PDU data */
3309 session->mgmt_cmds[cmd_i]->data = kmalloc(
3310 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
3311 if (!session->mgmt_cmds[cmd_i]->data) {
3314 for (j = 0; j < cmd_i; j++)
3315 kfree(session->mgmt_cmds[j]->data);
3316 goto immdata_alloc_fail;
3320 if (iscsi_r2tpool_alloc(session))
3321 goto r2tpool_alloc_fail;
/* failure unwinding, in reverse order of allocation */
3326 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
3327 kfree(session->mgmt_cmds[cmd_i]->data);
3328 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3330 mgmtpool_alloc_fail:
3331 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
/*
 * iscsi_session_destroy - tear down everything iscsi_session_create() made:
 * leftover Data-Out tasks on each command's dataqueue, the mgmt tasks'
 * data buffers, the R2T pools, both task pools, and finally the transport
 * session itself.
 */
3337 iscsi_session_destroy(struct Scsi_Host *shost)
3339 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
3341 struct iscsi_data_task *dtask, *n;
3343 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
3344 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
/* return any still-queued Data-Out descriptors to the mempool */
3345 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
3346 list_del(&dtask->item);
3347 mempool_free(dtask, ctask->datapool);
3351 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
3352 kfree(session->mgmt_cmds[cmd_i]->data);
3354 iscsi_r2tpool_free(session);
3355 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3356 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
3358 iscsi_transport_destroy_session(shost);
/*
 * iscsi_conn_set_param - apply a negotiated iSCSI parameter to a
 * connection/session.
 *
 * Only permitted while the connection is in its initial stage or stopped
 * for recovery.  Handles (re)allocation of the receive-data buffer for
 * MAX_RECV_DLENGTH, lazily allocates/frees crc32c transforms for header
 * and data digests, rebuilds the R2T pools when MAX_R2T changes (rounded
 * up to a power of two), and stores the remaining values directly.
 *
 * NOTE(review): partial listing — break statements, some error returns and
 * braces, and the GFP_ATOMIC switch under RECOVER are in lines not visible
 * here.
 */
3362 iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param,
3365 struct iscsi_conn *conn = iscsi_ptr(connh);
3366 struct iscsi_session *session = conn->session;
3368 spin_lock_bh(&session->lock);
3369 if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
3370 conn->stop_stage != STOP_CONN_RECOVER) {
3371 printk(KERN_ERR "iscsi_tcp: can not change parameter [%d]\n",
3373 spin_unlock_bh(&session->lock);
3376 spin_unlock_bh(&session->lock);
3379 case ISCSI_PARAM_MAX_RECV_DLENGTH: {
3380 char *saveptr = conn->data;
3381 gfp_t flags = GFP_KERNEL;
/* existing buffer already big enough: just record the new limit */
3383 if (conn->data_size >= value) {
3384 conn->max_recv_dlength = value;
3388 spin_lock_bh(&session->lock);
3389 if (conn->stop_stage == STOP_CONN_RECOVER)
3391 spin_unlock_bh(&session->lock);
/* small buffers via kmalloc, larger ones as whole pages */
3393 if (value <= PAGE_SIZE)
3394 conn->data = kmalloc(value, flags);
3396 conn->data = (void*)__get_free_pages(flags,
3398 if (conn->data == NULL) {
3399 conn->data = saveptr;
/* free the old buffer with the allocator that matches its size */
3402 if (conn->data_size <= PAGE_SIZE)
3405 free_pages((unsigned long)saveptr,
3406 get_order(conn->data_size));
3407 conn->max_recv_dlength = value;
3408 conn->data_size = value;
3411 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3412 conn->max_xmit_dlength = value;
3414 case ISCSI_PARAM_HDRDGST_EN:
3415 conn->hdrdgst_en = value;
3416 conn->hdr_size = sizeof(struct iscsi_hdr);
3417 if (conn->hdrdgst_en) {
/* header grows by the 4-byte CRC32C digest */
3418 conn->hdr_size += sizeof(__u32);
3420 conn->tx_tfm = crypto_alloc_tfm("crc32c", 0);
3424 conn->rx_tfm = crypto_alloc_tfm("crc32c", 0);
3425 if (!conn->rx_tfm) {
3426 crypto_free_tfm(conn->tx_tfm);
3431 crypto_free_tfm(conn->tx_tfm);
3433 crypto_free_tfm(conn->rx_tfm);
3436 case ISCSI_PARAM_DATADGST_EN:
3437 conn->datadgst_en = value;
3438 if (conn->datadgst_en) {
3439 if (!conn->data_tx_tfm)
3441 crypto_alloc_tfm("crc32c", 0);
3442 if (!conn->data_tx_tfm)
3444 if (!conn->data_rx_tfm)
3446 crypto_alloc_tfm("crc32c", 0);
3447 if (!conn->data_rx_tfm) {
3448 crypto_free_tfm(conn->data_tx_tfm);
3452 if (conn->data_tx_tfm)
3453 crypto_free_tfm(conn->data_tx_tfm);
3454 if (conn->data_rx_tfm)
3455 crypto_free_tfm(conn->data_rx_tfm);
/* data digest forces sock_no_sendpage: pages must not change in flight */
3457 conn->sendpage = conn->datadgst_en ?
3458 sock_no_sendpage : conn->sock->ops->sendpage;
3460 case ISCSI_PARAM_INITIAL_R2T_EN:
3461 session->initial_r2t_en = value;
3463 case ISCSI_PARAM_MAX_R2T:
/* no-op if the effective (power-of-two) value is unchanged */
3464 if (session->max_r2t == roundup_pow_of_two(value))
3466 iscsi_r2tpool_free(session);
3467 session->max_r2t = value;
3468 if (session->max_r2t & (session->max_r2t - 1))
3469 session->max_r2t = roundup_pow_of_two(session->max_r2t);
3470 if (iscsi_r2tpool_alloc(session))
3473 case ISCSI_PARAM_IMM_DATA_EN:
3474 session->imm_data_en = value;
3476 case ISCSI_PARAM_FIRST_BURST:
3477 session->first_burst = value;
3479 case ISCSI_PARAM_MAX_BURST:
3480 session->max_burst = value;
3482 case ISCSI_PARAM_PDU_INORDER_EN:
3483 session->pdu_inorder_en = value;
3485 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3486 session->dataseq_inorder_en = value;
3488 case ISCSI_PARAM_ERL:
3489 session->erl = value;
3491 case ISCSI_PARAM_IFMARKER_EN:
3493 session->ifmarker_en = value;
3495 case ISCSI_PARAM_OFMARKER_EN:
3497 session->ofmarker_en = value;
/*
 * iscsi_session_get_param - read back one session-scope parameter into
 * *value; unknown parameters yield ISCSI_ERR_PARAM_NOT_FOUND.
 */
3507 iscsi_session_get_param(struct Scsi_Host *shost,
3508 enum iscsi_param param, uint32_t *value)
3510 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
3513 case ISCSI_PARAM_INITIAL_R2T_EN:
3514 *value = session->initial_r2t_en;
3516 case ISCSI_PARAM_MAX_R2T:
3517 *value = session->max_r2t;
3519 case ISCSI_PARAM_IMM_DATA_EN:
3520 *value = session->imm_data_en;
3522 case ISCSI_PARAM_FIRST_BURST:
3523 *value = session->first_burst;
3525 case ISCSI_PARAM_MAX_BURST:
3526 *value = session->max_burst;
3528 case ISCSI_PARAM_PDU_INORDER_EN:
3529 *value = session->pdu_inorder_en;
3531 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3532 *value = session->dataseq_inorder_en;
3534 case ISCSI_PARAM_ERL:
3535 *value = session->erl;
3537 case ISCSI_PARAM_IFMARKER_EN:
3538 *value = session->ifmarker_en;
3540 case ISCSI_PARAM_OFMARKER_EN:
3541 *value = session->ofmarker_en;
3544 return ISCSI_ERR_PARAM_NOT_FOUND;
/*
 * iscsi_conn_get_param - read back one connection-scope parameter into
 * *value; unknown parameters yield ISCSI_ERR_PARAM_NOT_FOUND.
 */
3551 iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value)
3553 struct iscsi_conn *conn = data;
3556 case ISCSI_PARAM_MAX_RECV_DLENGTH:
3557 *value = conn->max_recv_dlength;
3559 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3560 *value = conn->max_xmit_dlength;
3562 case ISCSI_PARAM_HDRDGST_EN:
3563 *value = conn->hdrdgst_en;
3565 case ISCSI_PARAM_DATADGST_EN:
3566 *value = conn->datadgst_en;
3569 return ISCSI_ERR_PARAM_NOT_FOUND;
/*
 * iscsi_conn_get_stats - copy the connection's PDU/byte counters into the
 * caller-provided stats structure, plus three driver-specific custom
 * counters (sendpage failures, discontiguous headers, eh_abort calls).
 */
3576 iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats)
3578 struct iscsi_conn *conn = iscsi_ptr(connh);
3580 stats->txdata_octets = conn->txdata_octets;
3581 stats->rxdata_octets = conn->rxdata_octets;
3582 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
3583 stats->dataout_pdus = conn->dataout_pdus_cnt;
3584 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
3585 stats->datain_pdus = conn->datain_pdus_cnt;
3586 stats->r2t_pdus = conn->r2t_pdus_cnt;
3587 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
3588 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
3589 stats->custom_length = 3;
3590 strcpy(stats->custom[0].desc, "tx_sendpage_failures");
3591 stats->custom[0].value = conn->sendpage_failures_cnt;
3592 strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
3593 stats->custom[1].value = conn->discontiguous_hdr_cnt;
3594 strcpy(stats->custom[2].desc, "eh_abort_cnt");
3595 stats->custom[2].value = conn->eh_abort_cnt;
/*
 * iscsi_conn_send_pdu - transport entry point for sending a mgmt PDU;
 * wraps iscsi_conn_send_generic() under the xmit mutex.
 */
3599 iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data,
3602 struct iscsi_conn *conn = iscsi_ptr(connh);
3605 mutex_lock(&conn->xmitmutex);
3606 rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
3607 mutex_unlock(&conn->xmitmutex);
/*
 * iSCSI transport registration: wires this driver's session/connection
 * lifecycle, parameter, PDU and statistics callbacks into the generic
 * scsi_transport_iscsi layer.
 */
3612 static struct iscsi_transport iscsi_tcp_transport = {
3613 .owner = THIS_MODULE,
3615 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
3617 .host_template = &iscsi_sht,
3618 .hostdata_size = sizeof(struct iscsi_session),
3619 .conndata_size = sizeof(struct iscsi_conn),
3621 .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN,
3622 .create_session = iscsi_session_create,
3623 .destroy_session = iscsi_session_destroy,
3624 .create_conn = iscsi_conn_create,
3625 .bind_conn = iscsi_conn_bind,
3626 .destroy_conn = iscsi_conn_destroy,
3627 .set_param = iscsi_conn_set_param,
3628 .get_conn_param = iscsi_conn_get_param,
3629 .get_session_param = iscsi_session_get_param,
3630 .start_conn = iscsi_conn_start,
3631 .stop_conn = iscsi_conn_stop,
3632 .send_pdu = iscsi_conn_send_pdu,
3633 .get_stats = iscsi_conn_get_stats,
/*
 * iscsi_tcp_init - module init: validate max_lun, create the Data-Out task
 * slab cache, and register the transport (destroying the cache if
 * registration yields no handle).
 *
 * NOTE(review): partial listing — error returns and the taskcache NULL
 * check are in lines not visible here.
 */
3637 iscsi_tcp_init(void)
3639 if (iscsi_max_lun < 1) {
3640 printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
3643 iscsi_tcp_transport.max_lun = iscsi_max_lun;
3645 taskcache = kmem_cache_create("iscsi_taskcache",
3646 sizeof(struct iscsi_data_task), 0,
3647 SLAB_HWCACHE_ALIGN | SLAB_NO_REAP, NULL, NULL);
3651 if (!iscsi_register_transport(&iscsi_tcp_transport))
3652 kmem_cache_destroy(taskcache);
/* iscsi_tcp_exit - module exit: unregister the transport, then drop the
 * Data-Out task slab cache created in iscsi_tcp_init(). */
3658 iscsi_tcp_exit(void)
3660 iscsi_unregister_transport(&iscsi_tcp_transport);
3661 kmem_cache_destroy(taskcache);
/* module entry/exit hooks */
3664 module_init(iscsi_tcp_init);
3665 module_exit(iscsi_tcp_exit);