/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
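
/*
 * Worked example (editor's illustration, not part of the driver): the
 * Command Type 2 IOCB carries three DSDs and each Continuation Type 0
 * IOCB carries seven more, so for dsds = 10 the function returns
 * 1 + (10 - 3) / 7 = 2 with no remainder, while dsds = 11 leaves a
 * remainder of one descriptor and therefore needs a third entry.
 */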

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
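
/*
 * Worked example (editor's illustration): here the command IOCB holds
 * only two DSDs and each Continuation Type 1 IOCB holds five, so
 * dsds = 12 yields 1 + (12 - 2) / 5 = 3 IOCBs exactly, while dsds = 13
 * leaves a remainder and rounds up to 4.
 */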

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
145
Michael Hernandezd7459522016-12-12 14:40:07 -0800146inline int
Arun Easibad75002010-05-04 15:01:30 -0700147qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
148{
Giridhar Malavali9ba56b92012-02-09 11:15:36 -0800149 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
150 uint8_t guard = scsi_host_get_guard(cmd->device->host);
Arun Easibad75002010-05-04 15:01:30 -0700151
Arun Easibad75002010-05-04 15:01:30 -0700152 /* We always use DIFF Bundling for best performance */
153 *fw_prot_opts = 0;
154
155 /* Translate SCSI opcode to a protection opcode */
Giridhar Malavali9ba56b92012-02-09 11:15:36 -0800156 switch (scsi_get_prot_op(cmd)) {
Arun Easibad75002010-05-04 15:01:30 -0700157 case SCSI_PROT_READ_STRIP:
158 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
159 break;
160 case SCSI_PROT_WRITE_INSERT:
161 *fw_prot_opts |= PO_MODE_DIF_INSERT;
162 break;
163 case SCSI_PROT_READ_INSERT:
164 *fw_prot_opts |= PO_MODE_DIF_INSERT;
165 break;
166 case SCSI_PROT_WRITE_STRIP:
167 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
168 break;
169 case SCSI_PROT_READ_PASS:
Arun Easibad75002010-05-04 15:01:30 -0700170 case SCSI_PROT_WRITE_PASS:
Arun Easi9e522cd2012-08-22 14:21:31 -0400171 if (guard & SHOST_DIX_GUARD_IP)
172 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
173 else
174 *fw_prot_opts |= PO_MODE_DIF_PASS;
Arun Easibad75002010-05-04 15:01:30 -0700175 break;
176 default: /* Normal Request */
177 *fw_prot_opts |= PO_MODE_DIF_PASS;
178 break;
179 }
180
Giridhar Malavali9ba56b92012-02-09 11:15:36 -0800181 return scsi_prot_sg_count(cmd);
Arun Easibad75002010-05-04 15:01:30 -0700182}
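
/*
 * Usage sketch (editor's illustration): a caller on the 24xx I/O path
 * would typically derive the firmware protection options once per
 * command and hand them to the CRC_2 IOCB builder, e.g.
 *
 *      uint16_t fw_prot_opts;
 *      int nseg_prot = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 *
 * followed by qla24xx_build_scsi_crc_2_iocbs(..., fw_prot_opts). Note
 * that the PASS cases keep protection data intact end to end, so the
 * IP-checksum variant is chosen only when the SCSI host advertises
 * SHOST_DIX_GUARD_IP.
 */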

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
242/**
243 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
244 * capable IOCB types.
245 *
246 * @sp: SRB command to process
247 * @cmd_pkt: Command type 3 IOCB
248 * @tot_dsds: Total number of segments to transfer
249 */
250void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
251 uint16_t tot_dsds)
252{
253 uint16_t avail_dsds;
254 uint32_t *cur_dsd;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -0800255 scsi_qla_host_t *vha;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256 struct scsi_cmnd *cmd;
FUJITA Tomonori385d70b2007-05-26 01:55:38 +0900257 struct scatterlist *sg;
258 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259
Giridhar Malavali9ba56b92012-02-09 11:15:36 -0800260 cmd = GET_CMD_SP(sp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261
262 /* Update entry type to indicate Command Type 3 IOCB */
Bart Van Asschead950362015-07-09 07:24:08 -0700263 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264
265 /* No data transfer */
FUJITA Tomonori385d70b2007-05-26 01:55:38 +0900266 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
Bart Van Asschead950362015-07-09 07:24:08 -0700267 cmd_pkt->byte_count = cpu_to_le32(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268 return;
269 }
270
Joe Carnuccio25ff6af2017-01-19 22:28:04 -0800271 vha = sp->vha;
Harish Zunjarrao49fd4622008-09-11 21:22:47 -0700272 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273
274 /* Two DSDs are available in the Command Type 3 IOCB */
275 avail_dsds = 2;
276 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
277
278 /* Load data segments */
FUJITA Tomonori385d70b2007-05-26 01:55:38 +0900279 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
280 dma_addr_t sle_dma;
281 cont_a64_entry_t *cont_pkt;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282
FUJITA Tomonori385d70b2007-05-26 01:55:38 +0900283 /* Allocate additional continuation packets? */
284 if (avail_dsds == 0) {
285 /*
286 * Five DSDs are available in the Continuation
287 * Type 1 IOCB.
288 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -0800289 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
FUJITA Tomonori385d70b2007-05-26 01:55:38 +0900290 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
291 avail_dsds = 5;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292 }
FUJITA Tomonori385d70b2007-05-26 01:55:38 +0900293
294 sle_dma = sg_dma_address(sg);
295 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
296 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
297 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
298 avail_dsds--;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 }
300}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int nseg;
        unsigned long flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        cmd_entry_t *cmd_pkt;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
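
/*
 * Editor's note on the free-slot arithmetic above (illustrative): the
 * request ring is circular, so when the producer index has wrapped past
 * the hardware out-pointer the free count is
 * req->length - (ring_index - cnt). For example, with length 128,
 * ring_index 120 and cnt 10, 18 entries remain; the "req_cnt + 2" test
 * keeps two slots of headroom so the ring never appears completely full.
 */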

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                    MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                    MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine the number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
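
/*
 * Worked example (editor's illustration, assuming QLA_DSDS_PER_IOCB is
 * 37 as defined in qla_def.h): 37 descriptors fit in one DSD list, so
 * qla24xx_calc_dsd_lists(37) = 1, while 38 descriptors spill one entry
 * into a second list and qla24xx_calc_dsd_lists(38) = 38/37 + 1 = 2.
 */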


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */

        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
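
/*
 * Usage sketch (editor's illustration, mirroring the DIF walkers below):
 * the caller seeds a struct qla2_sgx with the total byte count and the
 * head of the scatterlist, then pulls out one DMA chunk per call; no
 * chunk ever crosses a protection-interval boundary.
 *
 *      struct qla2_sgx sgx;
 *      uint32_t partial;
 *
 *      memset(&sgx, 0, sizeof(sgx));
 *      sgx.tot_bytes = scsi_bufflen(cmd);
 *      sgx.cur_sg = scsi_sglist(cmd);
 *      while (qla24xx_get_one_block_sg(blk_sz, &sgx, &partial)) {
 *              ... consume sgx.dma_addr / sgx.dma_len; partial == 0
 *              signals that a full protection interval just completed ...
 *      }
 */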

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        uint32_t prot_int; /* protection interval */
        uint32_t partial;
        struct qla2_sgx sgx;
        dma_addr_t sle_dma;
        uint32_t sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg = tc->sg;
                sg_prot = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }


                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int i;
        uint16_t used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }


        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;

        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}
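
/*
 * Editor's note (inferred from the allocation arithmetic above): each
 * DSD list sized as (avail_dsds + 1) * 12 bytes reserves one 12-byte
 * slot beyond the data entries. That spare slot receives either the
 * chaining triplet pointing at the next list or the three zero words of
 * the null terminator, letting firmware walk the chain without a
 * separate element count.
 */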

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}
1190
1191/**
1192 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1193 * Type 6 IOCB types.
1194 *
1195 * @sp: SRB command to process
1196 * @cmd_pkt: Command type 3 IOCB
1197 * @tot_dsds: Total number of segments to transfer
Bart Van Assche807eb902018-10-18 15:45:41 -07001198 * @tot_prot_dsds: Total number of segments with protection information
1199 * @fw_prot_opts: Protection options to be passed to firmware
Arun Easibad75002010-05-04 15:01:30 -07001200 */
Michael Hernandezd7459522016-12-12 14:40:07 -08001201inline int
Arun Easibad75002010-05-04 15:01:30 -07001202qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1203 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1204{
1205 uint32_t *cur_dsd, *fcp_dl;
1206 scsi_qla_host_t *vha;
1207 struct scsi_cmnd *cmd;
Arun Easi8cb20492011-08-16 11:29:22 -07001208 uint32_t total_bytes = 0;
Arun Easibad75002010-05-04 15:01:30 -07001209 uint32_t data_bytes;
1210 uint32_t dif_bytes;
1211 uint8_t bundling = 1;
1212 uint16_t blk_size;
Arun Easibad75002010-05-04 15:01:30 -07001213 struct crc_context *crc_ctx_pkt = NULL;
1214 struct qla_hw_data *ha;
1215 uint8_t additional_fcpcdb_len;
1216 uint16_t fcp_cmnd_len;
1217 struct fcp_cmnd *fcp_cmnd;
1218 dma_addr_t crc_ctx_dma;
1219
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001220 cmd = GET_CMD_SP(sp);
Arun Easibad75002010-05-04 15:01:30 -07001221
Arun Easibad75002010-05-04 15:01:30 -07001222 /* Update entry type to indicate Command Type CRC_2 IOCB */
Bart Van Asschead950362015-07-09 07:24:08 -07001223 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
Arun Easibad75002010-05-04 15:01:30 -07001224
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001225 vha = sp->vha;
Arun Easibad75002010-05-04 15:01:30 -07001226 ha = vha->hw;
1227
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001228 /* No data transfer */
1229 data_bytes = scsi_bufflen(cmd);
1230 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
Bart Van Asschead950362015-07-09 07:24:08 -07001231 cmd_pkt->byte_count = cpu_to_le32(0);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001232 return QLA_SUCCESS;
1233 }
Arun Easibad75002010-05-04 15:01:30 -07001234
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001235 cmd_pkt->vp_index = sp->vha->vp_idx;
Arun Easibad75002010-05-04 15:01:30 -07001236
1237 /* Set transfer direction */
1238 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1239 cmd_pkt->control_flags =
Bart Van Asschead950362015-07-09 07:24:08 -07001240 cpu_to_le16(CF_WRITE_DATA);
Arun Easibad75002010-05-04 15:01:30 -07001241 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1242 cmd_pkt->control_flags =
Bart Van Asschead950362015-07-09 07:24:08 -07001243 cpu_to_le16(CF_READ_DATA);
Arun Easibad75002010-05-04 15:01:30 -07001244 }
1245
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001246 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1247 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1248 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1249 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
Arun Easibad75002010-05-04 15:01:30 -07001250 bundling = 0;
1251
1252 /* Allocate CRC context from global pool */
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001253 crc_ctx_pkt = sp->u.scmd.ctx =
Souptick Joarder501017f2018-02-15 01:40:38 +05301254 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
Arun Easibad75002010-05-04 15:01:30 -07001255
1256 if (!crc_ctx_pkt)
1257 goto crc_queuing_error;
1258
Arun Easibad75002010-05-04 15:01:30 -07001259 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1260
1261 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1262
1263 /* Set handle */
1264 crc_ctx_pkt->handle = cmd_pkt->handle;
1265
1266 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1267
Arun Easie02587d2011-08-16 11:29:23 -07001268 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
Arun Easibad75002010-05-04 15:01:30 -07001269 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1270
1271 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1272 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1273 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1274
1275 /* Determine SCSI command length -- align to 4 byte boundary */
1276 if (cmd->cmd_len > 16) {
Arun Easibad75002010-05-04 15:01:30 -07001277 additional_fcpcdb_len = cmd->cmd_len - 16;
1278 if ((cmd->cmd_len % 4) != 0) {
1279 /* SCSI cmd > 16 bytes must be multiple of 4 */
1280 goto crc_queuing_error;
1281 }
1282 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1283 } else {
1284 additional_fcpcdb_len = 0;
1285 fcp_cmnd_len = 12 + 16 + 4;
1286 }
1287
1288 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1289
1290 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1291 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1292 fcp_cmnd->additional_cdb_len |= 1;
1293 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1294 fcp_cmnd->additional_cdb_len |= 2;
1295
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001296 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
Arun Easibad75002010-05-04 15:01:30 -07001297 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1298 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1299 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1300 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1301 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1302 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
Uwe Kleine-König65155b32010-06-11 12:17:01 +02001303 fcp_cmnd->task_management = 0;
Christoph Hellwig50668632014-10-30 14:30:06 +01001304 fcp_cmnd->task_attribute = TSK_SIMPLE;
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001305
Arun Easibad75002010-05-04 15:01:30 -07001306 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1307
Arun Easibad75002010-05-04 15:01:30 -07001308 /* Compute dif len and adjust data len to incude protection */
Arun Easibad75002010-05-04 15:01:30 -07001309 dif_bytes = 0;
1310 blk_size = cmd->device->sector_size;
Arun Easi8cb20492011-08-16 11:29:22 -07001311 dif_bytes = (data_bytes / blk_size) * 8;
1312
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001313 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
Arun Easi8cb20492011-08-16 11:29:22 -07001314 case SCSI_PROT_READ_INSERT:
1315 case SCSI_PROT_WRITE_STRIP:
1316 total_bytes = data_bytes;
1317 data_bytes += dif_bytes;
1318 break;
1319
1320 case SCSI_PROT_READ_STRIP:
1321 case SCSI_PROT_WRITE_INSERT:
1322 case SCSI_PROT_READ_PASS:
1323 case SCSI_PROT_WRITE_PASS:
1324 total_bytes = data_bytes + dif_bytes;
1325 break;
1326 default:
1327 BUG();
Arun Easibad75002010-05-04 15:01:30 -07001328 }
1329
Arun Easie02587d2011-08-16 11:29:23 -07001330 if (!qla2x00_hba_err_chk_enabled(sp))
Arun Easibad75002010-05-04 15:01:30 -07001331 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
Arun Easi9e522cd2012-08-22 14:21:31 -04001332 /* HBA error checking enabled */
1333 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1334 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1335 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1336 SCSI_PROT_DIF_TYPE2))
1337 fw_prot_opts |= BIT_10;
1338 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1339 SCSI_PROT_DIF_TYPE3)
1340 fw_prot_opts |= BIT_11;
1341 }
Arun Easibad75002010-05-04 15:01:30 -07001342
1343 if (!bundling) {
1344 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1345 } else {
1346 /*
1347 * Configure bundling if we need to fetch interleaved
1348 * protection data with separate PCI accesses
1349 */
1350 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1351 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1352 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1353 tot_prot_dsds);
1354 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1355 }
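	/*
	 * With bundling enabled the firmware pulls data DSDs and
	 * protection DSDs through separate descriptor chains in the CRC
	 * context, so dseg_count above covers the data portion only.
	 */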
1356
1357 /* Finish the common fields of CRC pkt */
1358 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1359 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1360 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
Bart Van Asschead950362015-07-09 07:24:08 -07001361 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
Arun Easibad75002010-05-04 15:01:30 -07001362 /* Fibre Channel byte count */
1363 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1364 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1365 additional_fcpcdb_len);
1366 *fcp_dl = htonl(total_bytes);
1367
Arun Easi0c470872010-07-23 15:28:38 +05001368 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
Bart Van Asschead950362015-07-09 07:24:08 -07001369 cmd_pkt->byte_count = cpu_to_le32(0);
Arun Easi0c470872010-07-23 15:28:38 +05001370 return QLA_SUCCESS;
1371 }
Arun Easibad75002010-05-04 15:01:30 -07001372 /* Walks data segments */
1373
Bart Van Asschead950362015-07-09 07:24:08 -07001374 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
Arun Easi8cb20492011-08-16 11:29:22 -07001375
1376 if (!bundling && tot_prot_dsds) {
1377 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
Quinn Tranf83adb62014-04-11 16:54:43 -04001378 cur_dsd, tot_dsds, NULL))
Arun Easi8cb20492011-08-16 11:29:22 -07001379 goto crc_queuing_error;
1380 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
Quinn Tranf83adb62014-04-11 16:54:43 -04001381 (tot_dsds - tot_prot_dsds), NULL))
Arun Easibad75002010-05-04 15:01:30 -07001382 goto crc_queuing_error;
1383
1384 if (bundling && tot_prot_dsds) {
1385 /* Walks dif segments */
Bart Van Asschead950362015-07-09 07:24:08 -07001386 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
Arun Easibad75002010-05-04 15:01:30 -07001387 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1388 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
Quinn Tranf83adb62014-04-11 16:54:43 -04001389 tot_prot_dsds, NULL))
Arun Easibad75002010-05-04 15:01:30 -07001390 goto crc_queuing_error;
1391 }
1392 return QLA_SUCCESS;
1393
1394crc_queuing_error:
Arun Easibad75002010-05-04 15:01:30 -07001395 /* Cleanup will be performed by the caller */
1396
1397 return QLA_FUNCTION_FAILED;
1398}
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001399
1400/**
1401 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1402 * @sp: command to send to the ISP
1403 *
Bjorn Helgaascc3ef7b2008-09-11 21:22:51 -07001404 * Returns non-zero if a failure occurred, else zero.
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001405 */
1406int
1407qla24xx_start_scsi(srb_t *sp)
1408{
Bart Van Assche52c82822015-07-09 07:23:26 -07001409 int nseg;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001410 unsigned long flags;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001411 uint32_t *clr_ptr;
1412 uint32_t index;
1413 uint32_t handle;
1414 struct cmd_type_7 *cmd_pkt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001415 uint16_t cnt;
1416 uint16_t req_cnt;
1417 uint16_t tot_dsds;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001418 struct req_que *req = NULL;
1419 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001420 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001421 struct scsi_qla_host *vha = sp->vha;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001422 struct qla_hw_data *ha = vha->hw;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001423
1424 /* Setup device pointers. */
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001425 req = vha->req;
Michael Hernandezd7459522016-12-12 14:40:07 -08001426 rsp = req->rsp;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001427
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001428 /* So we know we haven't pci_map'ed anything yet */
1429 tot_dsds = 0;
1430
1431 /* Send marker if required */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001432 if (vha->marker_needed != 0) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001433 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1434 QLA_SUCCESS)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001435 return QLA_FUNCTION_FAILED;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001436 vha->marker_needed = 0;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001437 }
1438
1439 /* Acquire ring specific lock */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001440 spin_lock_irqsave(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001441
1442 /* Check for room in outstanding command list. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001443 handle = req->current_outstanding_cmd;
Chad Dupuis8d93f552013-01-30 03:34:37 -05001444 for (index = 1; index < req->num_outstanding_cmds; index++) {
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001445 handle++;
Chad Dupuis8d93f552013-01-30 03:34:37 -05001446 if (handle == req->num_outstanding_cmds)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001447 handle = 1;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001448 if (!req->outstanding_cmds[handle])
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001449 break;
1450 }
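	/*
	 * Handles are scanned circularly from just past the last one
	 * issued; slot 0 is never used, so a handle of zero always
	 * means "no command".
	 */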
Chad Dupuis8d93f552013-01-30 03:34:37 -05001451 if (index == req->num_outstanding_cmds)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001452 goto queuing_error;
1453
1454 /* Map the sg table so we have an accurate count of sg entries needed */
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001455 if (scsi_sg_count(cmd)) {
1456 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1457 scsi_sg_count(cmd), cmd->sc_data_direction);
1458 if (unlikely(!nseg))
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001459 goto queuing_error;
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001460 } else
1461 nseg = 0;
1462
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001463 tot_dsds = nseg;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001464 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001465 if (req->cnt < (req_cnt + 2)) {
Joe Carnuccio7c6300e2014-04-11 16:54:37 -04001466 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1467 RD_REG_DWORD_RELAXED(req->req_q_out);
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001468 if (req->ring_index < cnt)
1469 req->cnt = cnt - req->ring_index;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001470 else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001471 req->cnt = req->length -
1472 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04001473 if (req->cnt < (req_cnt + 2))
1474 goto queuing_error;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001475 }
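	/*
	 * Free-slot math, illustrated: with req->length = 2048,
	 * ring_index = 100 and a firmware out pointer of 90, the
	 * producer has wrapped past the consumer, leaving req->cnt =
	 * 2048 - (100 - 90) = 2038 entries. The two-entry pad in the
	 * comparison avoids the classic full/empty ambiguity when the
	 * indices meet.
	 */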
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001476
1477 /* Build command packet. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001478 req->current_outstanding_cmd = handle;
1479 req->outstanding_cmds[handle] = sp;
Andrew Vasquezcf53b062009-08-20 11:06:04 -07001480 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001481 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001482 req->cnt -= req_cnt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001483
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001484 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001485 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001486
1487 /* Zero out remaining portion of packet. */
James Bottomley72df8322005-10-28 14:41:19 -05001488 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001489 clr_ptr = (uint32_t *)cmd_pkt + 2;
1490 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1491 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1492
1493 /* Set NPORT-ID and LUN number*/
1494 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1495 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1496 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1497 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001498 cmd_pkt->vp_index = sp->vha->vp_idx;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001499
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001500 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
andrew.vasquez@qlogic.com0d4be122006-02-07 08:45:35 -08001501 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001502
Christoph Hellwig50668632014-10-30 14:30:06 +01001503 cmd_pkt->task = TSK_SIMPLE;
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001504
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001505 /* Load SCSI command packet. */
1506 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1507 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1508
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001509 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001510
1511 /* Build IOCB segments */
Michael Hernandezd7459522016-12-12 14:40:07 -08001512 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001513
1514 /* Set total data segment count. */
1515 cmd_pkt->entry_count = (uint8_t)req_cnt;
1516 wmb();
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001517 /* Adjust ring index. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001518 req->ring_index++;
1519 if (req->ring_index == req->length) {
1520 req->ring_index = 0;
1521 req->ring_ptr = req->ring;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001522 } else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001523 req->ring_ptr++;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001524
1525 sp->flags |= SRB_DMA_VALID;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001526
1527 /* Set chip new ring index. */
Andrew Vasquez080299902009-03-24 09:07:55 -07001528 WRT_REG_DWORD(req->req_q_in, req->ring_index);
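	/*
	 * The wmb() above orders the IOCB stores ahead of this doorbell
	 * write, so the firmware never fetches a half-built packet.
	 */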
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001529
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001530 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001531 return QLA_SUCCESS;
1532
1533queuing_error:
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001534 if (tot_dsds)
1535 scsi_dma_unmap(cmd);
1536
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001537 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001538
1539 return QLA_FUNCTION_FAILED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540}
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001541
Arun Easibad75002010-05-04 15:01:30 -07001542/**
1543 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1544 * @sp: command to send to the ISP
1545 *
1546 * Returns non-zero if a failure occurred, else zero.
1547 */
1548int
1549qla24xx_dif_start_scsi(srb_t *sp)
1550{
1551 int nseg;
1552 unsigned long flags;
1553 uint32_t *clr_ptr;
1554 uint32_t index;
1555 uint32_t handle;
1556 uint16_t cnt;
1557 uint16_t req_cnt = 0;
1558 uint16_t tot_dsds;
1559 uint16_t tot_prot_dsds;
1560 uint16_t fw_prot_opts = 0;
1561 struct req_que *req = NULL;
1562 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001563 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001564 struct scsi_qla_host *vha = sp->vha;
Arun Easibad75002010-05-04 15:01:30 -07001565 struct qla_hw_data *ha = vha->hw;
1566 struct cmd_type_crc_2 *cmd_pkt;
1567 uint32_t status = 0;
1568
1569#define QDSS_GOT_Q_SPACE BIT_0
1570
Arun Easi0c470872010-07-23 15:28:38 +05001571 /* Only process protection ops or CDBs longer than 16 bytes here */
1572 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1573 if (cmd->cmd_len <= 16)
1574 return qla24xx_start_scsi(sp);
1575 }
Arun Easibad75002010-05-04 15:01:30 -07001576
1577 /* Setup device pointers. */
Arun Easibad75002010-05-04 15:01:30 -07001578 req = vha->req;
Michael Hernandezd7459522016-12-12 14:40:07 -08001579 rsp = req->rsp;
Arun Easibad75002010-05-04 15:01:30 -07001580
1581 /* So we know we haven't pci_map'ed anything yet */
1582 tot_dsds = 0;
1583
1584 /* Send marker if required */
1585 if (vha->marker_needed != 0) {
1586 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1587 QLA_SUCCESS)
1588 return QLA_FUNCTION_FAILED;
1589 vha->marker_needed = 0;
1590 }
1591
1592 /* Acquire ring specific lock */
1593 spin_lock_irqsave(&ha->hardware_lock, flags);
1594
1595 /* Check for room in outstanding command list. */
1596 handle = req->current_outstanding_cmd;
Chad Dupuis8d93f552013-01-30 03:34:37 -05001597 for (index = 1; index < req->num_outstanding_cmds; index++) {
Arun Easibad75002010-05-04 15:01:30 -07001598 handle++;
Chad Dupuis8d93f552013-01-30 03:34:37 -05001599 if (handle == req->num_outstanding_cmds)
Arun Easibad75002010-05-04 15:01:30 -07001600 handle = 1;
1601 if (!req->outstanding_cmds[handle])
1602 break;
1603 }
1604
Chad Dupuis8d93f552013-01-30 03:34:37 -05001605 if (index == req->num_outstanding_cmds)
Arun Easibad75002010-05-04 15:01:30 -07001606 goto queuing_error;
1607
1608 /* Compute number of required data segments */
1609 /* Map the sg table so we have an accurate count of sg entries needed */
1610 if (scsi_sg_count(cmd)) {
1611 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1612 scsi_sg_count(cmd), cmd->sc_data_direction);
1613 if (unlikely(!nseg))
1614 goto queuing_error;
1615 else
1616 sp->flags |= SRB_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001617
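		/*
		 * For INSERT/STRIP the firmware interleaves one DIF
		 * tuple per logical block, so recount the DSDs per
		 * block rather than per scatter-gather element.
		 */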
1618 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1619 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1620 struct qla2_sgx sgx;
1621 uint32_t partial;
1622
1623 memset(&sgx, 0, sizeof(struct qla2_sgx));
1624 sgx.tot_bytes = scsi_bufflen(cmd);
1625 sgx.cur_sg = scsi_sglist(cmd);
1626 sgx.sp = sp;
1627
1628 nseg = 0;
1629 while (qla24xx_get_one_block_sg(
1630 cmd->device->sector_size, &sgx, &partial))
1631 nseg++;
1632 }
Arun Easibad75002010-05-04 15:01:30 -07001633 } else
1634 nseg = 0;
1635
1636 /* number of required data segments */
1637 tot_dsds = nseg;
1638
1639 /* Compute number of required protection segments */
1640 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1641 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1642 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1643 if (unlikely(!nseg))
1644 goto queuing_error;
1645 else
1646 sp->flags |= SRB_CRC_PROT_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001647
1648 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1649 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1650 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1651 }
Arun Easibad75002010-05-04 15:01:30 -07001652 } else {
1653 nseg = 0;
1654 }
1655
1656 req_cnt = 1;
1657 /* Total Data and protection sg segment(s) */
1658 tot_prot_dsds = nseg;
1659 tot_dsds += nseg;
1660 if (req->cnt < (req_cnt + 2)) {
Joe Carnuccio7c6300e2014-04-11 16:54:37 -04001661 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1662 RD_REG_DWORD_RELAXED(req->req_q_out);
Arun Easibad75002010-05-04 15:01:30 -07001663 if (req->ring_index < cnt)
1664 req->cnt = cnt - req->ring_index;
1665 else
1666 req->cnt = req->length -
1667 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04001668 if (req->cnt < (req_cnt + 2))
1669 goto queuing_error;
Arun Easibad75002010-05-04 15:01:30 -07001670 }
1671
Arun Easibad75002010-05-04 15:01:30 -07001672 status |= QDSS_GOT_Q_SPACE;
1673
1674 /* Build header part of command packet (excluding the OPCODE). */
1675 req->current_outstanding_cmd = handle;
1676 req->outstanding_cmds[handle] = sp;
Arun Easi8cb20492011-08-16 11:29:22 -07001677 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001678 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Arun Easibad75002010-05-04 15:01:30 -07001679 req->cnt -= req_cnt;
1680
1681 /* Fill-in common area */
1682 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1683 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1684
1685 clr_ptr = (uint32_t *)cmd_pkt + 2;
1686 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1687
1688 /* Set NPORT-ID and LUN number*/
1689 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1690 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1691 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1692 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1693
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001694 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Arun Easibad75002010-05-04 15:01:30 -07001695 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1696
1697 /* Total Data and protection segment(s) */
1698 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1699
1700 /* Build IOCB segments and adjust for data protection segments */
1701 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1702 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1703 QLA_SUCCESS)
1704 goto queuing_error;
1705
1706 cmd_pkt->entry_count = (uint8_t)req_cnt;
1707 /* Specify response queue number where completion should happen */
1708 cmd_pkt->entry_status = (uint8_t) rsp->id;
Bart Van Asschead950362015-07-09 07:24:08 -07001709 cmd_pkt->timeout = cpu_to_le16(0);
Arun Easibad75002010-05-04 15:01:30 -07001710 wmb();
1711
1712 /* Adjust ring index. */
1713 req->ring_index++;
1714 if (req->ring_index == req->length) {
1715 req->ring_index = 0;
1716 req->ring_ptr = req->ring;
1717 } else
1718 req->ring_ptr++;
1719
1720 /* Set chip new ring index. */
1721 WRT_REG_DWORD(req->req_q_in, req->ring_index);
Arun Easibad75002010-05-04 15:01:30 -07001722
1723 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1724
1725 return QLA_SUCCESS;
1726
1727queuing_error:
1728 if (status & QDSS_GOT_Q_SPACE) {
1729 req->outstanding_cmds[handle] = NULL;
1730 req->cnt += req_cnt;
1731 }
1732 /* Cleanup will be performed by the caller (queuecommand) */
1733
1734 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Arun Easibad75002010-05-04 15:01:30 -07001735 return QLA_FUNCTION_FAILED;
1736}
1737
Michael Hernandezd7459522016-12-12 14:40:07 -08001738/**
1739 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1740 * @sp: command to send to the ISP
1741 *
1742 * Returns non-zero if a failure occurred, else zero.
1743 */
1744static int
1745qla2xxx_start_scsi_mq(srb_t *sp)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001746{
Michael Hernandezd7459522016-12-12 14:40:07 -08001747 int nseg;
1748 unsigned long flags;
1749 uint32_t *clr_ptr;
1750 uint32_t index;
1751 uint32_t handle;
1752 struct cmd_type_7 *cmd_pkt;
1753 uint16_t cnt;
1754 uint16_t req_cnt;
1755 uint16_t tot_dsds;
1756 struct req_que *req = NULL;
1757 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001758 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Michael Hernandezd7459522016-12-12 14:40:07 -08001759 struct scsi_qla_host *vha = sp->fcport->vha;
1760 struct qla_hw_data *ha = vha->hw;
1761 struct qla_qpair *qpair = sp->qpair;
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001762
Johannes Thumshirn578079f2017-06-23 09:10:11 +02001763 /* Acquire qpair specific lock */
1764 spin_lock_irqsave(&qpair->qp_lock, flags);
1765
Michael Hernandezd7459522016-12-12 14:40:07 -08001766 /* Setup qpair pointers */
1767 rsp = qpair->rsp;
1768 req = qpair->req;
1769
1770 /* So we know we haven't pci_map'ed anything yet */
1771 tot_dsds = 0;
1772
1773 /* Send marker if required */
1774 if (vha->marker_needed != 0) {
Johannes Thumshirn578079f2017-06-23 09:10:11 +02001775 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1776 QLA_SUCCESS) {
1777 spin_unlock_irqrestore(&qpair->qp_lock, flags);
Michael Hernandezd7459522016-12-12 14:40:07 -08001778 return QLA_FUNCTION_FAILED;
Johannes Thumshirn578079f2017-06-23 09:10:11 +02001779 }
Michael Hernandezd7459522016-12-12 14:40:07 -08001780 vha->marker_needed = 0;
1781 }
1782
Michael Hernandezd7459522016-12-12 14:40:07 -08001783 /* Check for room in outstanding command list. */
1784 handle = req->current_outstanding_cmd;
1785 for (index = 1; index < req->num_outstanding_cmds; index++) {
1786 handle++;
1787 if (handle == req->num_outstanding_cmds)
1788 handle = 1;
1789 if (!req->outstanding_cmds[handle])
1790 break;
1791 }
1792 if (index == req->num_outstanding_cmds)
1793 goto queuing_error;
1794
1795 /* Map the sg table so we have an accurate count of sg entries needed */
1796 if (scsi_sg_count(cmd)) {
1797 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1798 scsi_sg_count(cmd), cmd->sc_data_direction);
1799 if (unlikely(!nseg))
1800 goto queuing_error;
1801 } else
1802 nseg = 0;
1803
1804 tot_dsds = nseg;
1805 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1806 if (req->cnt < (req_cnt + 2)) {
1807 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1808 RD_REG_DWORD_RELAXED(req->req_q_out);
1809 if (req->ring_index < cnt)
1810 req->cnt = cnt - req->ring_index;
1811 else
1812 req->cnt = req->length -
1813 (req->ring_index - cnt);
1814 if (req->cnt < (req_cnt + 2))
1815 goto queuing_error;
1816 }
1817
1818 /* Build command packet. */
1819 req->current_outstanding_cmd = handle;
1820 req->outstanding_cmds[handle] = sp;
1821 sp->handle = handle;
1822 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1823 req->cnt -= req_cnt;
1824
1825 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1826 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1827
1828 /* Zero out remaining portion of packet. */
1829 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1830 clr_ptr = (uint32_t *)cmd_pkt + 2;
1831 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1832 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1833
1834 /* Set NPORT-ID and LUN number*/
1835 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1836 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1837 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1838 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1839 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1840
1841 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1842 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1843
1844 cmd_pkt->task = TSK_SIMPLE;
1845
1846 /* Load SCSI command packet. */
1847 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1848 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1849
1850 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1851
1852 /* Build IOCB segments */
1853 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1854
1855 /* Set total data segment count. */
1856 cmd_pkt->entry_count = (uint8_t)req_cnt;
1857 wmb();
1858 /* Adjust ring index. */
1859 req->ring_index++;
1860 if (req->ring_index == req->length) {
1861 req->ring_index = 0;
1862 req->ring_ptr = req->ring;
1863 } else
1864 req->ring_ptr++;
1865
1866 sp->flags |= SRB_DMA_VALID;
1867
1868 /* Set chip new ring index. */
1869 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1870
Michael Hernandezd7459522016-12-12 14:40:07 -08001871 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1872 return QLA_SUCCESS;
1873
1874queuing_error:
1875 if (tot_dsds)
1876 scsi_dma_unmap(cmd);
1877
1878 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1879
1880 return QLA_FUNCTION_FAILED;
1881}
1882
1883
1884/**
1885 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1886 * @sp: command to send to the ISP
1887 *
1888 * Returns non-zero if a failure occurred, else zero.
1889 */
1890int
1891qla2xxx_dif_start_scsi_mq(srb_t *sp)
1892{
1893 int nseg;
1894 unsigned long flags;
1895 uint32_t *clr_ptr;
1896 uint32_t index;
1897 uint32_t handle;
1898 uint16_t cnt;
1899 uint16_t req_cnt = 0;
1900 uint16_t tot_dsds;
1901 uint16_t tot_prot_dsds;
1902 uint16_t fw_prot_opts = 0;
1903 struct req_que *req = NULL;
1904 struct rsp_que *rsp = NULL;
1905 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1906 struct scsi_qla_host *vha = sp->fcport->vha;
1907 struct qla_hw_data *ha = vha->hw;
1908 struct cmd_type_crc_2 *cmd_pkt;
1909 uint32_t status = 0;
1910 struct qla_qpair *qpair = sp->qpair;
1911
1912#define QDSS_GOT_Q_SPACE BIT_0
1913
1914 /* Check for host side state */
1915 if (!qpair->online) {
1916 cmd->result = DID_NO_CONNECT << 16;
1917 return QLA_INTERFACE_ERROR;
1918 }
1919
1920 if (!qpair->difdix_supported &&
1921 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1922 cmd->result = DID_NO_CONNECT << 16;
1923 return QLA_INTERFACE_ERROR;
1924 }
1925
1926 /* Only process protection ops or CDBs longer than 16 bytes here */
1927 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1928 if (cmd->cmd_len <= 16)
1929 return qla2xxx_start_scsi_mq(sp);
1930 }
1931
Johannes Thumshirn578079f2017-06-23 09:10:11 +02001932 spin_lock_irqsave(&qpair->qp_lock, flags);
1933
Michael Hernandezd7459522016-12-12 14:40:07 -08001934 /* Setup qpair pointers */
1935 rsp = qpair->rsp;
1936 req = qpair->req;
1937
1938 /* So we know we haven't pci_map'ed anything yet */
1939 tot_dsds = 0;
1940
1941 /* Send marker if required */
1942 if (vha->marker_needed != 0) {
Johannes Thumshirn578079f2017-06-23 09:10:11 +02001943 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1944 QLA_SUCCESS) {
1945 spin_unlock_irqrestore(&qpair->qp_lock, flags);
Michael Hernandezd7459522016-12-12 14:40:07 -08001946 return QLA_FUNCTION_FAILED;
Johannes Thumshirn578079f2017-06-23 09:10:11 +02001947 }
Michael Hernandezd7459522016-12-12 14:40:07 -08001948 vha->marker_needed = 0;
1949 }
1950
Michael Hernandezd7459522016-12-12 14:40:07 -08001951 /* Check for room in outstanding command list. */
1952 handle = req->current_outstanding_cmd;
1953 for (index = 1; index < req->num_outstanding_cmds; index++) {
1954 handle++;
1955 if (handle == req->num_outstanding_cmds)
1956 handle = 1;
1957 if (!req->outstanding_cmds[handle])
1958 break;
1959 }
1960
1961 if (index == req->num_outstanding_cmds)
1962 goto queuing_error;
1963
1964 /* Compute number of required data segments */
1965 /* Map the sg table so we have an accurate count of sg entries needed */
1966 if (scsi_sg_count(cmd)) {
1967 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1968 scsi_sg_count(cmd), cmd->sc_data_direction);
1969 if (unlikely(!nseg))
1970 goto queuing_error;
1971 else
1972 sp->flags |= SRB_DMA_VALID;
1973
1974 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1975 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1976 struct qla2_sgx sgx;
1977 uint32_t partial;
1978
1979 memset(&sgx, 0, sizeof(struct qla2_sgx));
1980 sgx.tot_bytes = scsi_bufflen(cmd);
1981 sgx.cur_sg = scsi_sglist(cmd);
1982 sgx.sp = sp;
1983
1984 nseg = 0;
1985 while (qla24xx_get_one_block_sg(
1986 cmd->device->sector_size, &sgx, &partial))
1987 nseg++;
1988 }
1989 } else
1990 nseg = 0;
1991
1992 /* number of required data segments */
1993 tot_dsds = nseg;
1994
1995 /* Compute number of required protection segments */
1996 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1997 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1998 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1999 if (unlikely(!nseg))
2000 goto queuing_error;
2001 else
2002 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2003
2004 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2005 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2006 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2007 }
2008 } else {
2009 nseg = 0;
2010 }
2011
2012 req_cnt = 1;
2013 /* Total Data and protection sg segment(s) */
2014 tot_prot_dsds = nseg;
2015 tot_dsds += nseg;
2016 if (req->cnt < (req_cnt + 2)) {
2017 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2018 RD_REG_DWORD_RELAXED(req->req_q_out);
2019 if (req->ring_index < cnt)
2020 req->cnt = cnt - req->ring_index;
2021 else
2022 req->cnt = req->length -
2023 (req->ring_index - cnt);
2024 if (req->cnt < (req_cnt + 2))
2025 goto queuing_error;
2026 }
2027
2028 status |= QDSS_GOT_Q_SPACE;
2029
2030 /* Build header part of command packet (excluding the OPCODE). */
2031 req->current_outstanding_cmd = handle;
2032 req->outstanding_cmds[handle] = sp;
2033 sp->handle = handle;
2034 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2035 req->cnt -= req_cnt;
2036
2037 /* Fill-in common area */
2038 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2039 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2040
2041 clr_ptr = (uint32_t *)cmd_pkt + 2;
2042 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2043
2044 /* Set NPORT-ID and LUN number*/
2045 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2046 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2047 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2048 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2049
2050 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2051 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2052
2053 /* Total Data and protection segment(s) */
2054 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2055
2056 /* Build IOCB segments and adjust for data protection segments */
2057 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2058 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2059 QLA_SUCCESS)
2060 goto queuing_error;
2061
2062 cmd_pkt->entry_count = (uint8_t)req_cnt;
2063 cmd_pkt->timeout = cpu_to_le16(0);
2064 wmb();
2065
2066 /* Adjust ring index. */
2067 req->ring_index++;
2068 if (req->ring_index == req->length) {
2069 req->ring_index = 0;
2070 req->ring_ptr = req->ring;
2071 } else
2072 req->ring_ptr++;
2073
2074 /* Set chip new ring index. */
2075 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2076
2077 /* Manage unprocessed RIO/ZIO commands in response queue. */
2078 if (vha->flags.process_response_queue &&
2079 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2080 qla24xx_process_response_queue(vha, rsp);
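	/*
	 * Draining here, while the qpair lock is still held, picks up
	 * completions that ZIO has already posted instead of waiting
	 * for the next interrupt.
	 */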
2081
2082 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2083
2084 return QLA_SUCCESS;
2085
2086queuing_error:
2087 if (status & QDSS_GOT_Q_SPACE) {
2088 req->outstanding_cmds[handle] = NULL;
2089 req->cnt += req_cnt;
2090 }
2091 /* Cleanup will be performed by the caller (queuecommand) */
2092
2093 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2094 return QLA_FUNCTION_FAILED;
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07002095}
Andrew Vasquezac280b62009-08-20 11:06:05 -07002096
2097/* Generic Control-SRB manipulation functions. */
Arun Easib6a029e2014-09-25 06:14:52 -04002098
2099/* hardware_lock assumed to be held. */
Arun Easib6a029e2014-09-25 06:14:52 -04002100
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002101void *
Quinn Tran82de8022017-06-13 20:47:17 -07002102__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
Andrew Vasquezac280b62009-08-20 11:06:05 -07002103{
Quinn Tran82de8022017-06-13 20:47:17 -07002104 scsi_qla_host_t *vha = qpair->vha;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002105 struct qla_hw_data *ha = vha->hw;
Quinn Tran82de8022017-06-13 20:47:17 -07002106 struct req_que *req = qpair->req;
Bart Van Assche118e2ef2015-07-09 07:24:27 -07002107 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002108 uint32_t index, handle;
2109 request_t *pkt;
2110 uint16_t cnt, req_cnt;
2111
2112 pkt = NULL;
2113 req_cnt = 1;
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002114 handle = 0;
2115
Quinn Tran5e53be82018-07-26 16:34:44 -07002116 if (sp && (sp->type != SRB_SCSI_CMD)) {
2117 /* Adjust entry-counts as needed. */
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002118 req_cnt = sp->iocbs;
Quinn Tran5e53be82018-07-26 16:34:44 -07002119 }
Andrew Vasquez5780790e2011-11-18 09:03:20 -08002120
Andrew Vasquezac280b62009-08-20 11:06:05 -07002121 /* Check for room on request queue. */
Himanshu Madhani94007032014-09-25 06:14:46 -04002122 if (req->cnt < req_cnt + 2) {
Quinn Tran1586e072017-12-28 12:33:18 -08002123 if (qpair->use_shadow_reg)
2124 cnt = *req->out_ptr;
2125 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
Andrew Vasquezac280b62009-08-20 11:06:05 -07002126 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
Atul Deshmukh7ec0eff2013-08-27 01:37:28 -04002127 else if (IS_P3P_TYPE(ha))
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002128 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002129 else if (IS_FWI2_CAPABLE(ha))
2130 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002131 else if (IS_QLAFX00(ha))
2132 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002133 else
2134 cnt = qla2x00_debounce_register(
2135 ISP_REQ_Q_OUT(ha, &reg->isp));
2136
2137 if (req->ring_index < cnt)
2138 req->cnt = cnt - req->ring_index;
2139 else
2140 req->cnt = req->length -
2141 (req->ring_index - cnt);
2142 }
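	/*
	 * The request-queue out pointer lives in a different spot per
	 * generation: shadow memory when enabled, otherwise the
	 * generation-specific register block, with a debounced read on
	 * the oldest ISPs.
	 */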
Himanshu Madhani94007032014-09-25 06:14:46 -04002143 if (req->cnt < req_cnt + 2)
Andrew Vasquezac280b62009-08-20 11:06:05 -07002144 goto queuing_error;
2145
Quinn Tran5e53be82018-07-26 16:34:44 -07002146 if (sp) {
2147 /* Check for room in outstanding command list. */
2148 handle = req->current_outstanding_cmd;
2149 for (index = 1; index < req->num_outstanding_cmds; index++) {
2150 handle++;
2151 if (handle == req->num_outstanding_cmds)
2152 handle = 1;
2153 if (!req->outstanding_cmds[handle])
2154 break;
2155 }
2156 if (index == req->num_outstanding_cmds) {
2157 ql_log(ql_log_warn, vha, 0x700b,
2158 "No room on outstanding cmd array.\n");
2159 goto queuing_error;
2160 }
2161
2162 /* Prep command array. */
2163 req->current_outstanding_cmd = handle;
2164 req->outstanding_cmds[handle] = sp;
2165 sp->handle = handle;
2166 }
2167
Andrew Vasquezac280b62009-08-20 11:06:05 -07002168 /* Prep packet */
Andrew Vasquezac280b62009-08-20 11:06:05 -07002169 req->cnt -= req_cnt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002170 pkt = req->ring_ptr;
2171 memset(pkt, 0, REQUEST_ENTRY_SIZE);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002172 if (IS_QLAFX00(ha)) {
Saurav Kashyap1f8deef2013-06-25 11:27:21 -04002173 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2174 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002175 } else {
2176 pkt->entry_count = req_cnt;
2177 pkt->handle = handle;
2178 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07002179
Quinn Tran5e53be82018-07-26 16:34:44 -07002180 return pkt;
2181
Andrew Vasquezac280b62009-08-20 11:06:05 -07002182queuing_error:
Quinn Tran60a9ead2017-06-13 20:47:28 -07002183 qpair->tgt_counters.num_alloc_iocb_failed++;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002184 return pkt;
2185}
2186
Quinn Tran82de8022017-06-13 20:47:17 -07002187void *
2188qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2189{
2190 scsi_qla_host_t *vha = qpair->vha;
2191
2192 if (qla2x00_reset_active(vha))
2193 return NULL;
2194
2195 return __qla2x00_alloc_iocbs(qpair, sp);
2196}
2197
2198void *
2199qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2200{
2201 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2202}
2203
Andrew Vasquezac280b62009-08-20 11:06:05 -07002204static void
Duane Grigsbya5d42f42017-06-21 13:48:41 -07002205qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2206{
2207 struct srb_iocb *lio = &sp->u.iocb_cmd;
2208
2209 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2210 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2211 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2212 logio->control_flags |= LCF_NVME_PRLI;
2213
2214 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2215 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2216 logio->port_id[1] = sp->fcport->d_id.b.area;
2217 logio->port_id[2] = sp->fcport->d_id.b.domain;
2218 logio->vp_index = sp->vha->vp_idx;
2219}
2220
2221static void
Andrew Vasquezac280b62009-08-20 11:06:05 -07002222qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2223{
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002224 struct srb_iocb *lio = &sp->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002225
2226 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
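	/*
	 * SRB_LOGIN_PRLI_ONLY sends a bare PRLI; otherwise a PLOGI goes
	 * out, optionally marked conditional or with the PRLI step
	 * skipped, according to the SRB flags set by the caller.
	 */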
Quinn Tran48acad02018-08-02 13:16:44 -07002227 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2228 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2229 } else {
2230 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2231 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2232 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2233 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2234 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2235 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07002236 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2237 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2238 logio->port_id[1] = sp->fcport->d_id.b.area;
2239 logio->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002240 logio->vp_index = sp->vha->vp_idx;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002241}
2242
2243static void
2244qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2245{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002246 struct qla_hw_data *ha = sp->vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002247 struct srb_iocb *lio = &sp->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002248 uint16_t opts;
2249
Giridhar Malavalib9637522010-05-28 15:08:15 -07002250 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002251 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2252 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002253 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2254 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002255 if (HAS_EXTENDED_IDS(ha)) {
2256 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2257 mbx->mb10 = cpu_to_le16(opts);
2258 } else {
2259 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2260 }
2261 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2262 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2263 sp->fcport->d_id.b.al_pa);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002264 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002265}
2266
2267static void
2268qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2269{
2270 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2271 logio->control_flags =
2272 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
Quinn Tran0e324e942018-09-11 10:18:17 -07002273 if (!sp->fcport->keep_nport_handle)
Alexei Potashnika6ca8872015-07-14 16:00:44 -04002274 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002275 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2276 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2277 logio->port_id[1] = sp->fcport->d_id.b.area;
2278 logio->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002279 logio->vp_index = sp->vha->vp_idx;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002280}
2281
2282static void
2283qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2284{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002285 struct qla_hw_data *ha = sp->vha->hw;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002286
Giridhar Malavalib9637522010-05-28 15:08:15 -07002287 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002288 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2289 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2290 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2291 cpu_to_le16(sp->fcport->loop_id):
2292 cpu_to_le16(sp->fcport->loop_id << 8);
2293 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2294 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2295 sp->fcport->d_id.b.al_pa);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002296 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002297 /* Implicit: mbx->mb10 = 0. */
2298}
2299
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002300static void
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002301qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2302{
2303 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2304 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2305 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002306 logio->vp_index = sp->vha->vp_idx;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002307}
2308
2309static void
2310qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2311{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002312 struct qla_hw_data *ha = sp->vha->hw;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002313
2314 mbx->entry_type = MBX_IOCB_TYPE;
2315 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2316 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2317 if (HAS_EXTENDED_IDS(ha)) {
2318 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2319 mbx->mb10 = cpu_to_le16(BIT_0);
2320 } else {
2321 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2322 }
2323 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2324 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2325 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2326 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002327 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002328}
2329
2330static void
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002331qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2332{
2333 uint32_t flags;
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002334 uint64_t lun;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002335 struct fc_port *fcport = sp->fcport;
2336 scsi_qla_host_t *vha = fcport->vha;
2337 struct qla_hw_data *ha = vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002338 struct srb_iocb *iocb = &sp->u.iocb_cmd;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002339 struct req_que *req = vha->req;
2340
2341 flags = iocb->u.tmf.flags;
2342 lun = iocb->u.tmf.lun;
2343
2344 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2345 tsk->entry_count = 1;
2346 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2347 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2348 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2349 tsk->control_flags = cpu_to_le32(flags);
2350 tsk->port_id[0] = fcport->d_id.b.al_pa;
2351 tsk->port_id[1] = fcport->d_id.b.area;
2352 tsk->port_id[2] = fcport->d_id.b.domain;
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04002353 tsk->vp_index = fcport->vha->vp_idx;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002354
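	/*
	 * Only a LUN reset carries a LUN; convert it to SCSI LUN format
	 * and byte-swap it into the big-endian layout the firmware
	 * expects.
	 */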
2355 if (flags == TCF_LUN_RESET) {
2356 int_to_scsilun(lun, &tsk->lun);
2357 host_to_fcp_swap((uint8_t *)&tsk->lun,
2358 sizeof(tsk->lun));
2359 }
2360}
2361
2362static void
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002363qla2x00_els_dcmd_sp_free(void *data)
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002364{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002365 srb_t *sp = data;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002366 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2367
2368 kfree(sp->fcport);
2369
2370 if (elsio->u.els_logo.els_logo_pyld)
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002371 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002372 elsio->u.els_logo.els_logo_pyld,
2373 elsio->u.els_logo.els_logo_pyld_dma);
2374
2375 del_timer(&elsio->timer);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002376 qla2x00_rel_sp(sp);
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002377}
2378
2379static void
2380qla2x00_els_dcmd_iocb_timeout(void *data)
2381{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002382 srb_t *sp = data;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002383 fc_port_t *fcport = sp->fcport;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002384 struct scsi_qla_host *vha = sp->vha;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002385 struct srb_iocb *lio = &sp->u.iocb_cmd;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002386
2387 ql_dbg(ql_dbg_io, vha, 0x3069,
2388 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2389 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2390 fcport->d_id.b.al_pa);
2391
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002392 complete(&lio->u.els_logo.comp);
2393}
2394
2395static void
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002396qla2x00_els_dcmd_sp_done(void *ptr, int res)
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002397{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002398 srb_t *sp = ptr;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002399 fc_port_t *fcport = sp->fcport;
2400 struct srb_iocb *lio = &sp->u.iocb_cmd;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002401 struct scsi_qla_host *vha = sp->vha;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002402
2403 ql_dbg(ql_dbg_io, vha, 0x3072,
2404 "%s hdl=%x, portid=%02x%02x%02x done\n",
2405 sp->name, sp->handle, fcport->d_id.b.domain,
2406 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2407
2408 complete(&lio->u.els_logo.comp);
2409}
2410
2411int
2412qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2413 port_id_t remote_did)
2414{
2415 srb_t *sp;
2416 fc_port_t *fcport = NULL;
2417 struct srb_iocb *elsio = NULL;
2418 struct qla_hw_data *ha = vha->hw;
2419 struct els_logo_payload logo_pyld;
2420 int rval = QLA_SUCCESS;
2421
2422 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2423 if (!fcport) {
2424 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2425 return -ENOMEM;
2426 }
2427
2428 /* Alloc SRB structure */
2429 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2430 if (!sp) {
2431 kfree(fcport);
2432 ql_log(ql_log_info, vha, 0x70e6,
2433 "SRB allocation failed\n");
2434 return -ENOMEM;
2435 }
2436
2437 elsio = &sp->u.iocb_cmd;
2438 fcport->loop_id = 0xFFFF;
2439 fcport->d_id.b.domain = remote_did.b.domain;
2440 fcport->d_id.b.area = remote_did.b.area;
2441 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2442
2443 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2444 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2445
2446 sp->type = SRB_ELS_DCMD;
2447 sp->name = "ELS_DCMD";
2448 sp->fcport = fcport;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002449 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
Ben Hutchingse74e7d92018-03-20 21:36:14 +00002450 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
Quinn Tran8777e432018-08-02 13:16:57 -07002451 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002452 sp->done = qla2x00_els_dcmd_sp_done;
2453 sp->free = qla2x00_els_dcmd_sp_free;
2454
2455 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2456 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2457 GFP_KERNEL);
2458
2459 if (!elsio->u.els_logo.els_logo_pyld) {
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002460 sp->free(sp);
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002461 return QLA_FUNCTION_FAILED;
2462 }
2463
2464 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2465
2466 elsio->u.els_logo.els_cmd = els_opcode;
2467 logo_pyld.opcode = els_opcode;
2468 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2469 logo_pyld.s_id[1] = vha->d_id.b.area;
2470 logo_pyld.s_id[2] = vha->d_id.b.domain;
2471 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2472 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2473
2474 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2475 sizeof(struct els_logo_payload));
2476
2477 rval = qla2x00_start_sp(sp);
2478 if (rval != QLA_SUCCESS) {
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002479 sp->free(sp);
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002480 return QLA_FUNCTION_FAILED;
2481 }
2482
2483 ql_dbg(ql_dbg_io, vha, 0x3074,
2484 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2485 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2486 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2487
2488 wait_for_completion(&elsio->u.els_logo.comp);
2489
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002490 sp->free(sp);
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002491 return rval;
2492}
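/*
 * Usage sketch (illustrative only, assuming the driver's ELS_DCMD_LOGO
 * opcode define): send an explicit LOGO to a stale remote port before
 * re-login. The message id 0x7777 is a placeholder, not an allocated
 * one.
 *
 *	port_id_t did = { .b = { .domain = 0x01, .area = 0x02,
 *				 .al_pa = 0x03 } };
 *
 *	if (qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did) != QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0x7777, "LOGO to %06x failed\n",
 *		    did.b24);
 *
 * The call blocks on a completion until the firmware reports the ELS
 * done, so it must not be issued from atomic context.
 */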
2493
2494static void
2495qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2496{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002497 scsi_qla_host_t *vha = sp->vha;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002498 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2499
2500 els_iocb->entry_type = ELS_IOCB_TYPE;
2501 els_iocb->entry_count = 1;
2502 els_iocb->sys_define = 0;
2503 els_iocb->entry_status = 0;
2504 els_iocb->handle = sp->handle;
2505 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2506 els_iocb->tx_dsd_count = 1;
2507 els_iocb->vp_index = vha->vp_idx;
2508 els_iocb->sof_type = EST_SOFI3;
2509 els_iocb->rx_dsd_count = 0;
2510 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2511
2512 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2513 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2514 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002515 els_iocb->s_id[0] = vha->d_id.b.al_pa;
2516 els_iocb->s_id[1] = vha->d_id.b.area;
2517 els_iocb->s_id[2] = vha->d_id.b.domain;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002518 els_iocb->control_flags = 0;
2519
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002520 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
Quinn Tran8777e432018-08-02 13:16:57 -07002521 els_iocb->tx_byte_count = els_iocb->tx_len =
2522 sizeof(struct els_plogi_payload);
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002523 els_iocb->tx_address[0] =
2524 cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2525 els_iocb->tx_address[1] =
2526 cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002527
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002528 els_iocb->rx_dsd_count = 1;
Quinn Tran8777e432018-08-02 13:16:57 -07002529 els_iocb->rx_byte_count = els_iocb->rx_len =
2530 sizeof(struct els_plogi_payload);
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002531 els_iocb->rx_address[0] =
2532 cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2533 els_iocb->rx_address[1] =
2534 cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
Quinn Tran8777e432018-08-02 13:16:57 -07002535
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002536 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2537 "PLOGI ELS IOCB:\n");
2538 ql_dump_buffer(ql_log_info, vha, 0x0109,
2539 (uint8_t *)els_iocb, 0x70);
2540 } else {
2541 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2542 els_iocb->tx_address[0] =
2543 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2544 els_iocb->tx_address[1] =
2545 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2546 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2547
2548 els_iocb->rx_byte_count = 0;
2549 els_iocb->rx_address[0] = 0;
2550 els_iocb->rx_address[1] = 0;
2551 els_iocb->rx_len = 0;
2552 }
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002553
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002554 sp->vha->qla_stats.control_requests++;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002555}
2556
2557static void
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002558qla2x00_els_dcmd2_iocb_timeout(void *data)
2559{
2560 srb_t *sp = data;
2561 fc_port_t *fcport = sp->fcport;
2562 struct scsi_qla_host *vha = sp->vha;
2563 struct qla_hw_data *ha = vha->hw;
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002564 unsigned long flags = 0;
2565 int res;
2566
2567 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2568 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2569 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2570
2571 /* Abort the exchange */
2572 spin_lock_irqsave(&ha->hardware_lock, flags);
2573 res = ha->isp_ops->abort_command(sp);
2574 ql_dbg(ql_dbg_io, vha, 0x3070,
2575 "mbx abort_command %s\n",
2576 (res == QLA_SUCCESS) ? "successful" : "failed");
2577 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2578
Quinn Tran8777e432018-08-02 13:16:57 -07002579 sp->done(sp, QLA_FUNCTION_TIMEOUT);
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002580}
2581
2582static void
2583qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2584{
2585 srb_t *sp = ptr;
2586 fc_port_t *fcport = sp->fcport;
2587 struct srb_iocb *lio = &sp->u.iocb_cmd;
2588 struct scsi_qla_host *vha = sp->vha;
Quinn Tran8777e432018-08-02 13:16:57 -07002589 struct event_arg ea;
2590 struct qla_work_evt *e;
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002591
Quinn Tran8777e432018-08-02 13:16:57 -07002592 ql_dbg(ql_dbg_disc, vha, 0x3072,
2593 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2594 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002595
Quinn Tran8777e432018-08-02 13:16:57 -07002596 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2597 del_timer(&sp->u.iocb_cmd.timer);
2598
2599 if (sp->flags & SRB_WAKEUP_ON_COMP)
2600 complete(&lio->u.els_plogi.comp);
2601 else {
2602 if (res) {
2603 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2604 } else {
2605 memset(&ea, 0, sizeof(ea));
2606 ea.fcport = fcport;
2607 ea.rc = res;
2608 ea.event = FCME_ELS_PLOGI_DONE;
2609 qla2x00_fcport_event_handler(vha, &ea);
2610 }
2611
2612 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2613 if (!e) {
2614 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2615
2616 if (elsio->u.els_plogi.els_plogi_pyld)
2617 dma_free_coherent(&sp->vha->hw->pdev->dev,
2618 elsio->u.els_plogi.tx_size,
2619 elsio->u.els_plogi.els_plogi_pyld,
2620 elsio->u.els_plogi.els_plogi_pyld_dma);
2621
2622 if (elsio->u.els_plogi.els_resp_pyld)
2623 dma_free_coherent(&sp->vha->hw->pdev->dev,
2624 elsio->u.els_plogi.rx_size,
2625 elsio->u.els_plogi.els_resp_pyld,
2626 elsio->u.els_plogi.els_resp_pyld_dma);
2627 sp->free(sp);
Quinn Trane9f7be02018-08-07 20:39:52 -07002628 return;
Quinn Tran8777e432018-08-02 13:16:57 -07002629 }
2630 e->u.iosb.sp = sp;
2631 qla2x00_post_work(vha, e);
2632 }
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002633}
2634
2635int
2636qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
Quinn Tran8777e432018-08-02 13:16:57 -07002637 fc_port_t *fcport, bool wait)
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002638{
2639 srb_t *sp;
2640 struct srb_iocb *elsio = NULL;
2641 struct qla_hw_data *ha = vha->hw;
2642 int rval = QLA_SUCCESS;
2643 void *ptr, *resp_ptr;
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002644
2645 /* Alloc SRB structure */
2646 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2647 if (!sp) {
2648 ql_log(ql_log_info, vha, 0x70e6,
2649 "SRB allocation failed\n");
2650 return -ENOMEM;
2651 }
2652
2653 elsio = &sp->u.iocb_cmd;
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002654 ql_dbg(ql_dbg_io, vha, 0x3073,
2655 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2656
Himanshu Madhani15b6c3c2018-08-02 13:16:55 -07002657 fcport->flags |= FCF_ASYNC_SENT;
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002658 sp->type = SRB_ELS_DCMD;
2659 sp->name = "ELS_DCMD";
2660 sp->fcport = fcport;
Ben Hutchingse74e7d92018-03-20 21:36:14 +00002661
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002662 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
Ben Hutchingse74e7d92018-03-20 21:36:14 +00002663 init_completion(&elsio->u.els_plogi.comp);
Quinn Tran8777e432018-08-02 13:16:57 -07002664 if (wait)
2665 sp->flags = SRB_WAKEUP_ON_COMP;
2666
2667 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
Ben Hutchingse74e7d92018-03-20 21:36:14 +00002668
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002669 sp->done = qla2x00_els_dcmd2_sp_done;
Quinn Tran8777e432018-08-02 13:16:57 -07002670 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002671
2672 ptr = elsio->u.els_plogi.els_plogi_pyld =
2673 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2674 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002675
2676 if (!elsio->u.els_plogi.els_plogi_pyld) {
2677 rval = QLA_FUNCTION_FAILED;
2678 goto out;
2679 }
2680
2681 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2682 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2683 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2684
2685 if (!elsio->u.els_plogi.els_resp_pyld) {
2686 rval = QLA_FUNCTION_FAILED;
2687 goto out;
2688 }
2689
2690 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2691
2692 memset(ptr, 0, sizeof(struct els_plogi_payload));
2693 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
Quinn Tran8777e432018-08-02 13:16:57 -07002694 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2695 &ha->plogi_els_payld.data,
2696 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2697
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002698 elsio->u.els_plogi.els_cmd = els_opcode;
2699 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002700
Quinn Tran8777e432018-08-02 13:16:57 -07002701 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2702 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002703 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2704
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002705 rval = qla2x00_start_sp(sp);
2706 if (rval != QLA_SUCCESS) {
2707 rval = QLA_FUNCTION_FAILED;
Quinn Tran8777e432018-08-02 13:16:57 -07002708 } else {
2709 ql_dbg(ql_dbg_disc, vha, 0x3074,
2710 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2711 sp->name, sp->handle, fcport->loop_id,
2712 fcport->d_id.b24, vha->d_id.b24);
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002713 }
2714
Quinn Tran8777e432018-08-02 13:16:57 -07002715 if (wait) {
2716 wait_for_completion(&elsio->u.els_plogi.comp);
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002717
Quinn Tran8777e432018-08-02 13:16:57 -07002718 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2719 rval = QLA_FUNCTION_FAILED;
2720 } else {
2721 goto done;
2722 }
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002723
2724out:
Quinn Tran8777e432018-08-02 13:16:57 -07002725 fcport->flags &= ~(FCF_ASYNC_SENT);
2726 if (elsio->u.els_plogi.els_plogi_pyld)
2727 dma_free_coherent(&sp->vha->hw->pdev->dev,
2728 elsio->u.els_plogi.tx_size,
2729 elsio->u.els_plogi.els_plogi_pyld,
2730 elsio->u.els_plogi.els_plogi_pyld_dma);
2731
2732 if (elsio->u.els_plogi.els_resp_pyld)
2733 dma_free_coherent(&sp->vha->hw->pdev->dev,
2734 elsio->u.els_plogi.rx_size,
2735 elsio->u.els_plogi.els_resp_pyld,
2736 elsio->u.els_plogi.els_resp_pyld_dma);
2737
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002738 sp->free(sp);
Quinn Tran8777e432018-08-02 13:16:57 -07002739done:
Duane Grigsbyedd05de2017-10-13 09:34:06 -07002740 return rval;
2741}
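
/*
 * Example (editor's sketch; the caller and error handling are
 * illustrative, not lifted from this driver): a session-management path
 * can fire an asynchronous ELS PLOGI and fall back to the DPC relogin
 * machinery on failure:
 *
 *	if (qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, fcport, false) !=
 *	    QLA_SUCCESS)
 *		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 *
 * With wait == false the result is delivered through
 * qla2x00_els_dcmd2_sp_done(); with wait == true this routine blocks on
 * elsio->u.els_plogi.comp and maps the completion status into rval.
 */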
2742
2743static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002744qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2745{
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002746 struct bsg_job *bsg_job = sp->u.bsg_job;
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002747 struct fc_bsg_request *bsg_request = bsg_job->request;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002748
2749 els_iocb->entry_type = ELS_IOCB_TYPE;
2750 els_iocb->entry_count = 1;
2751 els_iocb->sys_define = 0;
2752 els_iocb->entry_status = 0;
2753 els_iocb->handle = sp->handle;
2754 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
Bart Van Asschead950362015-07-09 07:24:08 -07002755 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002756 els_iocb->vp_index = sp->vha->vp_idx;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002757 els_iocb->sof_type = EST_SOFI3;
Bart Van Asschead950362015-07-09 07:24:08 -07002758 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002759
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002760 els_iocb->opcode =
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002761 sp->type == SRB_ELS_CMD_RPT ?
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002762 bsg_request->rqst_data.r_els.els_code :
2763 bsg_request->rqst_data.h_els.command_code;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002764 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2765 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2766 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2767 els_iocb->control_flags = 0;
2768 els_iocb->rx_byte_count =
2769 cpu_to_le32(bsg_job->reply_payload.payload_len);
2770 els_iocb->tx_byte_count =
2771 cpu_to_le32(bsg_job->request_payload.payload_len);
2772
2773 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2774 (bsg_job->request_payload.sg_list)));
2775 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2776 (bsg_job->request_payload.sg_list)));
2777 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2778 (bsg_job->request_payload.sg_list));
2779
2780 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2781 (bsg_job->reply_payload.sg_list)));
2782 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2783 (bsg_job->reply_payload.sg_list)));
2784 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2785 (bsg_job->reply_payload.sg_list));
Joe Carnucciofabbb8d2013-08-27 01:37:40 -04002786
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002787 sp->vha->qla_stats.control_requests++;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002788}
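
/*
 * Worked example (editor's note): each 64-bit segment address is split
 * into two little-endian 32-bit words before being written to the IOCB.
 * For a DMA address of 0x0000001234567800, LSD() and MSD() take the low
 * and high halves, so tx_address[0] = 0x34567800 and
 * tx_address[1] = 0x00000012.
 */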
2789
2790static void
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002791qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2792{
2793 uint16_t avail_dsds;
2794 uint32_t *cur_dsd;
2795 struct scatterlist *sg;
2796 int index;
2797 uint16_t tot_dsds;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002798 scsi_qla_host_t *vha = sp->vha;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002799 struct qla_hw_data *ha = vha->hw;
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002800 struct bsg_job *bsg_job = sp->u.bsg_job;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002801 int loop_iteration = 0;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002802 int entry_count = 1;
2803
2804 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2805 ct_iocb->entry_type = CT_IOCB_TYPE;
2806 ct_iocb->entry_status = 0;
2807 ct_iocb->handle1 = sp->handle;
2808 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
Bart Van Asschead950362015-07-09 07:24:08 -07002809 ct_iocb->status = cpu_to_le16(0);
2810 ct_iocb->control_flags = cpu_to_le16(0);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002811 ct_iocb->timeout = 0;
2812 ct_iocb->cmd_dsd_count =
Bart Van Asschead950362015-07-09 07:24:08 -07002813 cpu_to_le16(bsg_job->request_payload.sg_cnt);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002814 ct_iocb->total_dsd_count =
Bart Van Asschead950362015-07-09 07:24:08 -07002815 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002816 ct_iocb->req_bytecount =
2817 cpu_to_le32(bsg_job->request_payload.payload_len);
2818 ct_iocb->rsp_bytecount =
2819 cpu_to_le32(bsg_job->reply_payload.payload_len);
2820
2821 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2822 (bsg_job->request_payload.sg_list)));
2823 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2824 (bsg_job->request_payload.sg_list)));
2825 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2826
2827 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2828 (bsg_job->reply_payload.sg_list)));
2829 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2830 (bsg_job->reply_payload.sg_list)));
2831 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2832
2833 avail_dsds = 1;
2834 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2835 index = 0;
2836 tot_dsds = bsg_job->reply_payload.sg_cnt;
2837
2838 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2839 dma_addr_t sle_dma;
2840 cont_a64_entry_t *cont_pkt;
2841
2842 /* Allocate additional continuation packets? */
2843 if (avail_dsds == 0) {
2844 /*
2845 * Five DSDs are available in the Cont.
2846 * Type 1 IOCB.
2847 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002848 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2849 vha->hw->req_q_map[0]);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002850 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2851 avail_dsds = 5;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002852 entry_count++;
2853 }
2854
2855 sle_dma = sg_dma_address(sg);
2856 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2857 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2858 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2859 loop_iteration++;
2860 avail_dsds--;
2861 }
2862 ct_iocb->entry_count = entry_count;
Joe Carnucciofabbb8d2013-08-27 01:37:40 -04002863
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002864 sp->vha->qla_stats.control_requests++;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002865}
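
/*
 * Worked example (editor's note): with the single DSD embedded in this
 * IOCB (the response descriptor) and five DSDs per Continuation Type 1
 * IOCB, a reply scatterlist of 12 entries yields
 * entry_count = 1 + DIV_ROUND_UP(12 - 1, 5) = 4.
 */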
2866
2867static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002868qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2869{
2870 uint16_t avail_dsds;
2871 uint32_t *cur_dsd;
2872 struct scatterlist *sg;
2873 int index;
Giridhar Malavalice0779c2017-08-23 15:05:23 -07002874 uint16_t cmd_dsds, rsp_dsds;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002875 scsi_qla_host_t *vha = sp->vha;
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002876 struct qla_hw_data *ha = vha->hw;
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002877 struct bsg_job *bsg_job = sp->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002878 int entry_count = 1;
Giridhar Malavalice0779c2017-08-23 15:05:23 -07002879 cont_a64_entry_t *cont_pkt = NULL;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002880
2881 ct_iocb->entry_type = CT_IOCB_TYPE;
2882 ct_iocb->entry_status = 0;
2883 ct_iocb->sys_define = 0;
2884 ct_iocb->handle = sp->handle;
2885
2886 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002887 ct_iocb->vp_index = sp->vha->vp_idx;
Bart Van Asschead950362015-07-09 07:24:08 -07002888 ct_iocb->comp_status = cpu_to_le16(0);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002889
Giridhar Malavalice0779c2017-08-23 15:05:23 -07002890 cmd_dsds = bsg_job->request_payload.sg_cnt;
2891 rsp_dsds = bsg_job->reply_payload.sg_cnt;
2892
2893 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002894 ct_iocb->timeout = 0;
Giridhar Malavalice0779c2017-08-23 15:05:23 -07002895 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002896 ct_iocb->cmd_byte_count =
2897 cpu_to_le32(bsg_job->request_payload.payload_len);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002898
Giridhar Malavalice0779c2017-08-23 15:05:23 -07002899 avail_dsds = 2;
2900 cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002901 index = 0;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002902
Giridhar Malavalice0779c2017-08-23 15:05:23 -07002903 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002904 dma_addr_t sle_dma;
Giridhar Malavalice0779c2017-08-23 15:05:23 -07002905
2906 /* Allocate additional continuation packets? */
2907 if (avail_dsds == 0) {
2908 /*
2909 * Five DSDs are available in the Cont.
2910 * Type 1 IOCB.
2911 */
2912 cont_pkt = qla2x00_prep_cont_type1_iocb(
2913 vha, ha->req_q_map[0]);
2914 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2915 avail_dsds = 5;
2916 entry_count++;
2917 }
2918
2919 sle_dma = sg_dma_address(sg);
2920 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2921 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2922 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2923 avail_dsds--;
2924 }
2925
2926 index = 0;
2927
2928 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2929 dma_addr_t sle_dma;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002930
2931 /* Allocate additional continuation packets? */
2932 if (avail_dsds == 0) {
2933 /*
2934 * Five DSDs are available in the Cont.
2935 * Type 1 IOCB.
2936 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002937 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2938 ha->req_q_map[0]);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002939 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2940 avail_dsds = 5;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002941 entry_count++;
2942 }
2943
2944 sle_dma = sg_dma_address(sg);
2945 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2946 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2947 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002948 avail_dsds--;
2949 }
2950 ct_iocb->entry_count = entry_count;
2951}
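
/*
 * Worked example (editor's note): the 24xx CT IOCB embeds two DSDs that
 * command and response segments consume from the same cursor, so
 * cmd_dsds = 1 and rsp_dsds = 7 use the 2 embedded DSDs plus
 * DIV_ROUND_UP(8 - 2, 5) = 2 continuation IOCBs, i.e. entry_count = 3.
 */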
2952
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002953/**
2954 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2955 * @sp: command to send to the ISP
2956 *
2957 * Returns non-zero if a failure occurred, else zero.
2958 */
2959int
2960qla82xx_start_scsi(srb_t *sp)
2961{
Bart Van Assche52c82822015-07-09 07:23:26 -07002962 int nseg;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002963 unsigned long flags;
2964 struct scsi_cmnd *cmd;
2965 uint32_t *clr_ptr;
2966 uint32_t index;
2967 uint32_t handle;
2968 uint16_t cnt;
2969 uint16_t req_cnt;
2970 uint16_t tot_dsds;
2971 struct device_reg_82xx __iomem *reg;
2972 uint32_t dbval;
2973 uint32_t *fcp_dl;
2974 uint8_t additional_cdb_len;
2975 struct ct6_dsd *ctx;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002976 struct scsi_qla_host *vha = sp->vha;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002977 struct qla_hw_data *ha = vha->hw;
2978 struct req_que *req = NULL;
2979 struct rsp_que *rsp = NULL;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002980
2981 /* Setup device pointers. */
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002982 reg = &ha->iobase->isp82;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002983 cmd = GET_CMD_SP(sp);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002984 req = vha->req;
2985 rsp = ha->rsp_q_map[0];
2986
2987 /* So we know we haven't pci_map'ed anything yet */
2988 tot_dsds = 0;
2989
2990 dbval = 0x04 | (ha->portnum << 5);
2991
2992 /* Send marker if required */
2993 if (vha->marker_needed != 0) {
2994 if (qla2x00_marker(vha, req,
2995 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2996 ql_log(ql_log_warn, vha, 0x300c,
2997 "qla2x00_marker failed for cmd=%p.\n", cmd);
2998 return QLA_FUNCTION_FAILED;
2999 }
3000 vha->marker_needed = 0;
3001 }
3002
3003 /* Acquire ring specific lock */
3004 spin_lock_irqsave(&ha->hardware_lock, flags);
3005
3006 /* Check for room in outstanding command list. */
3007 handle = req->current_outstanding_cmd;
Chad Dupuis8d93f552013-01-30 03:34:37 -05003008 for (index = 1; index < req->num_outstanding_cmds; index++) {
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003009 handle++;
Chad Dupuis8d93f552013-01-30 03:34:37 -05003010 if (handle == req->num_outstanding_cmds)
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003011 handle = 1;
3012 if (!req->outstanding_cmds[handle])
3013 break;
3014 }
Chad Dupuis8d93f552013-01-30 03:34:37 -05003015 if (index == req->num_outstanding_cmds)
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003016 goto queuing_error;
3017
3018 /* Map the sg table so we have an accurate count of sg entries needed */
3019 if (scsi_sg_count(cmd)) {
3020 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3021 scsi_sg_count(cmd), cmd->sc_data_direction);
3022 if (unlikely(!nseg))
3023 goto queuing_error;
3024 } else
3025 nseg = 0;
3026
3027 tot_dsds = nseg;
3028
3029 if (tot_dsds > ql2xshiftctondsd) {
3030 struct cmd_type_6 *cmd_pkt;
3031 uint16_t more_dsd_lists = 0;
3032 struct dsd_dma *dsd_ptr;
3033 uint16_t i;
3034
3035 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3036 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3037 ql_dbg(ql_dbg_io, vha, 0x300d,
3038 "Num of DSD list %d is than %d for cmd=%p.\n",
3039 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3040 cmd);
3041 goto queuing_error;
3042 }
3043
3044 if (more_dsd_lists <= ha->gbl_dsd_avail)
3045 goto sufficient_dsds;
3046 else
3047 more_dsd_lists -= ha->gbl_dsd_avail;
3048
3049 for (i = 0; i < more_dsd_lists; i++) {
3050 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3051 if (!dsd_ptr) {
3052 ql_log(ql_log_fatal, vha, 0x300e,
3053 "Failed to allocate memory for dsd_dma "
3054 "for cmd=%p.\n", cmd);
3055 goto queuing_error;
3056 }
3057
3058 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3059 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3060 if (!dsd_ptr->dsd_addr) {
3061 kfree(dsd_ptr);
3062 ql_log(ql_log_fatal, vha, 0x300f,
3063 "Failed to allocate memory for dsd_addr "
3064 "for cmd=%p.\n", cmd);
3065 goto queuing_error;
3066 }
3067 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3068 ha->gbl_dsd_avail++;
3069 }
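		/*
		 * Editor's note: the descriptors allocated above are parked on
		 * ha->gbl_dsd_list and accounted in ha->gbl_dsd_avail; the
		 * Command Type 6 path below chains them from the command
		 * context rather than embedding data segments in the IOCB.
		 */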
3070
3071sufficient_dsds:
3072 req_cnt = 1;
3073
3074 if (req->cnt < (req_cnt + 2)) {
3075 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3076 &reg->req_q_out[0]);
3077 if (req->ring_index < cnt)
3078 req->cnt = cnt - req->ring_index;
3079 else
3080 req->cnt = req->length -
3081 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04003082 if (req->cnt < (req_cnt + 2))
3083 goto queuing_error;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003084 }
3085
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08003086 ctx = sp->u.scmd.ctx =
3087 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3088 if (!ctx) {
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003089 ql_log(ql_log_fatal, vha, 0x3010,
3090 "Failed to allocate ctx for cmd=%p.\n", cmd);
3091 goto queuing_error;
3092 }
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08003093
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003094 memset(ctx, 0, sizeof(struct ct6_dsd));
Souptick Joarder501017f2018-02-15 01:40:38 +05303095 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003096 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3097 if (!ctx->fcp_cmnd) {
3098 ql_log(ql_log_fatal, vha, 0x3011,
3099 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
Dan Carpenter841f97b2012-05-17 10:13:40 +03003100 goto queuing_error;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003101 }
3102
3103 /* Initialize the DSD list and dma handle */
3104 INIT_LIST_HEAD(&ctx->dsd_list);
3105 ctx->dsd_use_cnt = 0;
3106
3107 if (cmd->cmd_len > 16) {
3108 additional_cdb_len = cmd->cmd_len - 16;
3109 if ((cmd->cmd_len % 4) != 0) {
3110 /* A SCSI command longer than 16 bytes must have a
3111 * length that is a multiple of 4.
3112 */
3113 ql_log(ql_log_warn, vha, 0x3012,
3114 "scsi cmd len %d not multiple of 4 "
3115 "for cmd=%p.\n", cmd->cmd_len, cmd);
3116 goto queuing_error_fcp_cmnd;
3117 }
3118 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3119 } else {
3120 additional_cdb_len = 0;
3121 ctx->fcp_cmnd_len = 12 + 16 + 4;
3122 }
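		/*
		 * Editor's note: fcp_cmnd_len is the 12-byte FCP_CMND header
		 * (8-byte LUN plus 4 control/attribute bytes) + the CDB + the
		 * trailing 4-byte FCP_DL, e.g. a 16-byte CDB gives
		 * 12 + 16 + 4 = 32 bytes.
		 */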
3123
3124 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3125 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3126
3127 /* Zero out remaining portion of packet. */
3128 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3129 clr_ptr = (uint32_t *)cmd_pkt + 2;
3130 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3131 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3132
3133 /* Set NPORT-ID and LUN number*/
3134 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3135 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3136 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3137 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08003138 cmd_pkt->vp_index = sp->vha->vp_idx;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003139
3140 /* Build IOCB segments */
3141 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3142 goto queuing_error_fcp_cmnd;
3143
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08003144 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003145 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3146
3147 /* build FCP_CMND IU */
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08003148 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003149 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3150
3151 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3152 ctx->fcp_cmnd->additional_cdb_len |= 1;
3153 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3154 ctx->fcp_cmnd->additional_cdb_len |= 2;
3155
Saurav Kashyapa00f6292011-11-18 09:03:19 -08003156 /* Populate the FCP_PRIO. */
3157 if (ha->flags.fcp_prio_enabled)
3158 ctx->fcp_cmnd->task_attribute |=
3159 sp->fcport->fcp_prio << 3;
3160
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003161 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3162
3163 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3164 additional_cdb_len);
3165 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3166
3167 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3168 cmd_pkt->fcp_cmnd_dseg_address[0] =
3169 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3170 cmd_pkt->fcp_cmnd_dseg_address[1] =
3171 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3172
3173 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3174 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3175 /* Set total data segment count. */
3176 cmd_pkt->entry_count = (uint8_t)req_cnt;
3177 /* Specify response queue number where
3178 * completion should happen.
3179 */
3180 cmd_pkt->entry_status = (uint8_t) rsp->id;
3181 } else {
3182 struct cmd_type_7 *cmd_pkt;
3183 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3184 if (req->cnt < (req_cnt + 2)) {
3185 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3186 &reg->req_q_out[0]);
3187 if (req->ring_index < cnt)
3188 req->cnt = cnt - req->ring_index;
3189 else
3190 req->cnt = req->length -
3191 (req->ring_index - cnt);
3192 }
3193 if (req->cnt < (req_cnt + 2))
3194 goto queuing_error;
3195
3196 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3197 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3198
3199 /* Zero out remaining portion of packet. */
3200 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3201 clr_ptr = (uint32_t *)cmd_pkt + 2;
3202 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3203 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3204
3205 /* Set NPORT-ID and LUN number*/
3206 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3207 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3208 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3209 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08003210 cmd_pkt->vp_index = sp->vha->vp_idx;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003211
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08003212 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003213 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08003214 sizeof(cmd_pkt->lun));
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003215
Saurav Kashyapa00f6292011-11-18 09:03:19 -08003216 /* Populate the FCP_PRIO. */
3217 if (ha->flags.fcp_prio_enabled)
3218 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3219
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003220 /* Load SCSI command packet. */
3221 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3222 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3223
3224 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3225
3226 /* Build IOCB segments */
Michael Hernandezd7459522016-12-12 14:40:07 -08003227 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003228
3229 /* Set total data segment count. */
3230 cmd_pkt->entry_count = (uint8_t)req_cnt;
3231 /* Specify response queue number where
3232 * completion should happen.
3233 */
3234 cmd_pkt->entry_status = (uint8_t) rsp->id;
3235
3236 }
3237 /* Build command packet. */
3238 req->current_outstanding_cmd = handle;
3239 req->outstanding_cmds[handle] = sp;
3240 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08003241 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003242 req->cnt -= req_cnt;
3243 wmb();
3244
3245 /* Adjust ring index. */
3246 req->ring_index++;
3247 if (req->ring_index == req->length) {
3248 req->ring_index = 0;
3249 req->ring_ptr = req->ring;
3250 } else
3251 req->ring_ptr++;
3252
3253 sp->flags |= SRB_DMA_VALID;
3254
3255 /* Set chip new ring index. */
3256 /* write, read and verify logic */
3257 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3258 if (ql2xdbwr)
Bart Van Assche8dfa4b5a2015-07-09 07:24:50 -07003259 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003260 else {
Bart Van Assche8dfa4b5a2015-07-09 07:24:50 -07003261 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003262 wmb();
Bart Van Assche8dfa4b5a2015-07-09 07:24:50 -07003263 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3264 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003265 wmb();
3266 }
3267 }
3268
3269 /* Manage unprocessed RIO/ZIO commands in response queue. */
3270 if (vha->flags.process_response_queue &&
3271 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3272 qla24xx_process_response_queue(vha, rsp);
3273
3274 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3275 return QLA_SUCCESS;
3276
3277queuing_error_fcp_cmnd:
3278 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3279queuing_error:
3280 if (tot_dsds)
3281 scsi_dma_unmap(cmd);
3282
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08003283 if (sp->u.scmd.ctx) {
3284 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3285 sp->u.scmd.ctx = NULL;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08003286 }
3287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3288
3289 return QLA_FUNCTION_FAILED;
3290}
3291
Joe Carnuccio6d78e552014-09-25 05:17:05 -04003292static void
Armen Baloyan4440e462014-02-26 04:15:18 -05003293qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3294{
3295 struct srb_iocb *aio = &sp->u.iocb_cmd;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08003296 scsi_qla_host_t *vha = sp->vha;
Quinn Tran49cecca2018-08-31 11:24:28 -07003297 struct req_que *req = sp->qpair->req;
Armen Baloyan4440e462014-02-26 04:15:18 -05003298
3299 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3300 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3301 abt_iocb->entry_count = 1;
Himanshu Madhanif3767222018-02-01 10:33:18 -08003302 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
Quinn Tran49cecca2018-08-31 11:24:28 -07003303 if (sp->fcport) {
3304 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3305 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3306 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3307 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3308 }
Armen Baloyan4440e462014-02-26 04:15:18 -05003309 abt_iocb->handle_to_abort =
Himanshu Madhanif3767222018-02-01 10:33:18 -08003310 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3311 aio->u.abt.cmd_hndl));
Armen Baloyan4440e462014-02-26 04:15:18 -05003312 abt_iocb->vp_index = vha->vp_idx;
Darren Trappb027a5a2018-01-15 20:46:51 -08003313 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
Armen Baloyan4440e462014-02-26 04:15:18 -05003314 /* Send the command to the firmware */
3315 wmb();
3316}
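
/*
 * Editor's note: MAKE_HANDLE() packs the request-queue id into the
 * upper 16 bits and the per-queue handle into the lower 16 bits, so
 * both abt_iocb->handle and abt_iocb->handle_to_abort carry
 * (queue, handle) pairs the firmware can route back to the correct
 * request queue.
 */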
3317
Quinn Tran726b8542017-01-19 22:28:00 -08003318static void
3319qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3320{
3321 int i, sz;
3322
3323 mbx->entry_type = MBX_IOCB_TYPE;
3324 mbx->handle = sp->handle;
3325 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3326
3327 for (i = 0; i < sz; i++)
3328 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3329}
3330
3331static void
3332qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3333{
3334 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3335 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3336 ct_pkt->handle = sp->handle;
3337}
3338
3339static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3340 struct nack_to_isp *nack)
3341{
3342 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3343
3344 nack->entry_type = NOTIFY_ACK_TYPE;
3345 nack->entry_count = 1;
3346 nack->ox_id = ntfy->ox_id;
3347
3348 nack->u.isp24.handle = sp->handle;
3349 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3350 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3351 nack->u.isp24.flags = ntfy->u.isp24.flags &
3352 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3353 }
3354 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3355 nack->u.isp24.status = ntfy->u.isp24.status;
3356 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3357 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3358 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3359 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3360 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3361 nack->u.isp24.srr_flags = 0;
3362 nack->u.isp24.srr_reject_code = 0;
3363 nack->u.isp24.srr_reject_code_expl = 0;
3364 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3365}
3366
Duane Grigsbye84067d2017-06-21 13:48:43 -07003367/*
3368 * Build NVME LS request
3369 */
3370static int
3371qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3372{
3373 struct srb_iocb *nvme;
3374 int rval = QLA_SUCCESS;
3375
3376 nvme = &sp->u.iocb_cmd;
3377 cmd_pkt->entry_type = PT_LS4_REQUEST;
3378 cmd_pkt->entry_count = 1;
3379 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3380
3381 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3382 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3383 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3384
3385 cmd_pkt->tx_dseg_count = 1;
3386 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3387 cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3388 cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3389 cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3390
3391 cmd_pkt->rx_dseg_count = 1;
3392 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3393 cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
3394 cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3395 cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3396
3397 return rval;
3398}
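
/*
 * Editor's note: the PT_LS4 request is built with exactly one DSD per
 * direction (tx_dseg_count = rx_dseg_count = 1), so the NVMe-FC LS
 * command and response buffers must each be a single physically
 * contiguous DMA allocation.
 */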
3399
Quinn Tran28531922017-12-28 12:33:10 -08003400static void
3401qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3402{
3403 int map, pos;
3404
3405 vce->entry_type = VP_CTRL_IOCB_TYPE;
3406 vce->handle = sp->handle;
3407 vce->entry_count = 1;
3408 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3409 vce->vp_count = cpu_to_le16(1);
3410
3411 /*
3412 * The index map in the firmware starts at 1, so decrement the
3413 * index; this is OK as we never use index 0.
3414 */
3415 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3416 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3417 vce->vp_idx_map[map] |= 1 << pos;
3418}
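
/*
 * Worked example (editor's note): vp_index 10 maps to
 * map = (10 - 1) / 8 = 1 and pos = (10 - 1) & 7 = 1, so
 * vce->vp_idx_map[1] |= 0x02.
 */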
3419
Quinn Tran11aea162017-12-28 12:33:20 -08003420static void
3421qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3422{
3423 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3424 logio->control_flags =
3425 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3426
3427 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3428 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3429 logio->port_id[1] = sp->fcport->d_id.b.area;
3430 logio->port_id[2] = sp->fcport->d_id.b.domain;
3431 logio->vp_index = sp->fcport->vha->vp_idx;
3432}
3433
Andrew Vasquezac280b62009-08-20 11:06:05 -07003434int
3435qla2x00_start_sp(srb_t *sp)
3436{
3437 int rval;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08003438 scsi_qla_host_t *vha = sp->vha;
Quinn Tran726b8542017-01-19 22:28:00 -08003439 struct qla_hw_data *ha = vha->hw;
Quinn Tran6a629462018-09-04 14:19:15 -07003440 struct qla_qpair *qp = sp->qpair;
Andrew Vasquezac280b62009-08-20 11:06:05 -07003441 void *pkt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07003442 unsigned long flags;
3443
3444 rval = QLA_FUNCTION_FAILED;
Quinn Tran6a629462018-09-04 14:19:15 -07003445 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3446 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07003447 if (!pkt) {
Quinn Tran726b8542017-01-19 22:28:00 -08003448 ql_log(ql_log_warn, vha, 0x700c,
Saurav Kashyap7c3df132011-07-14 12:00:13 -07003449 "qla2x00_alloc_iocbs failed.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07003450 goto done;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07003451 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07003452
3453 rval = QLA_SUCCESS;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08003454 switch (sp->type) {
Andrew Vasquezac280b62009-08-20 11:06:05 -07003455 case SRB_LOGIN_CMD:
3456 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07003457 qla24xx_login_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07003458 qla2x00_login_iocb(sp, pkt);
3459 break;
Duane Grigsbya5d42f42017-06-21 13:48:41 -07003460 case SRB_PRLI_CMD:
3461 qla24xx_prli_iocb(sp, pkt);
3462 break;
Andrew Vasquezac280b62009-08-20 11:06:05 -07003463 case SRB_LOGOUT_CMD:
3464 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07003465 qla24xx_logout_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07003466 qla2x00_logout_iocb(sp, pkt);
3467 break;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08003468 case SRB_ELS_CMD_RPT:
3469 case SRB_ELS_CMD_HST:
3470 qla24xx_els_iocb(sp, pkt);
3471 break;
3472 case SRB_CT_CMD:
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05003473 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5780790e2011-11-18 09:03:20 -08003474 qla24xx_ct_iocb(sp, pkt) :
3475 qla2x00_ct_iocb(sp, pkt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08003476 break;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07003477 case SRB_ADISC_CMD:
3478 IS_FWI2_CAPABLE(ha) ?
3479 qla24xx_adisc_iocb(sp, pkt) :
3480 qla2x00_adisc_iocb(sp, pkt);
3481 break;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07003482 case SRB_TM_CMD:
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04003483 IS_QLAFX00(ha) ?
3484 qlafx00_tm_iocb(sp, pkt) :
3485 qla24xx_tm_iocb(sp, pkt);
3486 break;
3487 case SRB_FXIOCB_DCMD:
3488 case SRB_FXIOCB_BCMD:
3489 qlafx00_fxdisc_iocb(sp, pkt);
3490 break;
Duane Grigsbye84067d2017-06-21 13:48:43 -07003491 case SRB_NVME_LS:
3492 qla_nvme_ls(sp, pkt);
3493 break;
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04003494 case SRB_ABT_CMD:
Armen Baloyan4440e462014-02-26 04:15:18 -05003495 IS_QLAFX00(ha) ?
3496 qlafx00_abort_iocb(sp, pkt) :
3497 qla24xx_abort_iocb(sp, pkt);
Madhuranath Iyengar38222632010-05-04 15:01:29 -07003498 break;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05003499 case SRB_ELS_DCMD:
3500 qla24xx_els_logo_iocb(sp, pkt);
3501 break;
Quinn Tran726b8542017-01-19 22:28:00 -08003502 case SRB_CT_PTHRU_CMD:
3503 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3504 break;
3505 case SRB_MB_IOCB:
3506 qla2x00_mb_iocb(sp, pkt);
3507 break;
3508 case SRB_NACK_PLOGI:
3509 case SRB_NACK_PRLI:
3510 case SRB_NACK_LOGO:
3511 qla2x00_send_notify_ack_iocb(sp, pkt);
3512 break;
Quinn Tran28531922017-12-28 12:33:10 -08003513 case SRB_CTRL_VP:
3514 qla25xx_ctrlvp_iocb(sp, pkt);
3515 break;
Quinn Tran11aea162017-12-28 12:33:20 -08003516 case SRB_PRLO_CMD:
3517 qla24xx_prlo_iocb(sp, pkt);
3518 break;
Andrew Vasquezac280b62009-08-20 11:06:05 -07003519 default:
3520 break;
3521 }
3522
3523 wmb();
Quinn Tran6a629462018-09-04 14:19:15 -07003524 qla2x00_start_iocbs(vha, qp->req);
Andrew Vasquezac280b62009-08-20 11:06:05 -07003525done:
Quinn Tran6a629462018-09-04 14:19:15 -07003526 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
Andrew Vasquezac280b62009-08-20 11:06:05 -07003527 return rval;
3528}
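
/*
 * Example (editor's sketch; my_mbx_done is a hypothetical callback):
 * every asynchronous IOCB follows the same pattern: allocate an SRB,
 * set sp->type and sp->done, then hand it to qla2x00_start_sp(), which
 * formats the type-specific packet under the queue-pair lock:
 *
 *	srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *
 *	if (!sp)
 *		return -ENOMEM;
 *	sp->type = SRB_MB_IOCB;
 *	sp->done = my_mbx_done;
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		sp->free(sp);
 */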
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003529
3530static void
3531qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3532 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3533{
3534 uint16_t avail_dsds;
3535 uint32_t *cur_dsd;
3536 uint32_t req_data_len = 0;
3537 uint32_t rsp_data_len = 0;
3538 struct scatterlist *sg;
3539 int index;
3540 int entry_count = 1;
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01003541 struct bsg_job *bsg_job = sp->u.bsg_job;
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003542
3543 /* Update entry type to indicate a bidirectional command. */
3544 *((uint32_t *)(&cmd_pkt->entry_type)) =
Bart Van Asschead950362015-07-09 07:24:08 -07003545 cpu_to_le32(COMMAND_BIDIRECTIONAL);
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003546
3547 /* Set the transfer direction; in this case set both flags.
3548 * Also set the BD_WRAP_BACK flag; the firmware takes care of
3549 * assigning DID=SID for outgoing packets.
3550 */
3551 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3552 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
Bart Van Asschead950362015-07-09 07:24:08 -07003553 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003554 BD_WRAP_BACK);
3555
3556 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3557 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3558 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3559 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3560
3561 vha->bidi_stats.transfer_bytes += req_data_len;
3562 vha->bidi_stats.io_count++;
3563
Joe Carnucciofabbb8d2013-08-27 01:37:40 -04003564 vha->qla_stats.output_bytes += req_data_len;
3565 vha->qla_stats.output_requests++;
3566
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003567 /* Only one DSD is available in the bidirectional IOCB; the remaining
3568 * DSDs are bundled in continuation IOCBs.
3569 */
3570 avail_dsds = 1;
3571 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3572
3573 index = 0;
3574
3575 for_each_sg(bsg_job->request_payload.sg_list, sg,
3576 bsg_job->request_payload.sg_cnt, index) {
3577 dma_addr_t sle_dma;
3578 cont_a64_entry_t *cont_pkt;
3579
3580 /* Allocate additional continuation packets */
3581 if (avail_dsds == 0) {
3582 /* A Continuation Type 1 IOCB can accommodate
3583 * 5 DSDs.
3584 */
3585 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3586 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3587 avail_dsds = 5;
3588 entry_count++;
3589 }
3590 sle_dma = sg_dma_address(sg);
3591 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3592 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3593 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3594 avail_dsds--;
3595 }
3596 /* For a read request the DSDs always go to a continuation IOCB,
3597 * following the write DSDs. If there is room in the current IOCB
3598 * they are added to it; otherwise a new continuation IOCB is
3599 * allocated.
3600 */
3601 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3602 bsg_job->reply_payload.sg_cnt, index) {
3603 dma_addr_t sle_dma;
3604 cont_a64_entry_t *cont_pkt;
3605
3606 /* Allocate additional continuation packets */
3607 if (avail_dsds == 0) {
3608 /* A Continuation Type 1 IOCB can accommodate
3609 * 5 DSDs.
3610 */
3611 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3612 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3613 avail_dsds = 5;
3614 entry_count++;
3615 }
3616 sle_dma = sg_dma_address(sg);
3617 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3618 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3619 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3620 avail_dsds--;
3621 }
3622 /* This value must equal the number of IOCBs required for this cmd. */
3623 cmd_pkt->entry_count = entry_count;
3624}
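
/*
 * Worked example (editor's note): the bidirectional IOCB embeds a
 * single DSD, so 4 write + 3 read SG entries consume it plus
 * DIV_ROUND_UP(7 - 1, 5) = 2 continuation IOCBs, giving
 * entry_count = 3.
 */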
3625
3626int
3627qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3628{
3629
3630 struct qla_hw_data *ha = vha->hw;
3631 unsigned long flags;
3632 uint32_t handle;
3633 uint32_t index;
3634 uint16_t req_cnt;
3635 uint16_t cnt;
3636 uint32_t *clr_ptr;
3637 struct cmd_bidir *cmd_pkt = NULL;
3638 struct rsp_que *rsp;
3639 struct req_que *req;
3640 int rval = EXT_STATUS_OK;
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003641
3642 rval = QLA_SUCCESS;
3643
3644 rsp = ha->rsp_q_map[0];
3645 req = vha->req;
3646
3647 /* Send marker if required */
3648 if (vha->marker_needed != 0) {
3649 if (qla2x00_marker(vha, req,
3650 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3651 return EXT_STATUS_MAILBOX;
3652 vha->marker_needed = 0;
3653 }
3654
3655 /* Acquire ring specific lock */
3656 spin_lock_irqsave(&ha->hardware_lock, flags);
3657
3658 /* Check for room in outstanding command list. */
3659 handle = req->current_outstanding_cmd;
Chad Dupuis8d93f552013-01-30 03:34:37 -05003660 for (index = 1; index < req->num_outstanding_cmds; index++) {
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003661 handle++;
Bart Van Assche8d2b21d2015-06-04 15:58:09 -07003662 if (handle == req->num_outstanding_cmds)
3663 handle = 1;
3664 if (!req->outstanding_cmds[handle])
3665 break;
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003666 }
3667
Chad Dupuis8d93f552013-01-30 03:34:37 -05003668 if (index == req->num_outstanding_cmds) {
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003669 rval = EXT_STATUS_BUSY;
3670 goto queuing_error;
3671 }
3672
3673 /* Calculate number of IOCB required */
3674 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3675
3676 /* Check for room on request queue. */
3677 if (req->cnt < req_cnt + 2) {
Joe Carnuccio7c6300e2014-04-11 16:54:37 -04003678 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3679 RD_REG_DWORD_RELAXED(req->req_q_out);
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04003680 if (req->ring_index < cnt)
3681 req->cnt = cnt - req->ring_index;
3682 else
3683 req->cnt = req->length -
3684 (req->ring_index - cnt);
3685 }
3686 if (req->cnt < req_cnt + 2) {
3687 rval = EXT_STATUS_BUSY;
3688 goto queuing_error;
3689 }
3690
3691 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3692 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3693
3694 /* Zero out remaining portion of packet. */
3695 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3696 clr_ptr = (uint32_t *)cmd_pkt + 2;
3697 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3698
3699 /* Set NPORT-ID (of vha)*/
3700 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3701 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3702 cmd_pkt->port_id[1] = vha->d_id.b.area;
3703 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3704
3705 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3706 cmd_pkt->entry_status = (uint8_t) rsp->id;
3707 /* Build command packet. */
3708 req->current_outstanding_cmd = handle;
3709 req->outstanding_cmds[handle] = sp;
3710 sp->handle = handle;
3711 req->cnt -= req_cnt;
3712
3713 /* Send the command to the firmware */
3714 wmb();
3715 qla2x00_start_iocbs(vha, req);
3716queuing_error:
3717 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3718 return rval;
3719}