/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

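/*
 * Deferred-work handler that frees a temporary fc_port allocated for a
 * BSG pass-through command; queued from qla2x00_bsg_sp_free() below.
 */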
static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	sp->free(sp);
}

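/*
 * Release the resources held by a completed BSG SRB: undo the DMA mappings
 * of the request/reply scatter-gather lists (for SRB_FXIOCB_BCMD each
 * direction is unmapped only if its DMA_VALID flag is set), queue the
 * temporary fcport of CT/host-ELS/FX commands for deferred freeing, and
 * return the SRB to its pool.
 */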
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}

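/*
 * Sanity-check FCP priority configuration data: a blank flash word
 * (0xFFFFFFFF) or a missing "HQOS" signature is rejected; when @flag is 1
 * the entries are also scanned and at least one must be tagged valid.
 * Returns 1 if the data is usable, 0 otherwise.
 */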
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

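/*
 * Handle the FCP priority vendor sub-commands (disable, enable, get
 * config, set config) carried in vendor_cmd[1] of the BSG request.
 */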
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid, the
			 * fcp_prio_cfg data is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

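/*
 * Build and issue an ELS pass-through SRB. A remote-port ELS reuses the
 * rport's fcport (logging in to the fabric first if needed); a host-based
 * ELS gets a temporary fcport initialized from the port ID carried in the
 * request.
 */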
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	/* Only the temporary fcport allocated for host-based ELS is freed */
	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

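/*
 * Number of IOCBs needed for a CT pass-through: the command IOCB carries
 * up to two data segment descriptors and each continuation IOCB carries
 * up to five more, i.e. 1 + DIV_ROUND_UP(dsds - 2, 5) when dsds > 2.
 */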
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

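/*
 * Build and issue a CT pass-through SRB: map the request/reply
 * scatter-gather lists, derive the destination loop ID from
 * preamble_word1 (0xFC = SNS, 0xFA = management server) and attach a
 * temporary fcport carrying the port ID from the request.
 */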
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s else type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

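/*
 * Vendor loopback/echo diagnostic. The request payload is copied into a
 * DMA buffer and either sent through the firmware ECHO test (when the
 * topology and options call for it) or through the loopback test; on
 * ISP81xx/83xx/8044 the port configuration is switched into the requested
 * loopback mode first and restored afterwards. The firmware mailbox status
 * words are appended to the BSG reply after the fc_bsg_reply structure.
 */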
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	     ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	      get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	      req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	      elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

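/*
 * Issue an ISP84xx chip reset; A84_ISSUE_RESET_DIAG_FW in vendor_cmd[1]
 * requests a reset to the diagnostic firmware.
 */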
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

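/*
 * Download a firmware image to the ISP84xx: copy the BSG request payload
 * into a coherent buffer and hand it to the firmware with a Verify Chip
 * IOCB (120 second timeout).
 */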
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

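/*
 * ISP84xx management pass-through: read/write memory, get info or change
 * configuration through an Access Chip IOCB, staging the data in a
 * coherent buffer mapped in the direction the sub-command requires.
 */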
1105static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001106qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
Giridhar Malavali6e980162010-03-19 17:03:58 -07001107{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001108 struct fc_bsg_request *bsg_request = bsg_job->request;
1109 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001110 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001111 scsi_qla_host_t *vha = shost_priv(host);
1112 struct qla_hw_data *ha = vha->hw;
1113 struct access_chip_84xx *mn = NULL;
1114 dma_addr_t mn_dma, mgmt_dma;
1115 void *mgmt_b = NULL;
1116 int rval = 0;
1117 struct qla_bsg_a84_mgmt *ql84_mgmt;
1118 uint32_t sg_cnt;
Harish Zunjarraod54590832010-03-19 17:04:00 -07001119 uint32_t data_len = 0;
Giridhar Malavali6e980162010-03-19 17:03:58 -07001120 uint32_t dma_direction = DMA_NONE;
1121
Giridhar Malavali6e980162010-03-19 17:03:58 -07001122 if (!IS_QLA84XX(ha)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001123 ql_log(ql_log_warn, vha, 0x703a,
1124 "Not 84xx, exiting.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001125 return -EINVAL;
1126 }
1127
Thomas Meyer08eb7f42017-09-21 08:15:26 +02001128 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001129 if (!mn) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001130 ql_log(ql_log_warn, vha, 0x703c,
1131 "DMA alloc failed for fw buffer.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001132 return -ENOMEM;
1133 }
1134
Giridhar Malavali6e980162010-03-19 17:03:58 -07001135 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1136 mn->entry_count = 1;
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001137 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001138 switch (ql84_mgmt->mgmt.cmd) {
1139 case QLA84_MGMT_READ_MEM:
1140 case QLA84_MGMT_GET_INFO:
1141 sg_cnt = dma_map_sg(&ha->pdev->dev,
1142 bsg_job->reply_payload.sg_list,
1143 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1144 if (!sg_cnt) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001145 ql_log(ql_log_warn, vha, 0x703d,
1146 "dma_map_sg returned %d for reply.\n", sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001147 rval = -ENOMEM;
1148 goto exit_mgmt;
1149 }
1150
1151 dma_direction = DMA_FROM_DEVICE;
1152
1153 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001154 ql_log(ql_log_warn, vha, 0x703e,
1155 "DMA mapping resulted in different sg counts, "
1156 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1157 bsg_job->reply_payload.sg_cnt, sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001158 rval = -EAGAIN;
1159 goto done_unmap_sg;
1160 }
1161
1162 data_len = bsg_job->reply_payload.payload_len;
1163
1164 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1165 &mgmt_dma, GFP_KERNEL);
1166 if (!mgmt_b) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001167 ql_log(ql_log_warn, vha, 0x703f,
1168 "DMA alloc failed for mgmt_b.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001169 rval = -ENOMEM;
1170 goto done_unmap_sg;
1171 }
1172
1173 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1174 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1175 mn->parameter1 =
1176 cpu_to_le32(
1177 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1178
1179 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1180 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1181 mn->parameter1 =
1182 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1183
1184 mn->parameter2 =
1185 cpu_to_le32(
1186 ql84_mgmt->mgmt.mgmtp.u.info.context);
1187 }
1188 break;
1189
1190 case QLA84_MGMT_WRITE_MEM:
1191 sg_cnt = dma_map_sg(&ha->pdev->dev,
1192 bsg_job->request_payload.sg_list,
1193 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1194
1195 if (!sg_cnt) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001196 ql_log(ql_log_warn, vha, 0x7040,
1197 "dma_map_sg returned %d.\n", sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001198 rval = -ENOMEM;
1199 goto exit_mgmt;
1200 }
1201
1202 dma_direction = DMA_TO_DEVICE;
1203
1204 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001205 ql_log(ql_log_warn, vha, 0x7041,
1206 "DMA mapping resulted in different sg counts, "
1207 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1208 bsg_job->request_payload.sg_cnt, sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001209 rval = -EAGAIN;
1210 goto done_unmap_sg;
1211 }
1212
1213 data_len = bsg_job->request_payload.payload_len;
1214 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1215 &mgmt_dma, GFP_KERNEL);
1216 if (!mgmt_b) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001217 ql_log(ql_log_warn, vha, 0x7042,
1218 "DMA alloc failed for mgmt_b.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001219 rval = -ENOMEM;
1220 goto done_unmap_sg;
1221 }
1222
1223 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1224 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1225
1226 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1227 mn->parameter1 =
1228 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1229 break;
1230
1231 case QLA84_MGMT_CHNG_CONFIG:
1232 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1233 mn->parameter1 =
1234 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1235
1236 mn->parameter2 =
1237 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1238
1239 mn->parameter3 =
1240 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1241 break;
1242
1243 default:
1244 rval = -EIO;
1245 goto exit_mgmt;
1246 }
1247
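	/* All commands except CHNG_CONFIG carry a data buffer, which is
	 * described to the firmware as a single data segment. */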
1248 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1249 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1250 mn->dseg_count = cpu_to_le16(1);
Bart Van Assche15b7a682019-04-17 14:44:38 -07001251 put_unaligned_le64(mgmt_dma, &mn->dsd.address);
1252 mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001253 }
1254
1255 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1256
1257 if (rval) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001258 ql_log(ql_log_warn, vha, 0x7043,
1259 "Vendor request 84xx mgmt failed.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001260
Armen Baloyan63ea9232012-11-21 02:39:53 -05001261 rval = (DID_ERROR << 16);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001262
1263 } else {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001264 ql_dbg(ql_dbg_user, vha, 0x7044,
1265 "Vendor request 84xx mgmt completed.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001266
1267 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001268 bsg_reply->result = DID_OK;
Giridhar Malavali6e980162010-03-19 17:03:58 -07001269
1270 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1271 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001272 bsg_reply->reply_payload_rcv_len =
Giridhar Malavali6e980162010-03-19 17:03:58 -07001273 bsg_job->reply_payload.payload_len;
1274
1275 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
Andrew Vasquez6c452a42010-03-19 17:04:02 -07001276 bsg_job->reply_payload.sg_cnt, mgmt_b,
1277 data_len);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001278 }
1279 }
1280
Giridhar Malavali6e980162010-03-19 17:03:58 -07001281done_unmap_sg:
Harish Zunjarraod54590832010-03-19 17:04:00 -07001282 if (mgmt_b)
1283 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1284
Giridhar Malavali6e980162010-03-19 17:03:58 -07001285 if (dma_direction == DMA_TO_DEVICE)
1286 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1287 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1288 else if (dma_direction == DMA_FROM_DEVICE)
1289 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1290 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1291
1292exit_mgmt:
1293 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1294
Armen Baloyan63ea9232012-11-21 02:39:53 -05001295 if (!rval)
Johannes Thumshirn06548162016-11-17 10:31:22 +01001296 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001297 bsg_reply->reply_payload_rcv_len);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001298 return rval;
1299}
1300
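/*
 * Get or set the iiDMA speed of a logged-in target port. The port is
 * selected by the WWPN in the qla_port_param payload; on a get, the
 * current speed is copied back after the fc_bsg_reply.
 */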
1301static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001302qla24xx_iidma(struct bsg_job *bsg_job)
Giridhar Malavali6e980162010-03-19 17:03:58 -07001303{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001304 struct fc_bsg_request *bsg_request = bsg_job->request;
1305 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001306 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001307 scsi_qla_host_t *vha = shost_priv(host);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001308 int rval = 0;
1309 struct qla_port_param *port_param = NULL;
1310 fc_port_t *fcport = NULL;
Joe Carnuccioe8b8b8a2013-08-27 01:37:29 -04001311 int found = 0;
Giridhar Malavali6e980162010-03-19 17:03:58 -07001312 uint16_t mb[MAILBOX_REGISTER_COUNT];
1313 uint8_t *rsp_ptr = NULL;
1314
Giridhar Malavali6e980162010-03-19 17:03:58 -07001315 if (!IS_IIDMA_CAPABLE(vha->hw)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001316 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001317 return -EINVAL;
1318 }
1319
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001320 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001321 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001322 ql_log(ql_log_warn, vha, 0x7048,
1323 "Invalid destination type.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001324 return -EINVAL;
1325 }
1326
1327 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1328 if (fcport->port_type != FCT_TARGET)
1329 continue;
1330
1331 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1332 fcport->port_name, sizeof(fcport->port_name)))
1333 continue;
Joe Carnuccioe8b8b8a2013-08-27 01:37:29 -04001334
1335 found = 1;
Giridhar Malavali6e980162010-03-19 17:03:58 -07001336 break;
1337 }
1338
Joe Carnuccioe8b8b8a2013-08-27 01:37:29 -04001339 if (!found) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001340 ql_log(ql_log_warn, vha, 0x7049,
1341 "Failed to find port.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001342 return -EINVAL;
1343 }
1344
Giridhar Malavalic9afb9a2010-09-03 15:20:48 -07001345 if (atomic_read(&fcport->state) != FCS_ONLINE) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001346 ql_log(ql_log_warn, vha, 0x704a,
1347 "Port is not online.\n");
Madhuranath Iyengar17cf2c52010-07-23 15:28:22 +05001348 return -EINVAL;
1349 }
1350
Madhuranath Iyengar9a15eb42010-07-23 15:28:31 +05001351 if (fcport->flags & FCF_LOGIN_NEEDED) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001352 ql_log(ql_log_warn, vha, 0x704b,
1353 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
Madhuranath Iyengar9a15eb42010-07-23 15:28:31 +05001354 return -EINVAL;
1355 }
1356
Giridhar Malavali6e980162010-03-19 17:03:58 -07001357 if (port_param->mode)
1358 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1359 port_param->speed, mb);
1360 else
1361 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1362 &port_param->speed, mb);
1363
1364 if (rval) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001365 ql_log(ql_log_warn, vha, 0x704c,
Milan P. Gandhi62439b42019-03-12 18:23:15 +05301366 "iiDMA cmd failed for %8phN -- "
Oleksandr Khoshaba7b8335582013-08-27 01:37:27 -04001367 "%04x %x %04x %04x.\n", fcport->port_name,
1368 rval, fcport->fp_speed, mb[0], mb[1]);
Armen Baloyan63ea9232012-11-21 02:39:53 -05001369 rval = (DID_ERROR << 16);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001370 } else {
1371 if (!port_param->mode) {
1372 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1373 sizeof(struct qla_port_param);
1374
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001375 rsp_ptr = ((uint8_t *)bsg_reply) +
Giridhar Malavali6e980162010-03-19 17:03:58 -07001376 sizeof(struct fc_bsg_reply);
1377
1378 memcpy(rsp_ptr, port_param,
1379 sizeof(struct qla_port_param));
1380 }
1381
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001382 bsg_reply->result = DID_OK;
Johannes Thumshirn06548162016-11-17 10:31:22 +01001383 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001384 bsg_reply->reply_payload_rcv_len);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001385 }
1386
Giridhar Malavali6e980162010-03-19 17:03:58 -07001387 return rval;
1388}
1389
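/*
 * Validate the flash (option ROM) region requested by a read or update
 * job, set optrom_state and allocate the staging buffer. Called with
 * ha->optrom_mutex held.
 */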
1390static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001391qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
Harish Zunjarraof19af162010-10-15 11:27:43 -07001392 uint8_t is_update)
1393{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001394 struct fc_bsg_request *bsg_request = bsg_job->request;
Harish Zunjarraof19af162010-10-15 11:27:43 -07001395 uint32_t start = 0;
1396 int valid = 0;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001397 struct qla_hw_data *ha = vha->hw;
Harish Zunjarraof19af162010-10-15 11:27:43 -07001398
Harish Zunjarraof19af162010-10-15 11:27:43 -07001399 if (unlikely(pci_channel_offline(ha->pdev)))
1400 return -EINVAL;
1401
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001402 start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001403 if (start > ha->optrom_size) {
1404 ql_log(ql_log_warn, vha, 0x7055,
1405 "start %d > optrom_size %d.\n", start, ha->optrom_size);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001406 return -EINVAL;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001407 }
Harish Zunjarraof19af162010-10-15 11:27:43 -07001408
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001409 if (ha->optrom_state != QLA_SWAITING) {
1410 ql_log(ql_log_info, vha, 0x7056,
1411 "optrom_state %d.\n", ha->optrom_state);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001412 return -EBUSY;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001413 }
Harish Zunjarraof19af162010-10-15 11:27:43 -07001414
1415 ha->optrom_region_start = start;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001416 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001417 if (is_update) {
1418 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1419 valid = 1;
1420 else if (start == (ha->flt_region_boot * 4) ||
1421 start == (ha->flt_region_fw * 4))
1422 valid = 1;
1423 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
Joe Carnuccioecc89f22019-03-12 11:08:13 -07001424 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
1425 IS_QLA28XX(ha))
Harish Zunjarraof19af162010-10-15 11:27:43 -07001426 valid = 1;
1427 if (!valid) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001428 ql_log(ql_log_warn, vha, 0x7058,
1429 "Invalid start region 0x%x/0x%x.\n", start,
1430 bsg_job->request_payload.payload_len);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001431 return -EINVAL;
1432 }
1433
1434 ha->optrom_region_size = start +
1435 bsg_job->request_payload.payload_len > ha->optrom_size ?
1436 ha->optrom_size - start :
1437 bsg_job->request_payload.payload_len;
1438 ha->optrom_state = QLA_SWRITING;
1439 } else {
1440 ha->optrom_region_size = start +
1441 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1442 ha->optrom_size - start :
1443 bsg_job->reply_payload.payload_len;
1444 ha->optrom_state = QLA_SREADING;
1445 }
1446
Himanshu Jha05583122017-12-30 20:58:25 +05301447 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001448 if (!ha->optrom_buffer) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001449 ql_log(ql_log_warn, vha, 0x7059,
Harish Zunjarraof19af162010-10-15 11:27:43 -07001450 "Unable to allocate memory for optrom read/update "
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001451 "(%x)\n", ha->optrom_region_size);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001452
1453 ha->optrom_state = QLA_SWAITING;
1454 return -ENOMEM;
1455 }
1456
Harish Zunjarraof19af162010-10-15 11:27:43 -07001457 return 0;
1458}
1459
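/*
 * Read the requested flash region into the staging buffer and copy it
 * into the bsg reply payload.
 */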
1460static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001461qla2x00_read_optrom(struct bsg_job *bsg_job)
Harish Zunjarraof19af162010-10-15 11:27:43 -07001462{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001463 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001464 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001465 scsi_qla_host_t *vha = shost_priv(host);
1466 struct qla_hw_data *ha = vha->hw;
1467 int rval = 0;
1468
Santosh Vernekar7d613ac2012-08-22 14:21:03 -04001469 if (ha->flags.nic_core_reset_hdlr_active)
Giridhar Malavalia49393f2012-04-25 07:26:14 -07001470 return -EBUSY;
1471
Chad Dupuis7a8ab9c2014-02-26 04:14:56 -05001472 mutex_lock(&ha->optrom_mutex);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001473 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
Chad Dupuis7a8ab9c2014-02-26 04:14:56 -05001474 if (rval) {
1475 mutex_unlock(&ha->optrom_mutex);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001476 return rval;
Chad Dupuis7a8ab9c2014-02-26 04:14:56 -05001477 }
Harish Zunjarraof19af162010-10-15 11:27:43 -07001478
1479 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1480 ha->optrom_region_start, ha->optrom_region_size);
1481
1482 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1483 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1484 ha->optrom_region_size);
1485
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001486 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1487 bsg_reply->result = DID_OK;
Harish Zunjarraof19af162010-10-15 11:27:43 -07001488 vfree(ha->optrom_buffer);
1489 ha->optrom_buffer = NULL;
1490 ha->optrom_state = QLA_SWAITING;
Chad Dupuis7a8ab9c2014-02-26 04:14:56 -05001491 mutex_unlock(&ha->optrom_mutex);
Johannes Thumshirn06548162016-11-17 10:31:22 +01001492 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001493 bsg_reply->reply_payload_rcv_len);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001494 return rval;
1495}
1496
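/*
 * Copy the request payload into the staging buffer and write it to the
 * requested flash region.
 */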
1497static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001498qla2x00_update_optrom(struct bsg_job *bsg_job)
Harish Zunjarraof19af162010-10-15 11:27:43 -07001499{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001500 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001501 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001502 scsi_qla_host_t *vha = shost_priv(host);
1503 struct qla_hw_data *ha = vha->hw;
1504 int rval = 0;
1505
Chad Dupuis7a8ab9c2014-02-26 04:14:56 -05001506 mutex_lock(&ha->optrom_mutex);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001507 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
Chad Dupuis7a8ab9c2014-02-26 04:14:56 -05001508 if (rval) {
1509 mutex_unlock(&ha->optrom_mutex);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001510 return rval;
Chad Dupuis7a8ab9c2014-02-26 04:14:56 -05001511 }
Harish Zunjarraof19af162010-10-15 11:27:43 -07001512
Giridhar Malavalib6d0d9d2012-05-15 14:34:25 -04001513 /* Set isp82xx_no_md_cap so that a firmware minidump is not captured */
1514 ha->flags.isp82xx_no_md_cap = 1;
1515
Harish Zunjarraof19af162010-10-15 11:27:43 -07001516 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1517 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1518 ha->optrom_region_size);
1519
Michael Hernandez1b81e7f2020-02-26 14:40:10 -08001520 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
Harish Zunjarraof19af162010-10-15 11:27:43 -07001521 ha->optrom_region_start, ha->optrom_region_size);
1522
Michael Hernandez1b81e7f2020-02-26 14:40:10 -08001523 if (rval) {
1524 bsg_reply->result = -EINVAL;
1525 rval = -EINVAL;
1526 } else {
1527 bsg_reply->result = DID_OK;
1528 }
Harish Zunjarraof19af162010-10-15 11:27:43 -07001529 vfree(ha->optrom_buffer);
1530 ha->optrom_buffer = NULL;
1531 ha->optrom_state = QLA_SWAITING;
Chad Dupuis7a8ab9c2014-02-26 04:14:56 -05001532 mutex_unlock(&ha->optrom_mutex);
Johannes Thumshirn06548162016-11-17 10:31:22 +01001533 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001534 bsg_reply->reply_payload_rcv_len);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001535 return rval;
1536}
1537
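/*
 * Write a list of FRU image-version fields to the adapter, one
 * qla2x00_write_sfp() call per list entry.
 */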
1538static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001539qla2x00_update_fru_versions(struct bsg_job *bsg_job)
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001540{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001541 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001542 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001543 scsi_qla_host_t *vha = shost_priv(host);
1544 struct qla_hw_data *ha = vha->hw;
1545 int rval = 0;
1546 uint8_t bsg[DMA_POOL_SIZE];
1547 struct qla_image_version_list *list = (void *)bsg;
1548 struct qla_image_version *image;
1549 uint32_t count;
1550 dma_addr_t sfp_dma;
1551 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
Bart Van Asschebd432bb2019-04-11 14:53:17 -07001552
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001553 if (!sfp) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001554 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001555 EXT_STATUS_NO_MEMORY;
1556 goto done;
1557 }
1558
1559 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1560 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1561
1562 image = list->version;
1563 count = list->count;
1564 while (count--) {
1565 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1566 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1567 image->field_address.device, image->field_address.offset,
1568 sizeof(image->field_info), image->field_address.option);
1569 if (rval) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001570 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001571 EXT_STATUS_MAILBOX;
1572 goto dealloc;
1573 }
1574 image++;
1575 }
1576
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001577 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001578
1579dealloc:
1580 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1581
1582done:
1583 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001584 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01001585 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001586 bsg_reply->reply_payload_rcv_len);
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001587
1588 return 0;
1589}
1590
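/* Read a single FRU status register and return it in the reply payload. */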
1591static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001592qla2x00_read_fru_status(struct bsg_job *bsg_job)
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001593{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001594 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001595 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001596 scsi_qla_host_t *vha = shost_priv(host);
1597 struct qla_hw_data *ha = vha->hw;
1598 int rval = 0;
1599 uint8_t bsg[DMA_POOL_SIZE];
1600 struct qla_status_reg *sr = (void *)bsg;
1601 dma_addr_t sfp_dma;
1602 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
Bart Van Asschebd432bb2019-04-11 14:53:17 -07001603
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001604 if (!sfp) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001605 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001606 EXT_STATUS_NO_MEMORY;
1607 goto done;
1608 }
1609
1610 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1611 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1612
1613 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1614 sr->field_address.device, sr->field_address.offset,
1615 sizeof(sr->status_reg), sr->field_address.option);
1616 sr->status_reg = *sfp;
1617
1618 if (rval) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001619 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001620 EXT_STATUS_MAILBOX;
1621 goto dealloc;
1622 }
1623
1624 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1625 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1626
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001627 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001628
1629dealloc:
1630 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1631
1632done:
1633 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001634 bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1635 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01001636 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001637 bsg_reply->reply_payload_rcv_len);
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001638
1639 return 0;
1640}
1641
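/* Write a single FRU status register supplied in the request payload. */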
1642static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001643qla2x00_write_fru_status(struct bsg_job *bsg_job)
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001644{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001645 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001646 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001647 scsi_qla_host_t *vha = shost_priv(host);
1648 struct qla_hw_data *ha = vha->hw;
1649 int rval = 0;
1650 uint8_t bsg[DMA_POOL_SIZE];
1651 struct qla_status_reg *sr = (void *)bsg;
1652 dma_addr_t sfp_dma;
1653 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
Bart Van Asschebd432bb2019-04-11 14:53:17 -07001654
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001655 if (!sfp) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001656 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001657 EXT_STATUS_NO_MEMORY;
1658 goto done;
1659 }
1660
1661 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1662 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1663
1664 *sfp = sr->status_reg;
1665 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1666 sr->field_address.device, sr->field_address.offset,
1667 sizeof(sr->status_reg), sr->field_address.option);
1668
1669 if (rval) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001670 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001671 EXT_STATUS_MAILBOX;
1672 goto dealloc;
1673 }
1674
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001675 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001676
1677dealloc:
1678 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1679
1680done:
1681 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001682 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01001683 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001684 bsg_reply->reply_payload_rcv_len);
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001685
1686 return 0;
1687}
1688
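/* Write a caller-supplied buffer to an I2C device behind the adapter. */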
1689static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001690qla2x00_write_i2c(struct bsg_job *bsg_job)
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001691{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001692 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001693 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001694 scsi_qla_host_t *vha = shost_priv(host);
1695 struct qla_hw_data *ha = vha->hw;
1696 int rval = 0;
1697 uint8_t bsg[DMA_POOL_SIZE];
1698 struct qla_i2c_access *i2c = (void *)bsg;
1699 dma_addr_t sfp_dma;
1700 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
Bart Van Asschebd432bb2019-04-11 14:53:17 -07001701
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001702 if (!sfp) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001703 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001704 EXT_STATUS_NO_MEMORY;
1705 goto done;
1706 }
1707
1708 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1709 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1710
1711 memcpy(sfp, i2c->buffer, i2c->length);
1712 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1713 i2c->device, i2c->offset, i2c->length, i2c->option);
1714
1715 if (rval) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001716 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001717 EXT_STATUS_MAILBOX;
1718 goto dealloc;
1719 }
1720
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001721 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001722
1723dealloc:
1724 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1725
1726done:
1727 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001728 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01001729 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001730 bsg_reply->reply_payload_rcv_len);
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001731
1732 return 0;
1733}
1734
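/* Read from an I2C device and return the data in the reply payload. */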
1735static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001736qla2x00_read_i2c(struct bsg_job *bsg_job)
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001737{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001738 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001739 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001740 scsi_qla_host_t *vha = shost_priv(host);
1741 struct qla_hw_data *ha = vha->hw;
1742 int rval = 0;
1743 uint8_t bsg[DMA_POOL_SIZE];
1744 struct qla_i2c_access *i2c = (void *)bsg;
1745 dma_addr_t sfp_dma;
1746 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
Bart Van Asschebd432bb2019-04-11 14:53:17 -07001747
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001748 if (!sfp) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001749 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001750 EXT_STATUS_NO_MEMORY;
1751 goto done;
1752 }
1753
1754 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1755 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1756
1757 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1758 i2c->device, i2c->offset, i2c->length, i2c->option);
1759
1760 if (rval) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001761 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001762 EXT_STATUS_MAILBOX;
1763 goto dealloc;
1764 }
1765
1766 memcpy(i2c->buffer, sfp, i2c->length);
1767 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1768 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1769
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001770 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001771
1772dealloc:
1773 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1774
1775done:
1776 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001777 bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1778 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01001779 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001780 bsg_reply->reply_payload_rcv_len);
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04001781
1782 return 0;
1783}
1784
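/*
 * Issue a bidirectional pass-through command using the self-login port.
 * The request and reply payloads must be the same length; errors are
 * reported through the vendor-specific response.
 */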
1785static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001786qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001787{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001788 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001789 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001790 scsi_qla_host_t *vha = shost_priv(host);
1791 struct qla_hw_data *ha = vha->hw;
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001792 uint32_t rval = EXT_STATUS_OK;
1793 uint16_t req_sg_cnt = 0;
1794 uint16_t rsp_sg_cnt = 0;
1795 uint16_t nextlid = 0;
1796 uint32_t tot_dsds;
1797 srb_t *sp = NULL;
Bart Van Asschec29282c2019-08-08 20:01:48 -07001798 uint32_t req_data_len;
1799 uint32_t rsp_data_len;
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001800
1801 /* Check the type of the adapter */
1802 if (!IS_BIDI_CAPABLE(ha)) {
1803 ql_log(ql_log_warn, vha, 0x70a0,
1804 "This adapter is not supported\n");
1805 rval = EXT_STATUS_NOT_SUPPORTED;
1806 goto done;
1807 }
1808
1809 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1810 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1811 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1812 rval = EXT_STATUS_BUSY;
1813 goto done;
1814 }
1815
1816 /* Check if host is online */
1817 if (!vha->flags.online) {
1818 ql_log(ql_log_warn, vha, 0x70a1,
1819 "Host is not online\n");
1820 rval = EXT_STATUS_DEVICE_OFFLINE;
1821 goto done;
1822 }
1823
1824 /* Check if cable is plugged in or not */
1825 if (vha->device_flags & DFLG_NO_CABLE) {
1826 ql_log(ql_log_warn, vha, 0x70a2,
1827 "Cable is unplugged...\n");
1828 rval = EXT_STATUS_INVALID_CFG;
1829 goto done;
1830 }
1831
1832 /* Check if the switch is connected or not */
1833 if (ha->current_topology != ISP_CFG_F) {
1834 ql_log(ql_log_warn, vha, 0x70a3,
1835 "Host is not connected to the switch\n");
1836 rval = EXT_STATUS_INVALID_CFG;
1837 goto done;
1838 }
1839
1840 /* Check if operating mode is P2P */
1841 if (ha->operating_mode != P2P) {
1842 ql_log(ql_log_warn, vha, 0x70a4,
Milan P Gandhi5a68a1c2017-03-31 14:37:04 -07001843 "Host operating mode is not P2P\n");
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001844 rval = EXT_STATUS_INVALID_CFG;
1845 goto done;
1846 }
1847
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001848 mutex_lock(&ha->selflogin_lock);
1849 if (vha->self_login_loop_id == 0) {
1850 /* Initialize all required fields of fcport */
1851 vha->bidir_fcport.vha = vha;
1852 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1853 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1854 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1855 vha->bidir_fcport.loop_id = vha->loop_id;
1856
1857 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1858 ql_log(ql_log_warn, vha, 0x70a7,
1859 "Failed to login port %06X for bidirectional IOCB\n",
1860 vha->bidir_fcport.d_id.b24);
1861 mutex_unlock(&ha->selflogin_lock);
1862 rval = EXT_STATUS_MAILBOX;
1863 goto done;
1864 }
1865 vha->self_login_loop_id = nextlid - 1;
1866
1867 }
1868 /* Assign the self login loop id to fcport */
1869 mutex_unlock(&ha->selflogin_lock);
1870
1871 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1872
1873 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1874 bsg_job->request_payload.sg_list,
1875 bsg_job->request_payload.sg_cnt,
1876 DMA_TO_DEVICE);
1877
1878 if (!req_sg_cnt) {
1879 rval = EXT_STATUS_NO_MEMORY;
1880 goto done;
1881 }
1882
1883 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1884 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1885 DMA_FROM_DEVICE);
1886
1887 if (!rsp_sg_cnt) {
1888 rval = EXT_STATUS_NO_MEMORY;
1889 goto done_unmap_req_sg;
1890 }
1891
1892 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1893 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1894 ql_dbg(ql_dbg_user, vha, 0x70a9,
1895 "DMA mapping resulted in different sg counts "
1896 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1897 "%x dma_reply_sg_cnt: %x]\n",
1898 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1899 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1900 rval = EXT_STATUS_NO_MEMORY;
1901 goto done_unmap_sg;
1902 }
1903
Bart Van Asschec29282c2019-08-08 20:01:48 -07001904 req_data_len = bsg_job->request_payload.payload_len;
1905 rsp_data_len = bsg_job->reply_payload.payload_len;
1906
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001907 if (req_data_len != rsp_data_len) {
1908 rval = EXT_STATUS_BUSY;
1909 ql_log(ql_log_warn, vha, 0x70aa,
1910 "req_data_len != rsp_data_len\n");
1911 goto done_unmap_sg;
1912 }
1913
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001914 /* Alloc SRB structure */
1915 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1916 if (!sp) {
1917 ql_dbg(ql_dbg_user, vha, 0x70ac,
1918 "Alloc SRB structure failed\n");
1919 rval = EXT_STATUS_NO_MEMORY;
1920 goto done_unmap_sg;
1921 }
1922
1923 /* Populate srb->ctx with bidir ctx */
1924 sp->u.bsg_job = bsg_job;
1925 sp->free = qla2x00_bsg_sp_free;
1926 sp->type = SRB_BIDI_CMD;
1927 sp->done = qla2x00_bsg_job_done;
1928
1929 /* Add the read and write sg count */
1930 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1931
1932 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1933 if (rval != EXT_STATUS_OK)
1934 goto done_free_srb;
1935 /* the bsg request will be completed in the interrupt handler */
1936 return rval;
1937
1938done_free_srb:
1939 mempool_free(sp, ha->srb_mempool);
1940done_unmap_sg:
1941 dma_unmap_sg(&ha->pdev->dev,
1942 bsg_job->reply_payload.sg_list,
1943 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1944done_unmap_req_sg:
1945 dma_unmap_sg(&ha->pdev->dev,
1946 bsg_job->request_payload.sg_list,
1947 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1948done:
1949
1950 /* Return an error vendor specific response
1951 * and complete the bsg request
1952 */
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001953 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001954 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001955 bsg_reply->reply_payload_rcv_len = 0;
1956 bsg_reply->result = (DID_OK) << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01001957 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01001958 bsg_reply->reply_payload_rcv_len);
Joe Perches9e03aa22013-09-03 13:45:58 -07001959 /* Always return success, vendor rsp carries correct status */
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04001960 return 0;
1961}
1962
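/*
 * Hand an ISPFx00 management IOCB from the application to the firmware.
 * A temporary fcport is allocated because the IOCB setup helpers expect
 * one, even though no real remote port is involved.
 */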
1963static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01001964qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04001965{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001966 struct fc_bsg_request *bsg_request = bsg_job->request;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01001967 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04001968 scsi_qla_host_t *vha = shost_priv(host);
1969 struct qla_hw_data *ha = vha->hw;
Hannes Reinecke66cf50e2019-10-18 16:04:58 +02001970 int rval = (DID_ERROR << 16);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04001971 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1972 srb_t *sp;
1973 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1974 struct fc_port *fcport;
1975 char *type = "FC_BSG_HST_FX_MGMT";
1976
1977 /* Copy the IOCB specific information */
1978 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01001979 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04001980
1981 /* Dump the vendor information */
1982 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
Joe Carnucciof8f97b02019-03-12 11:08:16 -07001983 piocb_rqst, sizeof(*piocb_rqst));
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04001984
1985 if (!vha->flags.online) {
1986 ql_log(ql_log_warn, vha, 0x70d0,
1987 "Host is not online.\n");
1988 rval = -EIO;
1989 goto done;
1990 }
1991
1992 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1993 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1994 bsg_job->request_payload.sg_list,
1995 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1996 if (!req_sg_cnt) {
1997 ql_log(ql_log_warn, vha, 0x70c7,
1998 "dma_map_sg returned %d for request\n", req_sg_cnt);
1999 rval = -ENOMEM;
2000 goto done;
2001 }
2002 }
2003
2004 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
2005 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
2006 bsg_job->reply_payload.sg_list,
2007 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2008 if (!rsp_sg_cnt) {
2009 ql_log(ql_log_warn, vha, 0x70c8,
2010 "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
2011 rval = -ENOMEM;
2012 goto done_unmap_req_sg;
2013 }
2014 }
2015
2016 ql_dbg(ql_dbg_user, vha, 0x70c9,
2017 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2018 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2019 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2020
2021 	/* Allocate a dummy fcport structure, since the functions that prepare
2022 	 * the IOCB and mailbox command retrieve port-specific information
2023 	 * from the fcport structure. For host-based ELS commands there will
2024 	 * be no fcport structure allocated.
2025 */
2026 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2027 if (!fcport) {
2028 ql_log(ql_log_warn, vha, 0x70ca,
2029 "Failed to allocate fcport.\n");
2030 rval = -ENOMEM;
2031 goto done_unmap_rsp_sg;
2032 }
2033
2034 /* Alloc SRB structure */
2035 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2036 if (!sp) {
2037 ql_log(ql_log_warn, vha, 0x70cb,
2038 "qla2x00_get_sp failed.\n");
2039 rval = -ENOMEM;
2040 goto done_free_fcport;
2041 }
2042
2043 /* Initialize all required fields of fcport */
2044 fcport->vha = vha;
Bart Van Assche7ffa5b92020-05-18 14:17:12 -07002045 fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002046
2047 sp->type = SRB_FXIOCB_BCMD;
2048 sp->name = "bsg_fx_mgmt";
2049 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2050 sp->u.bsg_job = bsg_job;
2051 sp->free = qla2x00_bsg_sp_free;
2052 sp->done = qla2x00_bsg_job_done;
2053
2054 ql_dbg(ql_dbg_user, vha, 0x70cc,
2055 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2056 type, piocb_rqst->func_type, fcport->loop_id);
2057
2058 rval = qla2x00_start_sp(sp);
2059 if (rval != QLA_SUCCESS) {
2060 ql_log(ql_log_warn, vha, 0x70cd,
2061 "qla2x00_start_sp failed=%d.\n", rval);
2062 mempool_free(sp, ha->srb_mempool);
2063 rval = -EIO;
2064 goto done_free_fcport;
2065 }
2066 return rval;
2067
2068done_free_fcport:
Quinn Tran3dae2202019-12-17 14:06:10 -08002069 qla2x00_free_fcport(fcport);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002070
2071done_unmap_rsp_sg:
2072 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2073 dma_unmap_sg(&ha->pdev->dev,
2074 bsg_job->reply_payload.sg_list,
2075 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2076done_unmap_req_sg:
2077 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2078 dma_unmap_sg(&ha->pdev->dev,
2079 bsg_job->request_payload.sg_list,
2080 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2081
2082done:
2083 return rval;
2084}
2085
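/* Read or write an adapter SerDes register via mailbox command. */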
2086static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002087qla26xx_serdes_op(struct bsg_job *bsg_job)
Joe Carnucciodb64e932013-10-30 03:38:18 -04002088{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002089 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01002090 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Joe Carnucciodb64e932013-10-30 03:38:18 -04002091 scsi_qla_host_t *vha = shost_priv(host);
2092 int rval = 0;
2093 struct qla_serdes_reg sr;
2094
2095 memset(&sr, 0, sizeof(sr));
2096
2097 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2098 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2099
2100 switch (sr.cmd) {
2101 case INT_SC_SERDES_WRITE_REG:
2102 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002103 bsg_reply->reply_payload_rcv_len = 0;
Joe Carnucciodb64e932013-10-30 03:38:18 -04002104 break;
2105 case INT_SC_SERDES_READ_REG:
2106 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2107 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2108 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002109 bsg_reply->reply_payload_rcv_len = sizeof(sr);
Joe Carnucciodb64e932013-10-30 03:38:18 -04002110 break;
2111 default:
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002112 ql_dbg(ql_dbg_user, vha, 0x708c,
Joe Carnucciodb64e932013-10-30 03:38:18 -04002113 "Unknown serdes cmd %x.\n", sr.cmd);
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002114 rval = -EINVAL;
2115 break;
2116 }
2117
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002118 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002119 rval ? EXT_STATUS_MAILBOX : 0;
2120
2121 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002122 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01002123 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01002124 bsg_reply->reply_payload_rcv_len);
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002125 return 0;
2126}
2127
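/* Read or write a SerDes register on ISP8044 adapters. */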
2128static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002129qla8044_serdes_op(struct bsg_job *bsg_job)
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002130{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002131 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01002132 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002133 scsi_qla_host_t *vha = shost_priv(host);
2134 int rval = 0;
2135 struct qla_serdes_reg_ex sr;
2136
2137 memset(&sr, 0, sizeof(sr));
2138
2139 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2140 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2141
2142 switch (sr.cmd) {
2143 case INT_SC_SERDES_WRITE_REG:
2144 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002145 bsg_reply->reply_payload_rcv_len = 0;
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002146 break;
2147 case INT_SC_SERDES_READ_REG:
2148 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2149 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2150 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002151 bsg_reply->reply_payload_rcv_len = sizeof(sr);
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002152 break;
2153 default:
Quinn Tran83548fe2017-06-02 09:12:01 -07002154 ql_dbg(ql_dbg_user, vha, 0x7020,
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002155 "Unknown serdes cmd %x.\n", sr.cmd);
2156 rval = -EINVAL;
Joe Carnucciodb64e932013-10-30 03:38:18 -04002157 break;
2158 }
2159
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002160 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnucciodb64e932013-10-30 03:38:18 -04002161 rval ? EXT_STATUS_MAILBOX : 0;
2162
2163 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002164 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01002165 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01002166 bsg_reply->reply_payload_rcv_len);
Joe Carnucciodb64e932013-10-30 03:38:18 -04002167 return 0;
2168}
2169
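/*
 * Report the firmware's flash-update capability word, built from the
 * firmware attribute registers (ISP27xx/28xx only).
 */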
2170static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002171qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
Sawan Chandak4243c112016-01-27 12:03:31 -05002172{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002173 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01002174 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Sawan Chandak4243c112016-01-27 12:03:31 -05002175 scsi_qla_host_t *vha = shost_priv(host);
2176 struct qla_hw_data *ha = vha->hw;
2177 struct qla_flash_update_caps cap;
2178
Joe Carnuccioecc89f22019-03-12 11:08:13 -07002179 if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
Sawan Chandak4243c112016-01-27 12:03:31 -05002180 return -EPERM;
2181
2182 memset(&cap, 0, sizeof(cap));
2183 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2184 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2185 (uint64_t)ha->fw_attributes_h << 16 |
2186 (uint64_t)ha->fw_attributes;
2187
2188 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2189 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002190 bsg_reply->reply_payload_rcv_len = sizeof(cap);
Sawan Chandak4243c112016-01-27 12:03:31 -05002191
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002192 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Sawan Chandak4243c112016-01-27 12:03:31 -05002193 EXT_STATUS_OK;
2194
2195 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002196 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01002197 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01002198 bsg_reply->reply_payload_rcv_len);
Sawan Chandak4243c112016-01-27 12:03:31 -05002199 return 0;
2200}
2201
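/*
 * Validate the flash-update capabilities and outage duration requested
 * by the application against the running firmware (ISP27xx/28xx only).
 */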
2202static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002203qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
Sawan Chandak4243c112016-01-27 12:03:31 -05002204{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002205 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01002206 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Sawan Chandak4243c112016-01-27 12:03:31 -05002207 scsi_qla_host_t *vha = shost_priv(host);
2208 struct qla_hw_data *ha = vha->hw;
2209 uint64_t online_fw_attr = 0;
2210 struct qla_flash_update_caps cap;
2211
Joe Carnuccioecc89f22019-03-12 11:08:13 -07002212 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
Sawan Chandak4243c112016-01-27 12:03:31 -05002213 return -EPERM;
2214
2215 memset(&cap, 0, sizeof(cap));
2216 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2217 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2218
2219 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2220 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2221 (uint64_t)ha->fw_attributes_h << 16 |
2222 (uint64_t)ha->fw_attributes;
2223
2224 if (online_fw_attr != cap.capabilities) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002225 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Sawan Chandak4243c112016-01-27 12:03:31 -05002226 EXT_STATUS_INVALID_PARAM;
2227 return -EINVAL;
2228 }
2229
2230 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002231 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Sawan Chandak4243c112016-01-27 12:03:31 -05002232 EXT_STATUS_INVALID_PARAM;
2233 return -EINVAL;
2234 }
2235
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002236 bsg_reply->reply_payload_rcv_len = 0;
Sawan Chandak4243c112016-01-27 12:03:31 -05002237
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002238 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Sawan Chandak4243c112016-01-27 12:03:31 -05002239 EXT_STATUS_OK;
2240
2241 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002242 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01002243 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01002244 bsg_reply->reply_payload_rcv_len);
Sawan Chandak4243c112016-01-27 12:03:31 -05002245 return 0;
2246}
2247
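/*
 * Report buffer-to-buffer credit recovery (BBCR) status, state and the
 * configured/negotiated BB_SC_N values (ISP27xx/28xx only).
 */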
2248static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002249qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
Sawan Chandak969a6192016-01-27 12:03:32 -05002250{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002251 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01002252 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Sawan Chandak969a6192016-01-27 12:03:32 -05002253 scsi_qla_host_t *vha = shost_priv(host);
2254 struct qla_hw_data *ha = vha->hw;
2255 struct qla_bbcr_data bbcr;
2256 uint16_t loop_id, topo, sw_cap;
2257 uint8_t domain, area, al_pa, state;
2258 int rval;
2259
Joe Carnuccioecc89f22019-03-12 11:08:13 -07002260 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
Sawan Chandak969a6192016-01-27 12:03:32 -05002261 return -EPERM;
2262
2263 memset(&bbcr, 0, sizeof(bbcr));
2264
2265 if (vha->flags.bbcr_enable)
2266 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2267 else
2268 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2269
2270 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2271 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2272 &area, &domain, &topo, &sw_cap);
Harish Zunjarraoc73191b2016-01-27 12:03:35 -05002273 if (rval != QLA_SUCCESS) {
2274 bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2275 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2276 bbcr.mbx1 = loop_id;
2277 goto done;
2278 }
Sawan Chandak969a6192016-01-27 12:03:32 -05002279
2280 state = (vha->bbcr >> 12) & 0x1;
2281
2282 if (state) {
2283 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2284 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2285 } else {
2286 bbcr.state = QLA_BBCR_STATE_ONLINE;
2287 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2288 }
2289
2290 bbcr.configured_bbscn = vha->bbcr & 0xf;
2291 }
2292
Harish Zunjarraoc73191b2016-01-27 12:03:35 -05002293done:
Sawan Chandak969a6192016-01-27 12:03:32 -05002294 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2295 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002296 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
Sawan Chandak969a6192016-01-27 12:03:32 -05002297
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002298 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
Sawan Chandak969a6192016-01-27 12:03:32 -05002299
2300 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002301 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01002302 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01002303 bsg_reply->reply_payload_rcv_len);
Sawan Chandak969a6192016-01-27 12:03:32 -05002304 return 0;
2305}
2306
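/* Return the ISP link statistics (struct link_statistics) in the reply payload. */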
2307static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002308qla2x00_get_priv_stats(struct bsg_job *bsg_job)
Harish Zunjarrao243de672016-01-27 12:03:33 -05002309{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002310 struct fc_bsg_request *bsg_request = bsg_job->request;
2311 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01002312 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Harish Zunjarrao243de672016-01-27 12:03:33 -05002313 scsi_qla_host_t *vha = shost_priv(host);
2314 struct qla_hw_data *ha = vha->hw;
2315 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2316 struct link_statistics *stats = NULL;
2317 dma_addr_t stats_dma;
Sawan Chandak8437dda2016-07-06 11:14:27 -04002318 int rval;
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002319 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
Sawan Chandak8437dda2016-07-06 11:14:27 -04002320 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
Harish Zunjarrao243de672016-01-27 12:03:33 -05002321
2322 if (test_bit(UNLOADING, &vha->dpc_flags))
Sawan Chandak8437dda2016-07-06 11:14:27 -04002323 return -ENODEV;
Harish Zunjarrao243de672016-01-27 12:03:33 -05002324
2325 if (unlikely(pci_channel_offline(ha->pdev)))
Sawan Chandak8437dda2016-07-06 11:14:27 -04002326 return -ENODEV;
Harish Zunjarrao243de672016-01-27 12:03:33 -05002327
2328 if (qla2x00_reset_active(vha))
Sawan Chandak8437dda2016-07-06 11:14:27 -04002329 return -EBUSY;
Harish Zunjarrao243de672016-01-27 12:03:33 -05002330
2331 if (!IS_FWI2_CAPABLE(ha))
Sawan Chandak8437dda2016-07-06 11:14:27 -04002332 return -EPERM;
Harish Zunjarrao243de672016-01-27 12:03:33 -05002333
Luis Chamberlain750afb02019-01-04 09:23:09 +01002334 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2335 GFP_KERNEL);
Harish Zunjarrao243de672016-01-27 12:03:33 -05002336 if (!stats) {
2337 ql_log(ql_log_warn, vha, 0x70e2,
Sawan Chandak8437dda2016-07-06 11:14:27 -04002338 "Failed to allocate memory for stats.\n");
2339 return -ENOMEM;
Harish Zunjarrao243de672016-01-27 12:03:33 -05002340 }
2341
Sawan Chandak8437dda2016-07-06 11:14:27 -04002342 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
Harish Zunjarrao243de672016-01-27 12:03:33 -05002343
Sawan Chandak8437dda2016-07-06 11:14:27 -04002344 if (rval == QLA_SUCCESS) {
Joe Carnucciof8f97b02019-03-12 11:08:16 -07002345 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2346 stats, sizeof(*stats));
Sawan Chandak8437dda2016-07-06 11:14:27 -04002347 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2348 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2349 }
Harish Zunjarrao243de672016-01-27 12:03:33 -05002350
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002351 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2352 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Sawan Chandak8437dda2016-07-06 11:14:27 -04002353 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
Harish Zunjarrao243de672016-01-27 12:03:33 -05002354
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002355 bsg_job->reply_len = sizeof(*bsg_reply);
2356 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01002357 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01002358 bsg_reply->reply_payload_rcv_len);
Harish Zunjarrao243de672016-01-27 12:03:33 -05002359
Sawan Chandak8437dda2016-07-06 11:14:27 -04002360 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
Harish Zunjarrao243de672016-01-27 12:03:33 -05002361 stats, stats_dma);
Sawan Chandak8437dda2016-07-06 11:14:27 -04002362
2363 return 0;
Harish Zunjarrao243de672016-01-27 12:03:33 -05002364}
2365
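/* Run D-Port diagnostics and copy the results into the reply payload. */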
2366static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002367qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
Joe Carnuccioec891462016-07-06 11:14:26 -04002368{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002369 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01002370 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
Joe Carnuccioec891462016-07-06 11:14:26 -04002371 scsi_qla_host_t *vha = shost_priv(host);
2372 int rval;
2373 struct qla_dport_diag *dd;
2374
Joe Carnuccioecc89f22019-03-12 11:08:13 -07002375 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2376 !IS_QLA28XX(vha->hw))
Joe Carnuccioec891462016-07-06 11:14:26 -04002377 return -EPERM;
2378
2379 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2380 if (!dd) {
2381 ql_log(ql_log_warn, vha, 0x70db,
2382 "Failed to allocate memory for dport.\n");
2383 return -ENOMEM;
2384 }
2385
2386 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2387 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2388
2389 rval = qla26xx_dport_diagnostics(
2390 vha, dd->buf, sizeof(dd->buf), dd->options);
2391 if (rval == QLA_SUCCESS) {
2392 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2393 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2394 }
2395
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002396 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2397 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
Joe Carnuccioec891462016-07-06 11:14:26 -04002398 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2399
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002400 bsg_job->reply_len = sizeof(*bsg_reply);
2401 bsg_reply->result = DID_OK << 16;
Johannes Thumshirn06548162016-11-17 10:31:22 +01002402 bsg_job_done(bsg_job, bsg_reply->result,
Johannes Thumshirn1abaede2016-11-17 10:31:13 +01002403 bsg_reply->reply_payload_rcv_len);
Joe Carnuccioec891462016-07-06 11:14:26 -04002404
2405 kfree(dd);
2406
2407 return 0;
2408}
2409
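/* Report which flash image regions are currently active on the adapter. */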
2410static int
Joe Carnuccio5fa87742019-03-12 11:08:21 -07002411qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2412{
2413 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2414 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2415 struct qla_hw_data *ha = vha->hw;
2416 struct qla_active_regions regions = { };
2417 struct active_regions active_regions = { };
2418
Himanshu Madhani4e71dca2019-12-03 14:36:55 -08002419 qla27xx_get_active_image(vha, &active_regions);
Joe Carnuccio5fa87742019-03-12 11:08:21 -07002420 regions.global_image = active_regions.global;
2421
2422 if (IS_QLA28XX(ha)) {
Quinn Tran8d4926d2020-02-12 13:44:31 -08002423 qla28xx_get_aux_images(vha, &active_regions);
Joe Carnuccio5fa87742019-03-12 11:08:21 -07002424 regions.board_config = active_regions.aux.board_config;
2425 regions.vpd_nvram = active_regions.aux.vpd_nvram;
2426 regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2427 regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2428 }
2429
2430 ql_dbg(ql_dbg_user, vha, 0x70e1,
2431 "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
2432 __func__, vha->host_no, regions.global_image,
2433 regions.board_config, regions.vpd_nvram,
2434 regions.npiv_config_0_1, regions.npiv_config_2_3);
2435
2436 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2437 bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2438
2439 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2440 bsg_reply->reply_payload_rcv_len = sizeof(regions);
2441 bsg_reply->result = DID_OK << 16;
2442 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2443 bsg_job_done(bsg_job, bsg_reply->result,
2444 bsg_reply->reply_payload_rcv_len);
2445
2446 return 0;
2447}
2448
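/* Dispatch a vendor-specific bsg command to its handler. */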
2449static int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002450qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
Giridhar Malavali6e980162010-03-19 17:03:58 -07002451{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002452 struct fc_bsg_request *bsg_request = bsg_job->request;
2453
2454 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
Giridhar Malavali6e980162010-03-19 17:03:58 -07002455 case QL_VND_LOOPBACK:
2456 return qla2x00_process_loopback(bsg_job);
2457
2458 case QL_VND_A84_RESET:
2459 return qla84xx_reset(bsg_job);
2460
2461 case QL_VND_A84_UPDATE_FW:
2462 return qla84xx_updatefw(bsg_job);
2463
2464 case QL_VND_A84_MGMT_CMD:
2465 return qla84xx_mgmt_cmd(bsg_job);
2466
2467 case QL_VND_IIDMA:
2468 return qla24xx_iidma(bsg_job);
2469
Sarang Radke09ff7012010-03-19 17:03:59 -07002470 case QL_VND_FCP_PRIO_CFG_CMD:
2471 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2472
Harish Zunjarraof19af162010-10-15 11:27:43 -07002473 case QL_VND_READ_FLASH:
2474 return qla2x00_read_optrom(bsg_job);
2475
2476 case QL_VND_UPDATE_FLASH:
2477 return qla2x00_update_optrom(bsg_job);
2478
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07002479 case QL_VND_SET_FRU_VERSION:
2480 return qla2x00_update_fru_versions(bsg_job);
2481
2482 case QL_VND_READ_FRU_STATUS:
2483 return qla2x00_read_fru_status(bsg_job);
2484
2485 case QL_VND_WRITE_FRU_STATUS:
2486 return qla2x00_write_fru_status(bsg_job);
2487
Joe Carnuccio9ebb5d92012-08-22 14:20:56 -04002488 case QL_VND_WRITE_I2C:
2489 return qla2x00_write_i2c(bsg_job);
2490
2491 case QL_VND_READ_I2C:
2492 return qla2x00_read_i2c(bsg_job);
2493
Saurav Kashyapa9b6f722012-08-22 14:21:01 -04002494 case QL_VND_DIAG_IO_CMD:
2495 return qla24xx_process_bidir_cmd(bsg_job);
2496
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002497 case QL_VND_FX00_MGMT_CMD:
2498 return qlafx00_mgmt_cmd(bsg_job);
Joe Carnucciodb64e932013-10-30 03:38:18 -04002499
2500 case QL_VND_SERDES_OP:
2501 return qla26xx_serdes_op(bsg_job);
2502
Joe Carnuccioe8887c52014-04-11 16:54:17 -04002503 case QL_VND_SERDES_OP_EX:
2504 return qla8044_serdes_op(bsg_job);
2505
Sawan Chandak4243c112016-01-27 12:03:31 -05002506 case QL_VND_GET_FLASH_UPDATE_CAPS:
2507 return qla27xx_get_flash_upd_cap(bsg_job);
2508
2509 case QL_VND_SET_FLASH_UPDATE_CAPS:
2510 return qla27xx_set_flash_upd_cap(bsg_job);
2511
Sawan Chandak969a6192016-01-27 12:03:32 -05002512 case QL_VND_GET_BBCR_DATA:
2513 return qla27xx_get_bbcr_data(bsg_job);
2514
Harish Zunjarrao243de672016-01-27 12:03:33 -05002515 case QL_VND_GET_PRIV_STATS:
Sawan Chandak8437dda2016-07-06 11:14:27 -04002516 case QL_VND_GET_PRIV_STATS_EX:
Harish Zunjarrao243de672016-01-27 12:03:33 -05002517 return qla2x00_get_priv_stats(bsg_job);
2518
Joe Carnuccioec891462016-07-06 11:14:26 -04002519 case QL_VND_DPORT_DIAGNOSTICS:
2520 return qla2x00_do_dport_diagnostics(bsg_job);
2521
Joe Carnuccio5fa87742019-03-12 11:08:21 -07002522 case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2523 return qla2x00_get_flash_image_status(bsg_job);
2524
Giridhar Malavali6e980162010-03-19 17:03:58 -07002525 default:
Giridhar Malavali6e980162010-03-19 17:03:58 -07002526 return -ENOSYS;
2527 }
2528}
2529
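/*
 * qla24xx_bsg_request() - entry point for all FC BSG jobs on this host.
 *
 * Resolves the owning scsi_qla_host from the rport (ELS passthrough to a
 * remote port) or from the fc_host, refuses new work while an ISP abort is
 * active or the chip is down, and then hands the job to the ELS, CT or
 * vendor-specific handler according to the message code.
 */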
2530int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002531qla24xx_bsg_request(struct bsg_job *bsg_job)
Giridhar Malavali6e980162010-03-19 17:03:58 -07002532{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002533 struct fc_bsg_request *bsg_request = bsg_job->request;
2534 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Giridhar Malavali6e980162010-03-19 17:03:58 -07002535 int ret = -EINVAL;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002536 struct fc_rport *rport;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002537 struct Scsi_Host *host;
2538 scsi_qla_host_t *vha;
2539
Andrew Vasquezb7bfbe12012-02-09 11:15:44 -08002540	/* In case no data is transferred. */
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002541 bsg_reply->reply_payload_rcv_len = 0;
Andrew Vasquezb7bfbe12012-02-09 11:15:44 -08002542
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002543 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
Johannes Thumshirn1d69b122016-11-17 10:31:15 +01002544 rport = fc_bsg_to_rport(bsg_job);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002545 host = rport_to_shost(rport);
2546 vha = shost_priv(host);
2547 } else {
Johannes Thumshirncd21c602016-11-17 10:31:14 +01002548 host = fc_bsg_to_shost(bsg_job);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002549 vha = shost_priv(host);
2550 }
2551
Quinn Tran56d942d2018-09-11 10:18:22 -07002552 if (qla2x00_chip_is_down(vha)) {
Andrew Vasquezd051a5aa2012-02-09 11:14:05 -08002553 ql_dbg(ql_dbg_user, vha, 0x709f,
2554 "BSG: ISP abort active/needed -- cmd=%d.\n",
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002555 bsg_request->msgcode);
Andrew Vasquezd051a5aa2012-02-09 11:14:05 -08002556 return -EBUSY;
2557 }
2558
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002559 ql_dbg(ql_dbg_user, vha, 0x7000,
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002560 "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
Giridhar Malavali6e980162010-03-19 17:03:58 -07002561
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002562 switch (bsg_request->msgcode) {
Giridhar Malavali6e980162010-03-19 17:03:58 -07002563 case FC_BSG_RPT_ELS:
2564 case FC_BSG_HST_ELS_NOLOGIN:
2565 ret = qla2x00_process_els(bsg_job);
2566 break;
2567 case FC_BSG_HST_CT:
2568 ret = qla2x00_process_ct(bsg_job);
2569 break;
2570 case FC_BSG_HST_VENDOR:
2571 ret = qla2x00_process_vendor_specific(bsg_job);
2572 break;
2573 case FC_BSG_HST_ADD_RPORT:
2574 case FC_BSG_HST_DEL_RPORT:
2575 case FC_BSG_RPT_CT:
2576 default:
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002577 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07002578 break;
Andrew Vasquez6c452a42010-03-19 17:04:02 -07002579 }
Giridhar Malavali6e980162010-03-19 17:03:58 -07002580 return ret;
2581}
2582
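/*
 * qla24xx_bsg_timeout() - invoked by the FC transport when a BSG job times
 * out before the firmware completes it.
 *
 * Walks the outstanding commands of every request queue looking for the SRB
 * that owns the job and, when found, asks the firmware to abort it.  Returns
 * 0 in all cases; the outcome of the abort is reported through
 * bsg_reply->result.
 */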
2583int
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002584qla24xx_bsg_timeout(struct bsg_job *bsg_job)
Giridhar Malavali6e980162010-03-19 17:03:58 -07002585{
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002586 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
Johannes Thumshirncd21c602016-11-17 10:31:14 +01002587 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
Giridhar Malavali6e980162010-03-19 17:03:58 -07002588 struct qla_hw_data *ha = vha->hw;
2589 srb_t *sp;
2590 int cnt, que;
2591 unsigned long flags;
2592 struct req_que *req;
Giridhar Malavali6e980162010-03-19 17:03:58 -07002593
2594 /* find the bsg job from the active list of commands */
2595 spin_lock_irqsave(&ha->hardware_lock, flags);
2596 for (que = 0; que < ha->max_req_queues; que++) {
2597 req = ha->req_q_map[que];
2598 if (!req)
2599 continue;
2600
Chad Dupuis8d93f552013-01-30 03:34:37 -05002601 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
Giridhar Malavali6e980162010-03-19 17:03:58 -07002602 sp = req->outstanding_cmds[cnt];
Giridhar Malavali6e980162010-03-19 17:03:58 -07002603 if (sp) {
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002604 if (((sp->type == SRB_CT_CMD) ||
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002605 (sp->type == SRB_ELS_CMD_HST) ||
2606 (sp->type == SRB_FXIOCB_BCMD))
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002607 && (sp->u.bsg_job == bsg_job)) {
Giridhar Malavali8edf3ed2013-06-25 11:27:17 -04002608 req->outstanding_cmds[cnt] = NULL;
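				/*
				 * Drop the hardware lock before aborting:
				 * ->abort_command() issues a mailbox/IOCB
				 * request and may sleep waiting for its
				 * completion.
				 */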
Giridhar Malavali900a36e2010-12-21 16:00:26 -08002609 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Giridhar Malavali6e980162010-03-19 17:03:58 -07002610 if (ha->isp_ops->abort_command(sp)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002611 ql_log(ql_log_warn, vha, 0x7089,
2612 "mbx abort_command "
2613 "failed.\n");
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002614 bsg_reply->result = -EIO;
Giridhar Malavali6e980162010-03-19 17:03:58 -07002615 } else {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002616 ql_dbg(ql_dbg_user, vha, 0x708a,
2617 "mbx abort_command "
2618 "success.\n");
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002619 bsg_reply->result = 0;
Giridhar Malavali6e980162010-03-19 17:03:58 -07002620 }
Giridhar Malavali900a36e2010-12-21 16:00:26 -08002621 spin_lock_irqsave(&ha->hardware_lock, flags);
Giridhar Malavali6e980162010-03-19 17:03:58 -07002622 goto done;
2623 }
2624 }
2625 }
2626 }
2627 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002628 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
Christoph Hellwig05231a32017-10-03 12:48:40 +02002629 bsg_reply->result = -ENXIO;
Giridhar Malavali6e980162010-03-19 17:03:58 -07002630 return 0;
2631
2632done:
2633 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002634 sp->free(sp);
Giridhar Malavali6e980162010-03-19 17:03:58 -07002635 return 0;
2636}