// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

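/*
 * NUMD is split across two 16-bit fields (NUMDU/NUMDL) of the Get Log
 * Page command and is a 0's based dword count, hence the +1 and the
 * multiply by sizeof(u32) below to turn it into a byte length.
 */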
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

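/*
 * The error log lives in a ring of NVMET_ERROR_LOG_SLOTS entries with
 * err_counter pointing at the most recently filled slot, so the walk
 * below returns entries newest-first, wrapping backwards through the
 * ring.
 */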
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

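/*
 * Per the NVMe spec a SMART "data unit" is 1000 units of 512 bytes;
 * the DIV_ROUND_UP below implements that conversion (rounding up so
 * small amounts of I/O are still visible in the counters).
 */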
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

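/*
 * Build the SMART log in a scratch buffer and copy it out in one shot;
 * the error count is sampled under error_lock so it stays consistent
 * with the error log ring above.
 */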
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

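/*
 * Commands Supported and Effects log page: CSUPP marks an opcode as
 * supported, LBCC marks opcodes that may change logical block content
 * (the write paths).
 */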
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page] =
	log->acs[nvme_admin_identify] =
	log->acs[nvme_admin_abort_cmd] =
	log->acs[nvme_admin_set_features] =
	log->acs[nvme_admin_get_features] =
	log->acs[nvme_admin_async_event] =
	log->acs[nvme_admin_keep_alive] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);

	log->iocs[nvme_cmd_read] =
	log->iocs[nvme_cmd_flush] =
	log->iocs[nvme_cmd_dsm] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
	log->iocs[nvme_cmd_write] =
	log->iocs[nvme_cmd_write_zeroes] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] =
	log->iocs[nvme_cmd_zone_mgmt_send] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_zone_mgmt_recv] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

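/*
 * When the changed-namespace list overflows, the writer side saturates
 * nr_changed_ns to U32_MAX and only a single list entry (0xffffffff)
 * is reported, as the spec requires.  Reading the log clears it and
 * re-arms the namespace attribute AEN.
 */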
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

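/*
 * Fill in one ANA group descriptor.  If the host set RGO (Return
 * Groups Only) in LSP, the per-group NSID list is omitted and only the
 * group header is returned.
 */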
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
		       GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

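/*
 * Get Log Page dispatch.  The transfer length derived from NUMDU/NUMDL
 * is validated against the SGL before any per-page handler runs.
 */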
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
		 req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');

	put_unaligned_le24(subsys->ieee_oui, id->ieee);

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

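	/*
	 * SQES/CQES encode the minimum (low nibble) and maximum (high
	 * nibble) queue entry sizes as powers of two: 2^6 = 64-byte SQEs
	 * and 2^4 = 16-byte CQEs, the only sizes NVMe over Fabrics allows.
	 */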
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than a LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

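/*
 * Identify Namespace.  An inactive but otherwise valid NSID is not an
 * error: the spec requires a zeroed structure to be returned instead,
 * which is why a failed nvmet_req_find_ns() clears the status and
 * jumps straight to the copy-out below.
 */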
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

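/*
 * Emit one namespace identification descriptor: a (type, length)
 * header followed by the identifier payload, advancing *off past both
 * so consecutive descriptors form a packed list.
 */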
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;

out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
{
	/* Not supported: return zeroes */
	nvmet_req_complete(req,
		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		nvmet_execute_identify_ns(req);
		return;
	case NVME_ID_CNS_CTRL:
		nvmet_execute_identify_ctrl(req);
		return;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		nvmet_execute_identify_nslist(req);
		return;
	case NVME_ID_CNS_NS_DESC_LIST:
		nvmet_execute_identify_desclist(req);
		return;
	case NVME_ID_CNS_CS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			/* Not supported */
			break;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ns_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvmet_execute_identify_ctrl_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ctrl_zns(req);
				return;
			}
			break;
		}
		break;
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
		 req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately, telling the host that the command
 * to abort wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

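/*
 * KATO is specified in milliseconds on the wire but kept in seconds
 * internally; the keep-alive timer is stopped and restarted so the new
 * value takes effect immediately.
 */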
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_data_len_lte(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly == true)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

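/*
 * Asynchronous Event Requests carry no data.  The request is parked in
 * async_event_cmds[] and completed later, once an event is available;
 * exceeding the outstanding-AER limit fails the request immediately.
 */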
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_admin_cmd(req);
	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}