// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

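/*
 * Generation counter reported in the discovery log page header (GENCTR).
 * It is incremented on every discovery-relevant configuration change so
 * that hosts can detect that a previously fetched log page is stale.
 */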
static u64 nvmet_genctr;

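/*
 * Queue a "discovery log changed" AEN to @ctrl if it is connected through
 * @port and has not disabled the discovery change notice.
 */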
static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

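/*
 * Notify all discovery controllers that the discovery log for @port has
 * changed.  If @subsys is non-NULL, only hosts allowed to access that
 * subsystem are notified.  Callers must hold nvmet_config_sem.
 */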
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If the transport can signal discovery changes, notify it as well */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

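/*
 * Notify discovery controllers about a change to @subsys as reachable
 * through @port.  If @host is non-NULL, only controllers created by that
 * host are notified.
 */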
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

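/*
 * Walk all ports that export @subsys and send a discovery change
 * notification for each of them.  Callers must hold nvmet_config_sem.
 */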
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

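/*
 * Add @port as a referral of @parent and announce the resulting discovery
 * log change.  Enabling an already enabled referral is a no-op.
 */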
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

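/*
 * Remove @port from the referral list of @parent and announce the
 * resulting discovery log change.  Disabling a referral that is not
 * enabled is a no-op.
 */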
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

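/*
 * Fill discovery log page entry number @numrec in @hdr with the transport
 * address information of @port for the subsystem identified by @subsys_nqn.
 */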
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" ipv4/ipv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT).  The traddr reported in the discovery
 * log page must not contain that "any" IP address.  If the transport
 * implements .disc_traddr, use it: this callback derives the discovery
 * traddr from the req->port address in case the port in question listens
 * on an "any" IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

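/*
 * Count how many log page entries the reply for this host will contain:
 * one entry for the discovery subsystem itself, one per allowed subsystem
 * on this port, and one per referral.  Called with nvmet_config_sem held.
 */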
static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 1;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}

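/*
 * Handler for the Get Log Page command on a discovery controller.  Only
 * the discovery log (NVME_LOG_DISC) is supported; the log is built in a
 * kernel buffer and the host-requested range (offset/length) is copied to
 * the host's SGL.
 */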
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;
	char traddr[NVMF_TRADDR_SIZE];

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we allocate at least a buffer of response header size.
	 * If the host-provided data length is less than the header size,
	 * only the number of bytes requested by the host will be copied
	 * back.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}
	hdr = buffer;

	nvmet_set_disc_traddr(req, req->port, traddr);

	nvmet_format_discovery_entry(hdr, req->port,
				     nvmet_disc_subsys->subsysnqn,
				     traddr, NVME_NQN_CURR, numrec);
	numrec++;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_format_discovery_entry(hdr, req->port,
					     p->subsys->subsysnqn, traddr,
					     NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
					     NVME_DISC_SUBSYS_NAME,
					     r->disc_addr.traddr,
					     NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

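/*
 * Handler for the Identify command on a discovery controller.  Only
 * CNS 01h (Identify Controller) is supported; the returned data identifies
 * this controller as a discovery controller.
 */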
static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memset(id->fr, ' ', sizeof(id->fr));
	memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
		       strlen(ctrl->subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->cntrltype = NVME_CTRL_DISC;

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);	/* extended data for Get Log Page is supported */

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

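/*
 * Handler for the Set Features command on a discovery controller.  Only
 * the Keep Alive Timer and Asynchronous Event Configuration features are
 * supported.
 */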
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

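/*
 * Handler for the Get Features command on a discovery controller.  Mirrors
 * the Set Features handler: only the Keep Alive Timer and Asynchronous
 * Event Configuration features can be queried.
 */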
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

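/*
 * Admin command parser for discovery controllers: reject commands received
 * before the controller is ready and dispatch the small set of admin
 * commands a discovery controller supports to their handlers.
 */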
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

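/*
 * Allocate the single, well-known discovery subsystem at module init; it
 * is released again in nvmet_exit_discovery().
 */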
int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}