// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define DRV_NAME	"octeontx-cptvf"
#define DRV_VERSION	"1.0"

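/*
 * Tasklet bottom half: post-process completed instructions on this
 * VF's virtual queue.
 */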
static void vq_work_handler(unsigned long data)
{
	struct otx_cptvf_wqe_info *cwqe_info =
					(struct otx_cptvf_wqe_info *) data;

	otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
}

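/*
 * Allocate the wqe_info block and initialize one tasklet per virtual
 * queue; the DONE interrupt handler schedules these tasklets.
 */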
static int init_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}
	cptvf->wqe_info = cwqe_info;

	return 0;
}

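/* Kill the per-queue tasklets and release the wqe_info block. */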
static void cleanup_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}

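/*
 * Free the pending queue entries; the buffers are zeroed before being
 * freed since entries may reference sensitive request data.
 */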
static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
{
	struct otx_cpt_pending_queue *queue;
	int i;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kfree_sensitive(queue->head);
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = 0;
	}
	pqinfo->num_queues = 0;
}

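/*
 * Allocate qlen entries for each pending queue; on any failure, every
 * queue allocated so far is freed before returning.
 */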
static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
				u32 num_queues)
{
	struct otx_cpt_pending_queue *queue = NULL;
	int ret;
	u32 i;

	pqinfo->num_queues = num_queues;

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->pending_count = 0;
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = qlen;

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}
	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}

static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
			       u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!num_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		return ret;
	}
	return 0;
}

static void cleanup_pending_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		cptvf->num_queues);
	free_pending_queues(&cptvf->pqinfo);
}

static void free_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo)
{
	struct otx_cpt_cmd_queue *queue = NULL;
	struct otx_cpt_cmd_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	/* clean up for each queue */
	for (i = 0; i < cptvf->num_queues; i++) {
		queue = &cqinfo->queue[i];

		while (!list_empty(&cqinfo->queue[i].chead)) {
			chunk = list_first_entry(&cqinfo->queue[i].chead,
					struct otx_cpt_cmd_chunk, nextchunk);

			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head,
					  chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			list_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}
		queue->num_chunks = 0;
		queue->idx = 0;
	}
}

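/*
 * Build each command queue as a circular list of DMA-coherent chunks.
 * The trailing 8 bytes of every chunk hold the bus address of the next
 * chunk, and the last chunk links back to the first.
 */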
static int alloc_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo,
				u32 qlen)
{
	struct otx_cpt_cmd_chunk *curr, *first, *last;
	struct otx_cpt_cmd_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	size_t q_size, c_size, rem_q_size;
	u32 qcsize_bytes;
	int i;

	/*
	 * Qsize in dwords, needed for SADDR config; plus one dword for
	 * the next-chunk pointer.
	 */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
		       OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * OTX_CPT_INST_SIZE;

	qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;

	/* per queue initialization */
	for (i = 0; i < cptvf->num_queues; i++) {
		rem_q_size = q_size;
		first = NULL;
		last = NULL;

		queue = &cqinfo->queue[i];
		INIT_LIST_HEAD(&queue->chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
				 rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
					c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
					&curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev,
					"Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->num_chunks);
				goto free_curr;
			}
			curr->size = c_size;

			if (queue->num_chunks == 0) {
				first = curr;
				queue->base = first;
			}
			list_add_tail(&curr->nextchunk,
				      &cqinfo->queue[i].chead);

			queue->num_chunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
					(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/*
		 * Make the queue circular, tie back last chunk entry to head
		 */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
	}
	return 0;
free_curr:
	kfree(curr);
cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}

static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
			cptvf->num_queues);
		return ret;
	}
	return ret;
}

static void cleanup_command_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		cptvf->num_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}

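/*
 * Software-side initialization: command queues, pending queues and
 * worker tasklets, in that order, with rollback on failure.
 */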
static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = 0;
	int ret;

	max_dev_queues = OTX_CPT_NUM_QS_PER_VF;
	/* Clamp the queue count to what the device supports */
	num_queues = min_t(u32, num_queues, max_dev_queues);
	cptvf->num_queues = num_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			num_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}
	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

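/* VQ register accessors; each VF owns a single virtual queue, index 0. */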
static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
{
	union otx_cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
	vqx_ctl.s.ena = val;
	writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
}

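/*
 * Ring the VQ doorbell.  The hardware counts 64-bit words and each CPT
 * instruction is eight words, hence the multiply by 8.
 */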
void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
}

static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
{
	union otx_cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
	vqx_inprg.s.inflight = val;
	writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
}

static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.num_wait = val;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.num_wait;
}

static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.time_wait = time;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.time_wait;
}

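/* W1S helpers: enable individual MISC and DONE interrupt sources. */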
static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable MBOX interrupt for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
	/* Enable DONE interrupt for the requested VF */
	vqx_done_ena.s.done = 1;
	writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
}

static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
{
	return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

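/*
 * MISC interrupt handler: dispatch on the pending cause bit.  Mailbox
 * traffic is the common case; the rest are hardware error reports.
 */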
static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
					   void *arg)
{
	struct otx_cptvf *cptvf = arg;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		otx_cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		otx_cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev,
			"Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev,
			"Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev,
			"NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev,
			"Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}

static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
						     int qno)
{
	struct otx_cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->num_queues))
		return NULL;
	nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done vqx_done;

	vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
					   u32 ackcnt)
{
	union otx_cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
}

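/*
 * DONE interrupt handler: read the completion count, acknowledge it,
 * and kick the per-VQ tasklet to post-process finished requests.
 */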
static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
					   void *cptvf_dev)
{
	struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct otx_cptvf_wqe *wqe;

		/*
		 * Acknowledge the number of scheduled completions for
		 * processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

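/* Pin the MSI-X vector to a CPU chosen from the VF's NUMA node. */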
static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev,
			"Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
{
	union otx_cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
}

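/*
 * Hardware-side init: quiesce the VQ, program the first chunk's bus
 * address as SADDR, set completion coalescing and enable the queue.
 */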
static void cptvf_device_init(struct otx_cptvf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	otx_cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
	cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
}

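/*
 * Per-VF sysfs attributes: VF type, engine group binding and the DONE
 * interrupt coalescing thresholds (time and count).
 */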
static ssize_t vf_type_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	char *msg;

	switch (cptvf->vftype) {
	case OTX_CPT_AE_TYPES:
		msg = "AE";
		break;

	case OTX_CPT_SE_TYPES:
		msg = "SE";
		break;

	default:
		msg = "Invalid";
		break;
	}

	return sysfs_emit(buf, "%s\n", msg);
}

static ssize_t vf_engine_group_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", cptvf->vfgrp);
}

static ssize_t vf_engine_group_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	if (val < 0)
		return -EINVAL;

	if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Engine group >= max available groups %d\n",
			OTX_CPT_MAX_ENGINE_GROUPS);
		return -EINVAL;
	}

	ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
	if (ret)
		return ret;

	return count;
}

static ssize_t vf_coalesc_time_wait_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n",
			  cptvf_read_vq_done_timewait(cptvf));
}

static ssize_t vf_coalesc_num_wait_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n",
			  cptvf_read_vq_done_numwait(cptvf));
}

static ssize_t vf_coalesc_time_wait_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
	    val > OTX_CPT_COALESC_MAX_TIME_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_timewait(cptvf, val);
	return count;
}

static ssize_t vf_coalesc_num_wait_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
	    val > OTX_CPT_COALESC_MAX_NUM_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_numwait(cptvf, val);
	return count;
}

static DEVICE_ATTR_RO(vf_type);
static DEVICE_ATTR_RW(vf_engine_group);
static DEVICE_ATTR_RW(vf_coalesc_time_wait);
static DEVICE_ATTR_RW(vf_coalesc_num_wait);

static struct attribute *otx_cptvf_attrs[] = {
	&dev_attr_vf_type.attr,
	&dev_attr_vf_engine_group.attr,
	&dev_attr_vf_coalesc_time_wait.attr,
	&dev_attr_vf_coalesc_num_wait.attr,
	NULL
};

static const struct attribute_group otx_cptvf_sysfs_group = {
	.attrs = otx_cptvf_attrs,
};

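/*
 * Probe: map the configuration BAR, set up MSI-X vectors, handshake
 * with the PF over mailbox, initialize software and hardware queue
 * state, then register the crypto algorithms and sysfs attributes.
 */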
static int otx_cptvf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx_cptvf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto disable_device;
	}
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
		goto release_regions;
	}

	/* Map VF's configuration registers */
	cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
				    OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			OTX_CPT_VF_MSIX_VECTORS);
		goto unmap_region;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request misc irq\n");
		goto free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check cpt pf status, gets chip ID / device ID from PF if ready */
	err = otx_cptvf_check_pf_ready(cptvf);
	if (err)
		goto free_misc_irq;

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = otx_cptvf_send_vq_size_msg(cptvf);
	if (err)
		goto sw_cleanup;

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
	if (err)
		goto sw_cleanup;

	cptvf->priority = 1;
	err = otx_cptvf_send_vf_priority_msg(cptvf);
	if (err)
		goto sw_cleanup;

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request done irq\n");
		goto sw_cleanup;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = otx_cptvf_send_vf_up(cptvf);
	if (err)
		goto free_irq_affinity;

	/* Initialize algorithms and set ops */
	err = otx_cpt_crypto_init(pdev, THIS_MODULE,
		    cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
		    cptvf->vftype, 1, cptvf->num_vfs);
	if (err) {
		dev_err(dev, "Failed to register crypto algs\n");
		goto free_irq_affinity;
	}

	err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
	if (err) {
		dev_err(dev, "Creating sysfs entries failed\n");
		goto crypto_exit;
	}

	return 0;

crypto_exit:
	otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
sw_cleanup:
	cptvf_sw_cleanup(cptvf);
free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
unmap_region:
	pci_iounmap(pdev, cptvf->reg_base);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return err;
}

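/* Tear down in reverse probe order once the PF has acknowledged VF DOWN. */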
static void otx_cptvf_remove(struct pci_dev *pdev)
{
	struct otx_cptvf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (otx_cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
		otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		cptvf_sw_cleanup(cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		pci_iounmap(pdev, cptvf->reg_base);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

/* Supported devices */
static const struct pci_device_id otx_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, }  /* end of table */
};

static struct pci_driver otx_cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = otx_cptvf_id_table,
	.probe = otx_cptvf_probe,
	.remove = otx_cptvf_remove,
};

module_pci_driver(otx_cptvf_pci_driver);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);