/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"

#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

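/*
 * Per queue pair state: the local receive ring carved out of a memory
 * window, the mapping of the peer-visible transmit memory window, link
 * management work items, and transfer statistics.
 */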
struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;

	void (*event_handler) (void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

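/* Queue pairs are spread across the available memory windows round-robin */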
#define QP_TO_MW(ndev, qp)	((qp) % ntb_max_mw(ndev))
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (drv && drv->probe)
		rc = drv->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

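/*
 * Carve this queue pair's slice out of the local memory window: place the
 * receive ring and the shared rx_info word, size the frames, and zero the
 * per-frame payload headers.
 */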
static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num, mw_max;
	unsigned int i;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}

static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}

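/*
 * Allocate (or re-allocate) the coherent buffer backing a memory window and
 * program its bus address into the NTB hardware so the peer's writes land in
 * it.
 */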
static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	if (mw->size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		mw->size = 0;
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}

static void ntb_qp_link_cleanup(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}

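/*
 * Scratchpad handshake run whenever the hardware link comes up: publish the
 * local memory window sizes, queue pair count, and transport version, then
 * read back and validate the peer's values before marking the transport up.
 */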
static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < ntb_max_mw(ndev); i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			ntb_max_mw(ndev), NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != ntb_max_mw(ndev))
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < ntb_max_mw(ndev); i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num, mw_max;
	u64 qp_offset;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp_offset = qp_num / mw_max * tx_size;
	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (ntb_query_debugfs(nt->ndev)) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     ntb_query_debugfs(nt->ndev));

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	return 0;
}

int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
			 GFP_KERNEL);
	if (!nt->mw) {
		rc = -ENOMEM;
		goto err1;
	}

	if (max_num_clients)
		nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
	else
		nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err2;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err4:
	ntb_unregister_event_callback(nt->ndev);
err3:
	kfree(nt->qps);
err2:
	kfree(nt->mw);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	kfree(nt);
	return rc;
}

void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct ntb_device *ndev = nt->ndev;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++) {
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);
		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
	}

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	ntb_unregister_event_callback(ndev);

	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	kfree(nt->mw);
	ntb_unregister_transport(ndev);
	kfree(nt);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	ntb_rx_copy_callback(entry);
}

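/*
 * Receive path: offload large, suitably aligned copies out of the memory
 * window to the DMA engine, and fall back to a CPU memcpy for small or
 * unaligned payloads or when no DMA channel is available.
 */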
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t) offset & ~PAGE_MASK;
	buff_off = (size_t) buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		goto err;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	if (hdr->len > entry->len) {
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);

		goto err;
	}

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

out:
	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
		     &qp->rx_pend_q);
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	goto out;
}

static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc, i;

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_doorbell(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	ntb_tx_copy_callback(entry);
}

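/*
 * Transmit path: write the payload header into the peer-visible memory
 * window, then hand large aligned copies to the DMA engine and fall back to
 * memcpy_toio() otherwise.
 */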
static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t) buf & ~PAGE_MASK;
	dest_off = (size_t) dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
		qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
		entry->buf);
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp->cb_data, qp, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

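/*
 * Send a zero-length frame tagged with LINK_DOWN_FLAG so the peer tears down
 * its side of the queue pair.
 */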
1359static void ntb_send_link_down(struct ntb_transport_qp *qp)
1360{
1361 struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
1362 struct ntb_queue_entry *entry;
1363 int i, rc;
1364
1365 if (qp->qp_link == NTB_LINK_DOWN)
1366 return;
1367
1368 qp->qp_link = NTB_LINK_DOWN;
1369 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
1370
1371 for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
Jon Masonf7667552013-01-19 02:02:24 -07001372 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
Jon Masonfce8a7b2012-11-16 19:27:12 -07001373 if (entry)
1374 break;
1375 msleep(100);
1376 }
1377
1378 if (!entry)
1379 return;
1380
1381 entry->cb_data = NULL;
1382 entry->buf = NULL;
1383 entry->len = 0;
1384 entry->flags = LINK_DOWN_FLAG;
1385
1386 rc = ntb_process_tx(qp, entry);
1387 if (rc)
1388 dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
1389 qp->qp_num);
1390}
1391
1392/**
1393 * ntb_transport_create_queue - Create a new NTB transport layer queue
1394 * @rx_handler: receive callback function
1395 * @tx_handler: transmit callback function
1396 * @event_handler: event callback function
1397 *
1398 * Create a new NTB transport layer queue and provide the queue with a callback
1399 * routine for both transmit and receive. The receive callback routine will be
1400 * used to pass up data when the transport has received it on the queue. The
1401 * transmit callback routine will be called when the transport has completed the
1402 * transmission of the data on the queue and the data is ready to be freed.
1403 *
1404 * RETURNS: pointer to newly created ntb_queue, NULL on error.
1405 */
1406struct ntb_transport_qp *
1407ntb_transport_create_queue(void *data, struct pci_dev *pdev,
1408 const struct ntb_queue_handlers *handlers)
1409{
1410 struct ntb_queue_entry *entry;
1411 struct ntb_transport_qp *qp;
1412 struct ntb_transport *nt;
1413 unsigned int free_queue;
1414 int rc, i;
1415
1416 nt = ntb_find_transport(pdev);
1417 if (!nt)
1418 goto err;
1419
1420 free_queue = ffs(nt->qp_bitmap);
1421 if (!free_queue)
1422 goto err;
1423
1424 /* decrement free_queue to make it zero based */
1425 free_queue--;
1426
1427 clear_bit(free_queue, &nt->qp_bitmap);
1428
1429 qp = &nt->qps[free_queue];
1430 qp->cb_data = data;
1431 qp->rx_handler = handlers->rx_handler;
1432 qp->tx_handler = handlers->tx_handler;
1433 qp->event_handler = handlers->event_handler;
1434
Jon Mason282a2fe2013-02-12 09:52:50 -07001435 qp->dma_chan = dma_find_channel(DMA_MEMCPY);
1436 if (!qp->dma_chan)
1437 dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
1438 else
1439 dmaengine_get();
1440
Jon Masonfce8a7b2012-11-16 19:27:12 -07001441 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1442 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1443 if (!entry)
1444 goto err1;
1445
Jon Mason282a2fe2013-02-12 09:52:50 -07001446 entry->qp = qp;
Jon Masonfce8a7b2012-11-16 19:27:12 -07001447 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
Jon Masonf7667552013-01-19 02:02:24 -07001448 &qp->rx_free_q);
Jon Masonfce8a7b2012-11-16 19:27:12 -07001449 }
1450
1451 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1452 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1453 if (!entry)
1454 goto err2;
1455
Jon Mason282a2fe2013-02-12 09:52:50 -07001456 entry->qp = qp;
Jon Masonfce8a7b2012-11-16 19:27:12 -07001457 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
Jon Masonf7667552013-01-19 02:02:24 -07001458 &qp->tx_free_q);
Jon Masonfce8a7b2012-11-16 19:27:12 -07001459 }
1460
1461 tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
1462
1463 rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
1464 ntb_transport_rxc_db);
1465 if (rc)
1466 goto err3;
1467
1468 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1469
1470 return qp;
1471
1472err3:
1473 tasklet_disable(&qp->rx_work);
1474err2:
Jon Masonf7667552013-01-19 02:02:24 -07001475 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
Jon Masonfce8a7b2012-11-16 19:27:12 -07001476 kfree(entry);
1477err1:
Jon Masonf7667552013-01-19 02:02:24 -07001478 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
Jon Masonfce8a7b2012-11-16 19:27:12 -07001479 kfree(entry);
1480 set_bit(free_queue, &nt->qp_bitmap);
1481err:
1482 return NULL;
1483}
1484EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
1485
1486/**
1487 * ntb_transport_free_queue - Frees NTB transport queue
1488 * @qp: NTB queue to be freed
1489 *
1490 * Frees NTB transport queue
1491 */
1492void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1493{
Jon Mason186f27f2013-01-22 11:35:40 -07001494 struct pci_dev *pdev;
Jon Masonfce8a7b2012-11-16 19:27:12 -07001495 struct ntb_queue_entry *entry;
1496
1497 if (!qp)
1498 return;
1499
Jon Mason186f27f2013-01-22 11:35:40 -07001500 pdev = ntb_query_pdev(qp->ndev);
1501
Jon Mason282a2fe2013-02-12 09:52:50 -07001502 if (qp->dma_chan) {
1503 struct dma_chan *chan = qp->dma_chan;
1504 /* Setting the dma_chan to NULL will force any new traffic to be
1505 * processed by the CPU instead of the DMA engine
1506 */
1507 qp->dma_chan = NULL;
1508
1509 /* Try to be nice and wait for any queued DMA engine
1510 * transactions to process before smashing it with a rock
1511 */
1512 dma_sync_wait(chan, qp->last_cookie);
1513 dmaengine_terminate_all(chan);
1514 dmaengine_put();
1515 }
Jon Masonfce8a7b2012-11-16 19:27:12 -07001516
1517 ntb_unregister_db_callback(qp->ndev, qp->qp_num);
1518 tasklet_disable(&qp->rx_work);
1519
Jon Mason282a2fe2013-02-12 09:52:50 -07001520 cancel_delayed_work_sync(&qp->link_work);
1521
Jon Masonf7667552013-01-19 02:02:24 -07001522 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
Jon Masonfce8a7b2012-11-16 19:27:12 -07001523 kfree(entry);
1524
Jon Masonf7667552013-01-19 02:02:24 -07001525 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
Jon Masonfce8a7b2012-11-16 19:27:12 -07001526 dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
1527 kfree(entry);
1528 }
1529
Jon Masonf7667552013-01-19 02:02:24 -07001530 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
Jon Masonfce8a7b2012-11-16 19:27:12 -07001531 kfree(entry);
1532
1533 set_bit(qp->qp_num, &qp->transport->qp_bitmap);
1534
1535 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1536}
1537EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
1538
1539/**
1540 * ntb_transport_rx_remove - Dequeues enqueued rx packet
1541 * @qp: NTB transport layer queue from which to dequeue the buffer
1542 * @len: pointer to variable into which the dequeued buffer's length is written
1543 *
1544 * Dequeues unused buffers from the receive queue. Should only be used during
1545 * shutdown of the qp.
1546 *
1547 * RETURNS: pointer to the dequeued receive buffer, or NULL on error.
1548 */
1549void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1550{
1551 struct ntb_queue_entry *entry;
1552 void *buf;
1553
1554 if (!qp || qp->client_ready == NTB_LINK_UP)
1555 return NULL;
1556
1557 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
1558 if (!entry)
1559 return NULL;
1560
1561 buf = entry->cb_data;
1562 *len = entry->len;
1563
Jon Masonf7667552013-01-19 02:02:24 -07001564 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
Jon Masonfce8a7b2012-11-16 19:27:12 -07001565
1566 return buf;
1567}
1568EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
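
/*
 * Example (illustrative sketch only): a typical client shutdown path takes
 * the queue link down, reclaims any receive buffers it still owns, and only
 * then frees the queue.  How the reclaimed buffers are released is up to the
 * client; my_free_rx_buffer() is an assumed helper for the example.
 *
 *	ntb_transport_link_down(qp);
 *
 *	while ((buf = ntb_transport_rx_remove(qp, &len)))
 *		my_free_rx_buffer(buf, len);
 *
 *	ntb_transport_free_queue(qp);
 */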
1569
1570/**
1571 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1572 * @qp: NTB transport layer queue the entry is to be enqueued on
1573 * @cb: per buffer pointer that is handed back to the rx_handler callback
1574 * @data: pointer to data buffer that incoming packets will be copied into
1575 * @len: length of the data buffer
1576 *
1577 * Enqueue a new receive buffer onto the transport queue into which an NTB
1578 * payload can be received.
1579 *
1580 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1581 */
1582int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1583 unsigned int len)
1584{
1585 struct ntb_queue_entry *entry;
1586
1587 if (!qp)
1588 return -EINVAL;
1589
1590 entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
1591 if (!entry)
1592 return -ENOMEM;
1593
1594 entry->cb_data = cb;
1595 entry->buf = data;
1596 entry->len = len;
1597
Jon Masonf7667552013-01-19 02:02:24 -07001598 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
Jon Masonfce8a7b2012-11-16 19:27:12 -07001599
1600 return 0;
1601}
1602EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
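
/*
 * Example (illustrative sketch only): receive buffers are typically posted in
 * a loop during queue setup, each sized to the queue's maximum payload.  The
 * allocation scheme and MY_NUM_RX_BUFFERS are assumptions of the example.
 *
 *	for (i = 0; i < MY_NUM_RX_BUFFERS; i++) {
 *		buf = kmalloc(ntb_transport_max_size(qp), GFP_KERNEL);
 *		if (!buf)
 *			break;
 *
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf,
 *					      ntb_transport_max_size(qp));
 *		if (rc) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 */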
1603
1604/**
1605 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
1606 * @qp: NTB transport layer queue the entry is to be enqueued on
1607 * @cb: per buffer pointer that is handed back to the tx_handler callback
1608 * @data: pointer to data buffer that will be sent
1609 * @len: length of the data buffer
1610 *
1611 * Enqueue a new transmit buffer onto the transport queue from which an NTB
Jon Masonf9a2cf82013-07-29 16:46:43 -07001612 * payload will be transmitted. This assumes that a lock is being held to
Jon Masonfce8a7b2012-11-16 19:27:12 -07001613 * serialize access to the qp.
1614 *
1615 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1616 */
1617int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1618 unsigned int len)
1619{
1620 struct ntb_queue_entry *entry;
1621 int rc;
1622
1623 if (!qp || qp->qp_link != NTB_LINK_UP || !len)
1624 return -EINVAL;
1625
1626 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
Jon Mason282a2fe2013-02-12 09:52:50 -07001627 if (!entry) {
1628 qp->tx_err_no_buf++;
Jon Masonfce8a7b2012-11-16 19:27:12 -07001629 return -ENOMEM;
Jon Mason282a2fe2013-02-12 09:52:50 -07001630 }
Jon Masonfce8a7b2012-11-16 19:27:12 -07001631
1632 entry->cb_data = cb;
1633 entry->buf = data;
1634 entry->len = len;
1635 entry->flags = 0;
1636
1637 rc = ntb_process_tx(qp, entry);
1638 if (rc)
1639 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1640 &qp->tx_free_q);
1641
1642 return rc;
1643}
1644EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
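
/*
 * Example (illustrative sketch only): transmission is asynchronous, so the
 * buffer must remain valid until the tx_handler callback signals completion.
 * The call returns -ENOMEM when no free tx entries are available and -EINVAL
 * when the qp link is down, so a client usually backs off and retries in the
 * former case.  The size check and my_handle_tx_error() are assumptions of
 * the example, not requirements of the API.
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EMSGSIZE;
 *
 *	rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
 *	if (rc)
 *		my_handle_tx_error(qp, rc);
 */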
1645
1646/**
1647 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
1648 * @qp: NTB transport layer queue to be enabled
1649 *
1650 * Notify NTB transport layer of client readiness to use queue
1651 */
1652void ntb_transport_link_up(struct ntb_transport_qp *qp)
1653{
1654 if (!qp)
1655 return;
1656
1657 qp->client_ready = NTB_LINK_UP;
1658
1659 if (qp->transport->transport_link == NTB_LINK_UP)
1660 schedule_delayed_work(&qp->link_work, 0);
1661}
1662EXPORT_SYMBOL_GPL(ntb_transport_link_up);
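
/*
 * Example (illustrative sketch only): the usual bring-up order is to create
 * the queue, post receive buffers (see ntb_transport_rx_enqueue() above), and
 * only then declare the client ready.  Subsequent link state changes are
 * reported asynchronously through the queue's event_handler.  The names below
 * are assumptions carried over from the earlier examples.
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &my_handlers);
 *	my_post_rx_buffers(qp);
 *	ntb_transport_link_up(qp);
 */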
1663
1664/**
1665 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1666 * @qp: NTB transport layer queue to be disabled
1667 *
1668 * Notify the NTB transport layer of the client's desire to no longer receive
1669 * data on the specified transport queue. It is the client's responsibility to
1670 * ensure all entries on the queue are purged or otherwise handled appropriately.
Jon Masonfce8a7b2012-11-16 19:27:12 -07001671 */
1672void ntb_transport_link_down(struct ntb_transport_qp *qp)
1673{
Jon Mason186f27f2013-01-22 11:35:40 -07001674 struct pci_dev *pdev;
Jon Masonfce8a7b2012-11-16 19:27:12 -07001675 int rc, val;
1676
1677 if (!qp)
1678 return;
1679
Jon Mason186f27f2013-01-22 11:35:40 -07001680 pdev = ntb_query_pdev(qp->ndev);
Jon Masonfce8a7b2012-11-16 19:27:12 -07001681 qp->client_ready = NTB_LINK_DOWN;
1682
1683 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
1684 if (rc) {
1685 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
1686 return;
1687 }
1688
1689 rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
1690 val & ~(1 << qp->qp_num));
1691 if (rc)
1692 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
1693 val & ~(1 << qp->qp_num), QP_LINKS);
1694
1695 if (qp->qp_link == NTB_LINK_UP)
1696 ntb_send_link_down(qp);
1697 else
1698 cancel_delayed_work_sync(&qp->link_work);
1699}
1700EXPORT_SYMBOL_GPL(ntb_transport_link_down);
1701
1702/**
1703 * ntb_transport_link_query - Query transport link state
1704 * @qp: NTB transport layer queue to be queried
1705 *
1706 * Query connectivity to the remote system of the NTB transport queue
1707 *
1708 * RETURNS: true for link up or false for link down
1709 */
1710bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1711{
Jon Mason186f27f2013-01-22 11:35:40 -07001712 if (!qp)
1713 return false;
1714
Jon Masonfce8a7b2012-11-16 19:27:12 -07001715 return qp->qp_link == NTB_LINK_UP;
1716}
1717EXPORT_SYMBOL_GPL(ntb_transport_link_query);
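
/*
 * Example (illustrative sketch only): a client may poll the qp link state
 * before attempting a transmit, although ntb_transport_tx_enqueue() will
 * reject the request on its own if the link is down.
 *
 *	if (!ntb_transport_link_query(qp))
 *		return -ENOTCONN;
 *
 *	rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
 */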
1718
1719/**
1720 * ntb_transport_qp_num - Query the qp number
1721 * @qp: NTB transport layer queue to be queried
1722 *
1723 * Query qp number of the NTB transport queue
1724 *
1725 * RETURNS: a zero-based number specifying the qp number
1726 */
1727unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1728{
Jon Mason186f27f2013-01-22 11:35:40 -07001729 if (!qp)
1730 return 0;
1731
Jon Masonfce8a7b2012-11-16 19:27:12 -07001732 return qp->qp_num;
1733}
1734EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1735
1736/**
1737 * ntb_transport_max_size - Query the max payload size of a qp
1738 * @qp: NTB transport layer queue to be queried
1739 *
1740 * Query the maximum payload size permissible on the given qp
1741 *
1742 * RETURNS: the max payload size of a qp
1743 */
Jon Masonef114ed2013-01-19 02:02:18 -07001744unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
Jon Masonfce8a7b2012-11-16 19:27:12 -07001745{
Jon Mason282a2fe2013-02-12 09:52:50 -07001746 unsigned int max;
1747
Jon Mason186f27f2013-01-22 11:35:40 -07001748 if (!qp)
1749 return 0;
1750
Jon Mason282a2fe2013-02-12 09:52:50 -07001751 if (!qp->dma_chan)
1752 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1753
1754 /* If DMA engine usage is possible, try to find the max size for that */
1755 max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
1756 max -= max % (1 << qp->dma_chan->device->copy_align);
1757
1758 return max;
Jon Masonfce8a7b2012-11-16 19:27:12 -07001759}
1760EXPORT_SYMBOL_GPL(ntb_transport_max_size);
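
/*
 * Example (illustrative sketch only): the usable payload depends on the frame
 * size, the payload header, and - when a DMA channel is in use - the engine's
 * copy alignment, so buffer sizes should be derived from this helper rather
 * than hard-coded.
 *
 *	size_t buf_len = ntb_transport_max_size(qp);
 *
 *	buf = kmalloc(buf_len, GFP_KERNEL);
 *	if (buf)
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf, buf_len);
 */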