/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION	"5.00"

#define IOAT_DMA_DCA_ANY_CPU	~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
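/*
 * Illustrative note (assumption, not spelled out in this header): each
 * channel's register block is laid out at a 0x80-byte stride from the
 * device MMIO base, so chan_num() recovers a channel number from the
 * pointer difference; e.g. a channel whose reg_base sits 0x100 bytes past
 * ioat_dma->reg_base reports chan_num() == 2.
 */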

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
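/*
 * Example (illustrative): the hardware field encodes "sources beyond the
 * implicit two", so a 5-source XOR is programmed as src_cnt_to_hw(5) == 3
 * and decoded back with src_cnt_to_sw(3) == 5; the 16-source (extended
 * descriptor) variants apply the same idea with an offset of 9.
 */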

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 * @chancnt: number of channels on the device
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;
	int chancnt;

	/* shadow version for CB3.3 chan reset errata workaround */
	u64 msixtba0;
	u64 msixdata0;
	u32 msixpba;
};

#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
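/*
 * Worked example (assuming IOAT_DESC_SZ is 64 bytes, per "hw.h"): one
 * 512 KB chunk holds 524288 / 64 = 8192 hardware descriptors, so a
 * maximally sized ring of IOAT_MAX_DESCS (65536) entries spans
 * 65536 / 8192 = 8 chunks, which sizes the descs[] array below.
 */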

struct ioat_descs {
	void *virt;
	dma_addr_t hw;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
	struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
	int desc_chunks;
	int intr_coalesce;
	int prev_intr_coalesce;
};

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
	ssize_t (*store)(struct dma_chan *, const char *, size_t);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */

struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
	#ifdef DEBUG
	int id;
	#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}
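/*
 * Usage sketch (illustrative): the dmaengine core hands the driver the
 * embedded struct dma_chan, and the driver recovers its private channel
 * state with container_of(), e.g.
 *
 *	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 *
 *	dev_dbg(to_dev(ioat_chan), "ring head: %#x\n", ioat_chan->head);
 */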

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
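/*
 * Worked example (illustrative): head, issued and tail are free-running
 * indices with tail <= issued <= head (modulo the ring size). For an
 * order-4 ring (16 entries) with head == 10, issued == 7, tail == 3:
 *
 *	ioat_ring_active()  == 7   descriptors still owned by the engine
 *	ioat_ring_pending() == 3   prepped but not yet pushed to hardware
 *	ioat_ring_space()   == 9   free slots for new submissions
 */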

static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}
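/*
 * Worked example (illustrative): with xfercap_log == 20 (1 MB per
 * descriptor), a 2.5 MB request covers two full descriptors plus a
 * remainder, so ioat_xferlen_to_descs() returns 3.
 */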

static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
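/*
 * Note (sketch, based on how the driver appears to use this): the chain
 * address register is 64 bits wide but is programmed as two 32-bit MMIO
 * writes, low half first. Restarting a ring from its cleanup point might
 * look like:
 *
 *	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
 *	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
 */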

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);
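/*
 * Usage sketch (illustrative, not part of this header): these prep
 * routines back the generic dmaengine callbacks, so clients normally go
 * through the framework rather than calling them directly, e.g.
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */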

/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(struct tasklet_struct *t);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */