/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

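/*
 * mv_desc_init - initialize a hardware descriptor: mark it as owned by
 * the DMA engine, clear the next-descriptor link, and program the
 * destination address and byte count.
 */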
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

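/*
 * mv_set_mode - program the operation mode (XOR or MEMCPY) into the low
 * bits of the channel configuration register, and set the descriptor
 * swap bit to match the CPU endianness.
 */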
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

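/*
 * mv_chan_is_busy - read the channel status field (bits 5:4 of the
 * activation register); a value of 1 means the channel is active.
 */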
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @mv_chan: XOR channel
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slot_used = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on a new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan,
			       dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

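/*
 * mv_xor_tasklet - deferred-work entry point: reap completed
 * descriptors outside hard-IRQ context, under the channel lock.
 */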
static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_xor_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

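/*
 * mv_xor_alloc_slot - find a free software descriptor slot, starting
 * from the last allocated one and making at most two passes over the
 * slot list; returns NULL (and kicks the cleanup tasklet) when all
 * slots are in use.
 */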
static struct mv_xor_desc_slot *
mv_xor_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	int retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {

		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slot_used) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;
			continue;
		}

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);

		iter->slot_used = 1;
		INIT_LIST_HEAD(&iter->chain_node);
		iter->async_tx.cookie = -EBUSY;
		mv_chan->last_used = iter;
		mv_desc_clear_next_desc(iter);

		return iter;
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
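/*
 * mv_xor_tx_submit - assign a cookie and append the descriptor to the
 * software chain; the hardware is restarted when the chain was empty,
 * or when the engine is idle and has already consumed the previous
 * chain tail.
 */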
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

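/*
 * mv_xor_prep_dma_xor - prepare a hardware descriptor that XORs src_cnt
 * source buffers into dest; transfers shorter than MV_XOR_MIN_BYTE_COUNT
 * are rejected (NULL is returned).
 */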
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	sw_desc = mv_xor_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_xor_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	WARN_ON(1);
}

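/*
 * mv_xor_interrupt_handler - top half: report any error cause, schedule
 * the cleanup tasklet, and acknowledge the end-of-chain/descriptor
 * interrupt cause bits.
 */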
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

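/*
 * mv_xor_issue_pending - batch descriptor submissions: the channel is
 * only (re)activated once at least MV_XOR_THRESHOLD descriptors are
 * pending.
 */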
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0,
					      PAGE_SIZE, DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

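/*
 * mv_xor_channel_add - set up one XOR channel: map the dummy buffers
 * backing DMA_INTERRUPT, allocate the descriptor pool, register the
 * dmaengine callbacks, hook up the interrupt, and run the self-tests
 * before registering the channel.
 */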
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

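/*
 * mv_xor_conf_mbus_windows - (re)program the engine's MBUS address
 * decoding windows so each DRAM chip-select is reachable, then enable
 * those windows in both bar-enable registers.
 */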
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");