/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following mode:
		 * EOTIE - End-of-transfer interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
				32);
		break;
	}
}

static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}

static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(fsl_chan))
			break;
		udelay(10);
	}
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

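/*
 * Timing note for dma_halt() above (added commentary): the loop polls
 * the status register up to 100 times with a 10 us delay between
 * reads, i.e. the channel has roughly 1 ms to go idle before the halt
 * timeout is reported.
 */
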
static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
		64);
}

static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Point the current tail's next-link pointer at the new
	 * descriptor's physical address and enable the End-of-segment
	 * interrupt on that (previously last) link descriptor.
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit must also be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When the DMA
 * transfers data from the source address (SA) and the loop size is 4,
 * the DMA will read from SA, SA + 1, SA + 2, SA + 3, then loop back
 * to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When the DMA
 * transfers data to the destination address (TA) and the loop size is
 * 4, the DMA will write to TA, TA + 1, TA + 2, TA + 3, then loop back
 * to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}

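/*
 * Worked example (added commentary, derived from the two helpers
 * above): for a loop size of 4, __ilog2(4) == 2, so the source helper
 * ORs FSL_DMA_MR_SAHE | (2 << 14) into the mode register and the
 * destination helper ORs FSL_DMA_MR_DAHE | (2 << 16). A size of 0
 * simply clears the corresponding hold-enable bit.
 */
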
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size     : Pause control size, 0 to disable external pause control.
 *             The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external signal
 * DREQ#. The pause control size is how many bytes are allowed to
 * transfer before pausing the channel, after which a new assertion of
 * DREQ# resumes channel operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

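/*
 * Worked example (added commentary): a pause size of 64 gives
 * __ilog2(64) == 6, so (6 << 24) & 0x0f000000 == 0x06000000 is ORed
 * into the mode register; the maximum of 1024 yields
 * __ilog2(1024) == 10, i.e. 0x0a000000, which still fits within the
 * 0x0f000000 mask.
 */
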
/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately; the DMA channel waits for the control pin to be
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	desc->async_tx.cookie = cookie;
	fsl_chan->common.cookie = desc->async_tx.cookie;

	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}

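/*
 * Note (added commentary): cookies grow monotonically; once the signed
 * counter wraps past INT_MAX the increment goes negative and the code
 * above restarts at 1, keeping zero and negative values free for the
 * "not submitted" and error conventions used by the dmaengine core.
 */
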
/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a DMA pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* The descriptor must be 32-byte aligned to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->async_tx.tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;
	LIST_HEAD(link_chain);

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate the link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			return NULL;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return first ? &first->async_tx : NULL;
}

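/*
 * Minimal client-side sketch (added commentary, not part of this
 * driver): one plausible way a dmaengine consumer of this kernel
 * generation could drive the memcpy path above through the generic
 * API. Error handling is trimmed, and "dst"/"src" are assumed to be
 * bus addresses already mapped with the streaming DMA API.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						  DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *
 *	cookie = tx->tx_submit(tx);	   (calls fsl_dma_tx_submit)
 *	dma_async_issue_pending(chan);	   (calls fsl_dma_memcpy_issue_pending)
 *
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *			== DMA_IN_PROGRESS)
 *		cpu_relax();
 */
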
/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: completed
 * link descriptors are removed from the queue, their callbacks are
 * run, and their memory is returned to the descriptor pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer the link descriptors in the channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	if (!dma_is_idle(fsl_chan))
		goto out_unlock;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred yet, we need to start the transfer.
	 */

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from the ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from %p\n",
				(void *)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}

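/*
 * Worked example (added commentary): the arithmetic above assumes one
 * status byte per channel, highest byte first. If only channel 0 has
 * events pending, e.g. gsr = 0x08000000, then ffs(gsr) == 28 and
 * ch_nr = (32 - 28) / 8 == 0; for gsr = 0x00000004, ffs(gsr) == 3 and
 * ch_nr = (32 - 3) / 8 == 3.
 */
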
static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}

static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = fdev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also get the loop-size hooks */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
					"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
				"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
		 compatible,
		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}

static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
{
	if (fchan->irq != NO_IRQ)
		free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}

static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsl_dma_device *fdev;
	struct device_node *child;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at %p...\n",
			match->compatible, (void *)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}

static int of_fsl_dma_remove(struct of_device *of_dev)
{
	struct fsl_dma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}

static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "fsl-elo-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
	.remove = of_fsl_dma_remove,
};

static __init int of_fsl_dma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&of_fsl_dma_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit of_fsl_dma_exit(void)
{
	of_unregister_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_init);
module_exit(of_fsl_dma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");