/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	struct dmaengine_result tx_result;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;
	struct list_head desc_terminated;

	struct virt_dma_desc *cyclic;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	vd->tx_result.result = DMA_TRANS_NOERROR;
	vd->tx_result.residue = 0;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}
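
/*
 * Example: a minimal sketch of a driver prep callback built on
 * vchan_tx_prep(). The foo_* names and the foo_desc layout (a
 * driver-private descriptor embedding a struct virt_dma_desc "vd")
 * are hypothetical, not part of this API:
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		d->src = src;
 *		d->dst = dst;
 *		d->len = len;
 *		return vchan_tx_prep(vc, &d->vd, flags);
 *	}
 *
 * GFP_NOWAIT is used because prep callbacks may run in atomic context;
 * the matching kfree() belongs in the channel's desc_free callback.
 */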

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}
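
/*
 * Example: a sketch of a device_issue_pending callback wrapping
 * vchan_issue_pending(); foo_dma_start(), which kicks the hardware for
 * the first issued descriptor, is hypothetical:
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		if (vchan_issue_pending(vc))
 *			foo_dma_start(vc);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *	}
 */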

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}
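
/*
 * Example: a sketch of an interrupt handler completing the in-flight
 * descriptor. struct foo_chan (wrapping a virt_dma_chan) and its
 * "active" pointer are hypothetical driver state; the descriptor is
 * assumed to have been removed from desc_issued when the transfer was
 * started:
 *
 *	static irqreturn_t foo_dma_irq(int irq, void *data)
 *	{
 *		struct foo_chan *fc = data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		if (fc->active) {
 *			vchan_cookie_complete(&fc->active->vd);
 *			fc->active = NULL;
 *		}
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *		return IRQ_HANDLED;
 *	}
 *
 * The dmaengine callback itself then runs from vc->task, not from here.
 */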

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx)) {
		unsigned long flags;

		spin_lock_irqsave(&vc->lock, flags);
		list_add(&vd->node, &vc->desc_allocated);
		spin_unlock_irqrestore(&vc->lock, flags);
	} else {
		vc->desc_free(vd);
	}
}
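
/*
 * Example: the desc_free callback invoked above is supplied by the
 * driver via vc->desc_free; a minimal sketch, with foo_desc again a
 * hypothetical driver descriptor embedding the virt_dma_desc:
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 */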

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
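
/*
 * Example: for cyclic transfers the interrupt handler reports each
 * completed period instead of completing the descriptor; with the
 * hypothetical foo_chan from above, typically under vc.lock:
 *
 *	vchan_cyclic_callback(&fc->active->vd);
 */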

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	list_add_tail(&vd->node, &vc->desc_terminated);

	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}
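
/*
 * Example: a typical device_terminate_all stops the hardware, then hands
 * the active descriptor over for deferred freeing; fc->active and
 * foo_hw_stop() are hypothetical:
 *
 *	spin_lock_irqsave(&fc->vc.lock, flags);
 *	if (fc->active) {
 *		foo_hw_stop(fc);
 *		vchan_terminate_vdesc(&fc->active->vd);
 *		fc->active = NULL;
 *	}
 *	spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 * The descriptor is not freed here; it stays on desc_terminated until
 * vchan_synchronize() or vchan_get_all_descriptors() reaps it.
 */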

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}
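
/*
 * Example: a driver's "start the next transfer" helper, called with
 * vc.lock held; the foo_* names are hypothetical. Note the list_del():
 * the descriptor is only peeked at, so the driver must take it off
 * desc_issued itself once it commits to it:
 *
 *	static void foo_start_next(struct foo_chan *fc)
 *	{
 *		struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);
 *
 *		if (!vd) {
 *			fc->active = NULL;
 *			return;
 *		}
 *		list_del(&vd->node);
 *		fc->active = container_of(vd, struct foo_desc, vd);
 *		foo_hw_program(fc);
 *	}
 */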

/**
 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all allocated, submitted, issued, completed and terminated
 * descriptors from the internal lists, and provides a list of all
 * descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
					     struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
	list_splice_tail_init(&vc->desc_terminated, head);
}

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
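
/*
 * Example: a sketch of a device_terminate_all built from the helpers
 * above; foo_hw_stop() is hypothetical. As in
 * vchan_free_chan_resources(), the collected descriptors are freed only
 * after vc.lock has been dropped:
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		foo_hw_stop(vc);
 *		vchan_get_all_descriptors(vc, &head);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		vchan_dma_desc_free_list(vc, &head);
 *		return 0;
 *	}
 */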
201
Lars-Peter Clausen2ed08622015-10-20 11:46:29 +0200202/**
203 * vchan_synchronize() - synchronize callback execution to the current context
204 * @vc: virtual channel to synchronize
205 *
206 * Makes sure that all scheduled or active callbacks have finished running. For
207 * proper operation the caller has to ensure that no new callbacks are scheduled
208 * after the invocation of this function started.
Peter Ujfalusi1c7f0722017-11-14 16:32:04 +0200209 * Free up the terminated cyclic descriptor to prevent memory leakage.
Lars-Peter Clausen2ed08622015-10-20 11:46:29 +0200210 */
211static inline void vchan_synchronize(struct virt_dma_chan *vc)
212{
Sascha Hauerf88210112019-12-16 11:53:23 +0100213 LIST_HEAD(head);
Peter Ujfalusi1c7f0722017-11-14 16:32:04 +0200214 unsigned long flags;
215
Lars-Peter Clausen2ed08622015-10-20 11:46:29 +0200216 tasklet_kill(&vc->task);
Peter Ujfalusi1c7f0722017-11-14 16:32:04 +0200217
218 spin_lock_irqsave(&vc->lock, flags);
Sascha Hauerf88210112019-12-16 11:53:23 +0100219
220 list_splice_tail_init(&vc->desc_terminated, &head);
221
Peter Ujfalusi1c7f0722017-11-14 16:32:04 +0200222 spin_unlock_irqrestore(&vc->lock, flags);
Sascha Hauerf88210112019-12-16 11:53:23 +0100223
224 vchan_dma_desc_free_list(vc, &head);
Lars-Peter Clausen2ed08622015-10-20 11:46:29 +0200225}
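
/*
 * Example: drivers usually wire this straight into the dma_device; the
 * foo_* names and the "od" device structure are hypothetical:
 *
 *	static void foo_synchronize(struct dma_chan *chan)
 *	{
 *		vchan_synchronize(to_virt_chan(chan));
 *	}
 *
 *	od->ddev.device_synchronize = foo_synchronize;
 */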

#endif