// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

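/*
 * A job is allocated as one contiguous block: the struct host1x_job itself,
 * followed by the reloc table, the unpin table, the command array and the
 * physical address table. The overflow check below therefore sums all of
 * these sizes in 64 bits, and the layout must match the pointer
 * redistribution done after the allocation succeeds.
 */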
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
                                    u32 num_cmdbufs, u32 num_relocs,
                                    bool skip_firewall)
{
        struct host1x_job *job = NULL;
        unsigned int num_unpins = num_relocs;
        bool enable_firewall;
        u64 total;
        void *mem;

        enable_firewall = IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !skip_firewall;

        if (!enable_firewall)
                num_unpins += num_cmdbufs;

        /* Check that we're not going to overflow */
        total = sizeof(struct host1x_job) +
                (u64)num_relocs * sizeof(struct host1x_reloc) +
                (u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
                (u64)num_cmdbufs * sizeof(struct host1x_job_cmd) +
                (u64)num_unpins * sizeof(dma_addr_t) +
                (u64)num_unpins * sizeof(u32 *);
        if (total > ULONG_MAX)
                return NULL;

        mem = job = kzalloc(total, GFP_KERNEL);
        if (!job)
                return NULL;

        job->enable_firewall = enable_firewall;

        kref_init(&job->ref);
        job->channel = ch;

        /* Redistribute memory to the structs */
        mem += sizeof(struct host1x_job);
        job->relocs = num_relocs ? mem : NULL;
        mem += num_relocs * sizeof(struct host1x_reloc);
        job->unpins = num_unpins ? mem : NULL;
        mem += num_unpins * sizeof(struct host1x_job_unpin_data);
        job->cmds = num_cmdbufs ? mem : NULL;
        mem += num_cmdbufs * sizeof(struct host1x_job_cmd);
        job->addr_phys = num_unpins ? mem : NULL;

        job->reloc_addr_phys = job->addr_phys;
        job->gather_addr_phys = &job->addr_phys[num_relocs];

        return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
        kref_get(&job->ref);
        return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
        struct host1x_job *job = container_of(ref, struct host1x_job, ref);

        if (job->release)
                job->release(job);

        if (job->waiter)
                host1x_intr_put_ref(job->syncpt->host, job->syncpt->id,
                                    job->waiter, false);

        if (job->syncpt)
                host1x_syncpt_put(job->syncpt);

        kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
        kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
                           unsigned int words, unsigned int offset)
{
        struct host1x_job_gather *gather = &job->cmds[job->num_cmds].gather;

        gather->words = words;
        gather->bo = bo;
        gather->offset = offset;

        job->num_cmds++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

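/*
 * Append a syncpoint wait to the job's command stream. Waits are stored in
 * the same cmds array as gathers and are distinguished by the is_wait flag,
 * which the pinning and patching loops below use to skip them.
 */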
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
                         bool relative, u32 next_class)
{
        struct host1x_job_cmd *cmd = &job->cmds[job->num_cmds];

        cmd->is_wait = true;
        cmd->wait.id = id;
        cmd->wait.threshold = thresh;
        cmd->wait.next_class = next_class;
        cmd->wait.relative = relative;

        job->num_cmds++;
}
EXPORT_SYMBOL(host1x_job_add_wait);

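/*
 * Resolve and pin every buffer the job references: first the relocation
 * targets, then (unless the firewall will copy them) the gather buffers
 * themselves. Each successfully pinned buffer is recorded in the unpin
 * table so host1x_job_unpin() can undo the work on failure or completion.
 */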
static int pin_job(struct host1x *host, struct host1x_job *job)
{
        struct host1x_client *client = job->client;
        struct device *dev = client->dev;
        struct host1x_job_gather *g;
        struct iommu_domain *domain;
        struct sg_table *sgt;
        unsigned int i;
        int err;

        domain = iommu_get_domain_for_dev(dev);
        job->num_unpins = 0;

        for (i = 0; i < job->num_relocs; i++) {
                struct host1x_reloc *reloc = &job->relocs[i];
                dma_addr_t phys_addr, *phys;

                reloc->target.bo = host1x_bo_get(reloc->target.bo);
                if (!reloc->target.bo) {
                        err = -EINVAL;
                        goto unpin;
                }

                /*
                 * If the client device is not attached to an IOMMU, the
                 * physical address of the buffer object can be used.
                 *
                 * Similarly, when an IOMMU domain is shared between all
                 * host1x clients, the IOVA is already available, so no
                 * need to map the buffer object again.
                 *
                 * XXX Note that this isn't always safe to do because it
                 * relies on an assumption that no cache maintenance is
                 * needed on the buffer objects.
                 */
                if (!domain || client->group)
                        phys = &phys_addr;
                else
                        phys = NULL;

                sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
                if (IS_ERR(sgt)) {
                        err = PTR_ERR(sgt);
                        goto unpin;
                }

                if (sgt) {
                        unsigned long mask = HOST1X_RELOC_READ |
                                             HOST1X_RELOC_WRITE;
                        enum dma_data_direction dir;

                        switch (reloc->flags & mask) {
                        case HOST1X_RELOC_READ:
                                dir = DMA_TO_DEVICE;
                                break;

                        case HOST1X_RELOC_WRITE:
                                dir = DMA_FROM_DEVICE;
                                break;

                        case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
                                dir = DMA_BIDIRECTIONAL;
                                break;

                        default:
                                err = -EINVAL;
                                goto unpin;
                        }

                        err = dma_map_sgtable(dev, sgt, dir, 0);
                        if (err)
                                goto unpin;

                        job->unpins[job->num_unpins].dev = dev;
                        job->unpins[job->num_unpins].dir = dir;
                        phys_addr = sg_dma_address(sgt->sgl);
                }

                job->addr_phys[job->num_unpins] = phys_addr;
                job->unpins[job->num_unpins].bo = reloc->target.bo;
                job->unpins[job->num_unpins].sgt = sgt;
                job->num_unpins++;
        }

        /*
         * We will copy the contents of the gather buffers later, so there
         * is no need to hold and pin them here.
         */
        if (job->enable_firewall)
                return 0;

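        /*
         * Pin and map the gather buffers. When the host1x device manages
         * its own IOMMU domain, a contiguous IOVA range is allocated and
         * the scatterlist is mapped read-only into it; otherwise the
         * buffers are mapped through the DMA API, or their physical
         * addresses are used directly when no IOMMU is attached.
         */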
        for (i = 0; i < job->num_cmds; i++) {
                size_t gather_size = 0;
                struct scatterlist *sg;
                dma_addr_t phys_addr;
                unsigned long shift;
                struct iova *alloc;
                dma_addr_t *phys;
                unsigned int j;

                if (job->cmds[i].is_wait)
                        continue;

                g = &job->cmds[i].gather;

                g->bo = host1x_bo_get(g->bo);
                if (!g->bo) {
                        err = -EINVAL;
                        goto unpin;
                }

                /*
                 * If the host1x is not attached to an IOMMU, there is no need
                 * to map the buffer object for the host1x, since the physical
                 * address can simply be used.
                 */
                if (!iommu_get_domain_for_dev(host->dev))
                        phys = &phys_addr;
                else
                        phys = NULL;

                sgt = host1x_bo_pin(host->dev, g->bo, phys);
                if (IS_ERR(sgt)) {
                        err = PTR_ERR(sgt);
                        goto put;
                }

                if (host->domain) {
                        for_each_sgtable_sg(sgt, sg, j)
                                gather_size += sg->length;
                        gather_size = iova_align(&host->iova, gather_size);

                        shift = iova_shift(&host->iova);
                        alloc = alloc_iova(&host->iova, gather_size >> shift,
                                           host->iova_end >> shift, true);
                        if (!alloc) {
                                err = -ENOMEM;
                                goto put;
                        }

                        /* iommu_map_sgtable() returns the number of bytes mapped */
                        err = iommu_map_sgtable(host->domain,
                                                iova_dma_addr(&host->iova, alloc),
                                                sgt, IOMMU_READ);
                        if (err == 0) {
                                __free_iova(&host->iova, alloc);
                                err = -EINVAL;
                                goto put;
                        }

                        job->unpins[job->num_unpins].size = gather_size;
                        phys_addr = iova_dma_addr(&host->iova, alloc);
                } else if (sgt) {
                        err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
                        if (err)
                                goto put;

                        job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
                        job->unpins[job->num_unpins].dev = host->dev;
                        phys_addr = sg_dma_address(sgt->sgl);
                }

                job->addr_phys[job->num_unpins] = phys_addr;
                job->gather_addr_phys[i] = phys_addr;

                job->unpins[job->num_unpins].bo = g->bo;
                job->unpins[job->num_unpins].sgt = sgt;
                job->num_unpins++;
        }

        return 0;

put:
        host1x_bo_put(g->bo);
unpin:
        host1x_job_unpin(job);
        return err;
}

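/*
 * Patch a single gather: for every relocation that targets this command
 * buffer, write the pinned DMA address of the target buffer into the
 * command stream. With the firewall enabled the patch is applied to the
 * gather copy; otherwise the command buffer is mapped and patched in place.
 */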
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
        void *cmdbuf_addr = NULL;
        struct host1x_bo *cmdbuf = g->bo;
        unsigned int i;

        /* pin & patch the relocs for one gather */
        for (i = 0; i < job->num_relocs; i++) {
                struct host1x_reloc *reloc = &job->relocs[i];
                u32 reloc_addr = (job->reloc_addr_phys[i] +
                                  reloc->target.offset) >> reloc->shift;
                u32 *target;

                /* skip all other gathers */
                if (cmdbuf != reloc->cmdbuf.bo)
                        continue;

                if (job->enable_firewall) {
                        target = (u32 *)job->gather_copy_mapped +
                                 reloc->cmdbuf.offset / sizeof(u32) +
                                 g->offset / sizeof(u32);
                        goto patch_reloc;
                }

                if (!cmdbuf_addr) {
                        cmdbuf_addr = host1x_bo_mmap(cmdbuf);

                        if (unlikely(!cmdbuf_addr)) {
                                pr_err("Could not map cmdbuf for relocation\n");
                                return -ENOMEM;
                        }
                }

                target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
                *target = reloc_addr;
        }

        if (cmdbuf_addr)
                host1x_bo_munmap(cmdbuf, cmdbuf_addr);

        return 0;
}

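/*
 * Verify that the relocation at the head of the firewall's reloc table
 * matches the command buffer word that is about to be patched.
 */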
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
                        unsigned int offset)
{
        offset *= sizeof(u32);

        if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
                return false;

        /* relocation shift value validation isn't implemented yet */
        if (reloc->shift)
                return false;

        return true;
}

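/*
 * Parser state for the software firewall. The firewall walks the copied
 * command stream one word at a time, tracking the current class, register
 * offset and remaining word count, and consumes relocations in order as
 * address registers are written.
 */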
struct host1x_firewall {
        struct host1x_job *job;
        struct device *dev;

        unsigned int num_relocs;
        struct host1x_reloc *reloc;

        struct host1x_bo *cmdbuf;
        unsigned int offset;

        u32 words;
        u32 class;
        u32 reg;
        u32 mask;
        u32 count;
};

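/*
 * A write to an address register must be backed by the next pending
 * relocation; anything else is rejected. Drivers that do not provide an
 * is_addr_reg() callback skip this check entirely.
 */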
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
        if (!fw->job->is_addr_reg)
                return 0;

        if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
                if (!fw->num_relocs)
                        return -EINVAL;

                if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
                        return -EINVAL;

                fw->num_relocs--;
                fw->reloc++;
        }

        return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
        if (!fw->job->is_valid_class) {
                if (fw->class != class)
                        return -EINVAL;
        } else {
                if (!fw->job->is_valid_class(fw->class))
                        return -EINVAL;
        }

        return 0;
}

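/*
 * The three helpers below validate the payloads of mask, incrementing and
 * non-incrementing write opcodes, checking every touched register via
 * check_register() and keeping the word and offset counters in sync.
 */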
static int check_mask(struct host1x_firewall *fw)
{
        u32 mask = fw->mask;
        u32 reg = fw->reg;
        int ret;

        while (mask) {
                if (fw->words == 0)
                        return -EINVAL;

                if (mask & 1) {
                        ret = check_register(fw, reg);
                        if (ret < 0)
                                return ret;

                        fw->words--;
                        fw->offset++;
                }
                mask >>= 1;
                reg++;
        }

        return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
        u32 count = fw->count;
        u32 reg = fw->reg;
        int ret;

        while (count) {
                if (fw->words == 0)
                        return -EINVAL;

                ret = check_register(fw, reg);
                if (ret < 0)
                        return ret;

                reg++;
                fw->words--;
                fw->offset++;
                count--;
        }

        return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
        u32 count = fw->count;
        int ret;

        while (count) {
                if (fw->words == 0)
                        return -EINVAL;

                ret = check_register(fw, fw->reg);
                if (ret < 0)
                        return ret;

                fw->words--;
                fw->offset++;
                count--;
        }

        return 0;
}

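/*
 * Walk one gather in the copied command stream and decode the opcode in
 * the top four bits of each word: setclass (0), incrementing (1),
 * non-incrementing (2) and mask (3) writes are validated; opcodes 4 and
 * 14 are passed through unchecked; anything else fails the job.
 */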
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
        u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
                (g->offset / sizeof(u32));
        u32 job_class = fw->class;
        int err = 0;

        fw->words = g->words;
        fw->cmdbuf = g->bo;
        fw->offset = 0;

        while (fw->words && !err) {
                u32 word = cmdbuf_base[fw->offset];
                u32 opcode = (word & 0xf0000000) >> 28;

                fw->mask = 0;
                fw->reg = 0;
                fw->count = 0;
                fw->words--;
                fw->offset++;

                switch (opcode) {
                case 0:
                        fw->class = word >> 6 & 0x3ff;
                        fw->mask = word & 0x3f;
                        fw->reg = word >> 16 & 0xfff;
                        err = check_class(fw, job_class);
                        if (!err)
                                err = check_mask(fw);
                        if (err)
                                goto out;
                        break;
                case 1:
                        fw->reg = word >> 16 & 0xfff;
                        fw->count = word & 0xffff;
                        err = check_incr(fw);
                        if (err)
                                goto out;
                        break;

                case 2:
                        fw->reg = word >> 16 & 0xfff;
                        fw->count = word & 0xffff;
                        err = check_nonincr(fw);
                        if (err)
                                goto out;
                        break;

                case 3:
                        fw->mask = word & 0xffff;
                        fw->reg = word >> 16 & 0xfff;
                        err = check_mask(fw);
                        if (err)
                                goto out;
                        break;
                case 4:
                case 14:
                        break;
                default:
                        err = -EINVAL;
                        break;
                }
        }

out:
        return err;
}

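/*
 * With the firewall enabled, all gathers are copied into one write-combined
 * allocation and validated there, so the hardware executes the validated
 * copy rather than user-controlled memory.
 */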
static inline int copy_gathers(struct device *host, struct host1x_job *job,
                               struct device *dev)
{
        struct host1x_firewall fw;
        size_t size = 0;
        size_t offset = 0;
        unsigned int i;

        fw.job = job;
        fw.dev = dev;
        fw.reloc = job->relocs;
        fw.num_relocs = job->num_relocs;
        fw.class = job->class;

        for (i = 0; i < job->num_cmds; i++) {
                struct host1x_job_gather *g;

                if (job->cmds[i].is_wait)
                        continue;

                g = &job->cmds[i].gather;

                size += g->words * sizeof(u32);
        }

        /*
         * Try a non-blocking allocation from the higher-priority pools
         * first, as waiting for the allocation here is a major performance
         * hit.
         */
        job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
                                               GFP_NOWAIT);

        /* the higher-priority allocation failed, try the generic blocking one */
        if (!job->gather_copy_mapped)
                job->gather_copy_mapped = dma_alloc_wc(host, size,
                                                       &job->gather_copy,
                                                       GFP_KERNEL);
        if (!job->gather_copy_mapped)
                return -ENOMEM;

        job->gather_copy_size = size;

        for (i = 0; i < job->num_cmds; i++) {
                struct host1x_job_gather *g;
                void *gather;

                if (job->cmds[i].is_wait)
                        continue;
                g = &job->cmds[i].gather;

                /* Copy the gather */
                gather = host1x_bo_mmap(g->bo);
                memcpy(job->gather_copy_mapped + offset, gather + g->offset,
                       g->words * sizeof(u32));
                host1x_bo_munmap(g->bo, gather);

                /* Store the location in the buffer */
                g->base = job->gather_copy;
                g->offset = offset;

                /* Validate the job */
                if (validate(&fw, g))
                        return -EINVAL;

                offset += g->words * sizeof(u32);
        }

        /* No relocs should remain at this point */
        if (fw.num_relocs)
                return -EINVAL;

        return 0;
}

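/*
 * Prepare a job for submission: pin all referenced buffers, optionally
 * copy and validate the gathers, deduplicate gathers that share a buffer
 * and patch in the relocated addresses.
 */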
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
        int err;
        unsigned int i, j;
        struct host1x *host = dev_get_drvdata(dev->parent);

        /* pin memory */
        err = pin_job(host, job);
        if (err)
                goto out;

        if (job->enable_firewall) {
                err = copy_gathers(host->dev, job, dev);
                if (err)
                        goto out;
        }

        /* patch gathers */
        for (i = 0; i < job->num_cmds; i++) {
                struct host1x_job_gather *g;

                if (job->cmds[i].is_wait)
                        continue;
                g = &job->cmds[i].gather;

                /* process each gather buffer only once */
                if (g->handled)
                        continue;

                /* copy_gathers() already set the gather's base if the firewall is enabled */
                if (!job->enable_firewall)
                        g->base = job->gather_addr_phys[i];

                for (j = i + 1; j < job->num_cmds; j++) {
                        if (!job->cmds[j].is_wait &&
                            job->cmds[j].gather.bo == g->bo) {
                                job->cmds[j].gather.handled = true;
                                job->cmds[j].gather.base = g->base;
                        }
                }

                err = do_relocs(job, g);
                if (err)
                        break;
        }

out:
        if (err)
                host1x_job_unpin(job);
        wmb();

        return err;
}
EXPORT_SYMBOL(host1x_job_pin);

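/*
 * Release everything pin_job() set up: undo any IOVA mappings made through
 * the host1x IOMMU domain, tear down DMA mappings, unpin and release the
 * buffers, and free the firewall's gather copy if one was allocated.
 */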
void host1x_job_unpin(struct host1x_job *job)
{
        struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
        unsigned int i;

        for (i = 0; i < job->num_unpins; i++) {
                struct host1x_job_unpin_data *unpin = &job->unpins[i];
                struct device *dev = unpin->dev ?: host->dev;
                struct sg_table *sgt = unpin->sgt;

                if (!job->enable_firewall && unpin->size && host->domain) {
                        iommu_unmap(host->domain, job->addr_phys[i],
                                    unpin->size);
                        free_iova(&host->iova,
                                  iova_pfn(&host->iova, job->addr_phys[i]));
                }

                if (unpin->dev && sgt)
                        dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);

                host1x_bo_unpin(dev, unpin->bo, sgt);
                host1x_bo_put(unpin->bo);
        }

        job->num_unpins = 0;

        if (job->gather_copy_size)
                dma_free_wc(host->dev, job->gather_copy_size,
                            job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
        dev_dbg(dev, " SYNCPT_ID   %d\n", job->syncpt->id);
        dev_dbg(dev, " SYNCPT_VAL  %d\n", job->syncpt_end);
        dev_dbg(dev, " FIRST_GET   0x%x\n", job->first_get);
        dev_dbg(dev, " TIMEOUT     %d\n", job->timeout);
        dev_dbg(dev, " NUM_SLOTS   %d\n", job->num_slots);
        dev_dbg(dev, " NUM_HANDLES %d\n", job->num_unpins);
}