/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_ttm.h"

static void radeon_ttm_debugfs_init(struct radeon_device *rdev);

static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
			      struct ttm_resource *bo_mem);
static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);

struct radeon_device *radeon_get_rdev(struct ttm_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

static int radeon_ttm_init_vram(struct radeon_device *rdev)
{
	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM,
				  false, rdev->mc.real_vram_size >> PAGE_SHIFT);
}

static int radeon_ttm_init_gtt(struct radeon_device *rdev)
{
	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT,
				  true, rdev->mc.gtt_size >> PAGE_SHIFT);
}

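/*
 * radeon_evict_flags - choose placements for a BO that TTM wants to evict.
 *
 * BOs that are not radeon BOs are simply evicted to system memory.  VRAM BOs
 * in the CPU-visible part of VRAM are first retried in the CPU-inaccessible
 * part of VRAM, with GTT as the only busy placement so that other VRAM BOs
 * are not displaced; everything else falls back to GTT or the CPU domain.
 */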
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].mem_type == TTM_PL_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
					  filp->private_data);
}

static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict,
			struct ttm_resource *new_mem,
			struct ttm_resource *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

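/*
 * radeon_bo_move - TTM move callback.
 *
 * Fast paths (system memory and unbound or freshly bound TT) are handled
 * without copying.  Moves between system memory and VRAM are split into two
 * hops through TT by returning -EMULTIHOP, and the remaining moves use the
 * GPU copy ring when it is ready, falling back to a CPU memcpy otherwise.
 */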
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	struct ttm_resource *old_mem = &bo->mem;
	int r;

	if (new_mem->mem_type == TTM_PL_TT) {
		r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	r = ttm_bo_wait_ctx(bo, ctx);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    new_mem->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}

	if (old_mem->mem_type == TTM_PL_TT &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->mem);
		ttm_bo_assign_mem(bo, new_mem);
		goto out;
	}
	if (rdev->ring[radeon_copy_ring_index(rdev)].ready &&
	    rdev->asic->copy.copy != NULL) {
		if ((old_mem->mem_type == TTM_PL_SYSTEM &&
		     new_mem->mem_type == TTM_PL_VRAM) ||
		    (old_mem->mem_type == TTM_PL_VRAM &&
		     new_mem->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}

		r = radeon_move_blit(bo, evict, new_mem, old_mem);
	} else {
		r = -ENODEV;
	}

	if (r) {
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

out:
	/* update statistics */
	atomic64_add(bo->base.size, &rdev->num_bytes_moved);
	radeon_bo_move_notify(bo, evict, new_mem);
	return 0;
}

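/*
 * radeon_ttm_io_mem_reserve - set up the bus address used for CPU access to
 * a resource.  AGP apertures and CPU-visible VRAM are mapped write-combined;
 * VRAM beyond the visible window is rejected with -EINVAL.
 */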
static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = (mem->start << PAGE_SHIFT) +
				rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
			mem->bus.caching = ttm_write_combined;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.offset += rdev->mc.aper_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
#ifdef __alpha__
308 /*
309 * Alpha: use bus.addr to hold the ioremap() return,
310 * so we can modify bus.base below.
311 */
		mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size);
		if (!mem->bus.addr)
			return -ENOMEM;

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.offset.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
			rdev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_tt		ttm;
	u64			offset;

	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	bool			bound;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
				   pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = dma_map_sgtable(rdev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;

	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
				       ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned);
	return r;
}

static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg || !ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sgtable(rdev->dev, ttm->sg, direction, 0);

	for_each_sgtable_page(ttm->sg, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}

static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;

	return (gtt->bound);
}

static int radeon_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		radeon_ttm_tt_pin_userptr(bdev, ttm);
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching == ttm_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %u pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(bdev, ttm);

	if (!gtt->bound)
		return;

	radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);

	gtt->bound = false;
}

static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_ttm_backend_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

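/*
 * radeon_ttm_tt_create - allocate the TTM backend for a BO, using the AGP
 * backend on AGP boards and otherwise a scatter-gather TT whose caching mode
 * is derived from the BO's RADEON_GEM_GTT_* flags.
 */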
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct radeon_ttm_tt *gtt;
	enum ttm_caching caching;
	struct radeon_bo *rbo;
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
					 page_flags);
	}
#endif
	rbo = container_of(bo, struct radeon_bo, tbo);

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}

	if (rbo->flags & RADEON_GEM_GTT_UC)
		caching = ttm_uncached;
	else if (rbo->flags & RADEON_GEM_GTT_WC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
						  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP)
		return NULL;
#endif

	if (!ttm)
		return NULL;
	return container_of(ttm, struct radeon_ttm_tt, ttm);
}

static int radeon_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
					       ttm->num_pages);
		return 0;
	}

	return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
}

static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	return ttm_pool_free(&rdev->mman.bdev.pool, ttm);
}

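/*
 * radeon_ttm_tt_set_userptr - mark a TT as backed by user memory and record
 * the owning mm and the RADEON_GEM_USERPTR_* flags for later pinning.
 */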
int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
			      struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
}

bool radeon_ttm_tt_is_bound(struct ttm_device *bdev,
			    struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_is_bound(ttm);
#endif
	return radeon_ttm_backend_is_bound(ttm);
}

static int radeon_ttm_tt_bind(struct ttm_device *bdev,
			      struct ttm_tt *ttm,
			      struct ttm_resource *bo_mem)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);
#endif

	if (!bo_mem)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_bind(ttm, bo_mem);
#endif

	return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
}

static void radeon_ttm_tt_unbind(struct ttm_device *bdev,
				 struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	radeon_ttm_backend_unbind(bdev, ttm);
}

static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
				  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_unbind(ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	radeon_ttm_backend_destroy(bdev, ttm);
}

bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev,
			       struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
			       struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static void
radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	radeon_bo_move_notify(bo, false, NULL);
}

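/* TTM device callbacks used by the radeon driver */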
static struct ttm_device_funcs radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.ttm_tt_destroy = &radeon_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.delete_mem_notify = &radeon_bo_delete_mem_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
};

int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	/* No other users of the address space, so set it to 0 */
	r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
			    rdev->ddev->anon_inode->i_mapping,
			    rdev->ddev->vma_offset_manager,
			    rdev->need_swiotlb,
			    dma_addressing_limited(&rdev->pdev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;

	r = radeon_ttm_init_vram(rdev);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stolen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stolen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stolen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));

	r = radeon_ttm_init_gtt(rdev);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	radeon_ttm_debugfs_init(rdev);

	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;

	if (rdev->stolen_vga_memory) {
		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stolen_vga_memory);
			radeon_bo_unreserve(rdev->stolen_vga_memory);
		}
		radeon_bo_unref(&rdev->stolen_vga_memory);
	}
	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
	ttm_device_fini(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_resource_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

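/*
 * radeon_ttm_fault - VM fault handler, wrapping the generic TTM fault path
 * while holding rdev->pm.mclk_lock for reading.
 */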
static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_ttm_vm_ops = {
	.fault = radeon_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int r;
	struct drm_file *file_priv = filp->private_data;
	struct radeon_device *rdev = file_priv->minor->dev->dev_private;

	if (rdev == NULL)
		return -EINVAL;

	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0))
		return r;

	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_vram_dump_table_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev,
							    TTM_PL_VRAM);
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int radeon_ttm_page_pool_show(struct seq_file *m, void *data)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;

	return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
}

static int radeon_mm_gtt_dump_table_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev,
							    TTM_PL_TT);
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_mm_vram_dump_table);
DEFINE_SHOW_ATTRIBUTE(radeon_mm_gtt_dump_table);
DEFINE_SHOW_ATTRIBUTE(radeon_ttm_page_pool);

static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t __user *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static void radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("radeon_vram", 0444, root, rdev,
			    &radeon_ttm_vram_fops);

	debugfs_create_file("radeon_gtt", 0444, root, rdev,
			    &radeon_ttm_gtt_fops);

	debugfs_create_file("radeon_vram_mm", 0444, root, rdev,
			    &radeon_mm_vram_dump_table_fops);
	debugfs_create_file("radeon_gtt_mm", 0444, root, rdev,
			    &radeon_mm_gtt_dump_table_fops);
	debugfs_create_file("ttm_page_pool", 0444, root, rdev,
			    &radeon_ttm_page_pool_fops);
#endif
}