// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_ring.h"
#include "intel_timeline.h"

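/*
 * Recompute the free space between the last-known hardware HEAD and the
 * software emit offset, caching the result in ring->space. A sketch of
 * the usual power-of-two ring arithmetic (the exact rule lives in
 * __intel_ring_space(), declared elsewhere):
 *
 *	space = (head - emit - CACHELINE_BYTES) & (size - 1);
 *
 * where the assumed cacheline gap stops TAIL from ever catching up to
 * HEAD, keeping a full ring distinguishable from an empty one.
 */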
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}

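/*
 * Grab an extra pin reference on a ring that is already pinned; the
 * caller must guarantee an existing pin (hence the GEM_BUG_ON), as this
 * variant does none of the mapping work of intel_ring_pin().
 */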
void __intel_ring_pin(struct intel_ring *ring)
{
	GEM_BUG_ON(!atomic_read(&ring->pin_count));
	atomic_inc(&ring->pin_count);
}

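/*
 * Pin the ring into the GGTT and map it for CPU access. Only the first
 * pin does real work (atomic_fetch_inc() returns the old count); later
 * pins merely bump the refcount. The offset bias keeps the ring away
 * from GGTT offset 0 (see the wraparound note below), and the mapping
 * strategy follows placement: stolen memory is only reachable through
 * the mappable aperture, everything else can be mapped directly.
 */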
int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (i915_gem_object_is_stolen(vma->obj))
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_ggtt_pin(vma, ww, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma)) {
		addr = (void __force *)i915_vma_pin_iomap(vma);
	} else {
		int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);

		addr = i915_gem_object_pin_map(vma->obj, type);
	}

	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}

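/*
 * Move all three ring offsets (hardware tail, last-known head and
 * software emit) to the same wrapped position, so the ring reads as
 * idle at @tail with all of its usable space free again.
 */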
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

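/*
 * Drop one pin reference; the final unpin tears down the CPU mapping,
 * marks the backing store purgeable so it may be reclaimed, and
 * releases the GGTT binding taken in intel_ring_pin().
 */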
void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}

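/*
 * Allocate the backing object for a ring, trying placements from most
 * to least preferred: device-local memory first, then stolen memory
 * (only useful if a GGTT aperture exists through which to map it), and
 * finally internal system-memory pages. The volatile flags indicate the
 * contents need not be preserved while the ring is unpinned.
 */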
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_PM_VOLATILE);
	if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

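/*
 * Create a ring of @size bytes, which must be a power of two small
 * enough to encode in RING_CTL. ring->wrap caches the shift that moves
 * the top bit of a ring offset into the sign bit of a u32, presumably
 * for direction tests on offset differences (e.g. 32 - 12 = 20 for a
 * 4 KiB ring).
 */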
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

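/*
 * Release callback for ring->ref, invoked via kref_put() once the last
 * reference is dropped; by then the ring is expected to be unpinned.
 */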
void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}

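/*
 * Slow path for intel_ring_begin(): walk the timeline's requests,
 * oldest first, to find the first one on this ring whose retirement
 * would free at least @bytes, then sleep on it and retire everything up
 * to it. Finding none means the caller asked for more space than the
 * ring can ever provide, hence the -ENOSPC.
 */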
static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

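/*
 * Reserve space for @num_dwords in the ring and return the location to
 * write them. Note that intel_ring_begin() advances ring->emit up
 * front; the matching intel_ring_advance() only sanity-checks that the
 * caller wrote exactly what was reserved. A minimal sketch of the
 * caller contract (MI_NOOP stands in for real commands):
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(rq, 2); /. even count, packets are qword aligned
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 *
 * If the request would not fit in the bytes left before the end of the
 * buffer, the tail is padded with MI_NOOPs and emission wraps to 0.
 */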
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}

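/*
 * Padding helper for callers that need the next packet to start on a
 * fresh cacheline: the dwords between the current emit offset and the
 * boundary are filled with MI_NOOPs, two per quadword via memset64().
 * The count is always even because intel_ring_begin() keeps emission
 * qword aligned.
 */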
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs + num_dwords);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif