/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in @sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that @sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

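/*
 * Example (illustrative sketch only, not part of this file): a driver that
 * owns two schedulers might initialize a per-context entity as below. The
 * names ctx, my_sched_a and my_sched_b are hypothetical.
 *
 *	struct drm_gpu_scheduler *scheds[] = { &my_sched_a, &my_sched_b };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    scheds, ARRAY_SIZE(scheds), NULL);
 *	if (ret)
 *		return ret;
 */
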
/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in @sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

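/*
 * Example (illustrative sketch only): switching an entity to a new set of
 * schedulers while holding the same lock the driver uses around
 * drm_sched_job_arm() and drm_sched_entity_push_job(). The lock and names
 * below are hypothetical.
 *
 *	mutex_lock(&ctx->submit_lock);
 *	drm_sched_entity_modify_sched(&ctx->entity, new_scheds, num_new);
 *	mutex_unlock(&ctx->submit_lock);
 */
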
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	int r;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	while (!xa_empty(&job->dependencies)) {
		f = xa_erase(&job->dependencies, job->last_dependency++);
		r = dma_fence_add_callback(f, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (!r)
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	prev = dma_fence_get(entity->last_scheduled);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_set_error(&s_fence->finished, -ESRCH);

		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
						    drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time in jiffies to wait for the job queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process disable any more IBs enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

Christian König7b105742018-08-06 14:58:56 +0200257/**
Lee Jones04be0c52021-04-16 15:37:09 +0100258 * drm_sched_entity_fini - Destroy a context entity
Christian König620e7622018-08-06 14:25:32 +0200259 *
260 * @entity: scheduler entity
261 *
Daniel Vetter981b04d2021-08-05 12:46:51 +0200262 * Cleanups up @entity which has been initialized by drm_sched_entity_init().
Christian König620e7622018-08-06 14:25:32 +0200263 *
Daniel Vetter981b04d2021-08-05 12:46:51 +0200264 * If there are potentially job still in flight or getting newly queued
265 * drm_sched_entity_flush() must be called first. This function then goes over
266 * the entity and signals all jobs with an error code if the process was killed.
Christian König620e7622018-08-06 14:25:32 +0200267 */
268void drm_sched_entity_fini(struct drm_sched_entity *entity)
269{
Christian König2fdb8a82022-09-29 14:50:56 +0200270 /*
271 * If consumption of existing IBs wasn't completed. Forcefully remove
272 * them here. Also makes sure that the scheduler won't touch this entity
273 * any more.
Christian König620e7622018-08-06 14:25:32 +0200274 */
Christian König2fdb8a82022-09-29 14:50:56 +0200275 drm_sched_entity_kill(entity);
Andrey Grodzovsky83a77722019-11-04 16:30:05 -0500276
Christian König2fdb8a82022-09-29 14:50:56 +0200277 if (entity->dependency) {
278 dma_fence_remove_callback(entity->dependency, &entity->cb);
279 dma_fence_put(entity->dependency);
280 entity->dependency = NULL;
Christian König620e7622018-08-06 14:25:32 +0200281 }
282
283 dma_fence_put(entity->last_scheduled);
284 entity->last_scheduled = NULL;
Christian König620e7622018-08-06 14:25:32 +0200285}
286EXPORT_SYMBOL(drm_sched_entity_fini);
287
/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

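/*
 * Example (illustrative sketch only): typical teardown from a hypothetical
 * driver's context-destroy path; drm_sched_entity_destroy() flushes with
 * MAX_WAIT_SCHED_ENTITY_Q_EMPTY and then finalizes the entity.
 *
 *	static void my_ctx_destroy(struct my_ctx *ctx)
 *	{
 *		drm_sched_entity_destroy(&ctx->entity);
 *		kfree(ctx);
 *	}
 */
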
/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	if (!xa_empty(&job->dependencies))
		return xa_erase(&job->dependencies, job->last_dependency++);

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);

	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we add a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next)
			drm_sched_rq_update_fifo(entity, next->submit_ts);
	}

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = entity->last_scheduled;

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
	sched_job->submit_ts = ktime_get();

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, sched_job->submit_ts);

		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
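
/*
 * Example (illustrative sketch only): the usual submission flow from a
 * hypothetical driver. The job is initialized against the entity, then armed
 * and pushed under one common per-entity lock (ctx->submit_lock here is an
 * assumption), which preserves the fence seqno ordering noted above.
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, my_owner);
 *	if (ret)
 *		return ret;
 *
 *	mutex_lock(&ctx->submit_lock);
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 *	mutex_unlock(&ctx->submit_lock);
 */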