/*
 * Disregards a certain amount of sleep time (sched_latency_ns) and
 * considers the task to be running during that period. This gives it
 * a service deficit on wakeup, allowing it to run sooner.
 */
SCHED_FEAT(FAIR_SLEEPERS, 1)

/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)

/*
 * By not normalizing the sleep time, heavy tasks get an effectively
 * longer period, and lighter tasks an effectively shorter period,
 * during which they are considered running.
 */
SCHED_FEAT(NORMALIZED_SLEEPER, 0)

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks
 */
SCHED_FEAT(START_DEBIT, 1)
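
/*
 * Illustrative sketch (an assumption, not necessarily the scheduler's
 * verbatim code): roughly how the sleeper and placement features above
 * are consulted when an entity is placed on the runqueue, in the style
 * of place_entity() in kernel/sched_fair.c:
 *
 *	u64 vruntime = cfs_rq->min_vruntime;
 *
 *	if (initial && sched_feat(START_DEBIT))
 *		vruntime += sched_vslice(cfs_rq, se);	// debit new tasks one slice
 *
 *	if (!initial && sched_feat(FAIR_SLEEPERS)) {
 *		unsigned long thresh = sysctl_sched_latency;	// sleeper credit
 *
 *		if (sched_feat(NORMALIZED_SLEEPER))
 *			thresh = calc_delta_fair(thresh, se);	// scale by weight
 *		if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *			thresh >>= 1;				// only half the credit
 *
 *		vruntime -= thresh;
 *	}
 *
 *	se->vruntime = max_vruntime(se->vruntime, vruntime);
 */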

/*
 * Should wakeups try to preempt running tasks.
 */
SCHED_FEAT(WAKEUP_PREEMPT, 1)

/*
 * Compute wakeup_gran based on task behaviour, clipped to
 * [0, sched_wakeup_gran_ns]
 */
SCHED_FEAT(ADAPTIVE_GRAN, 1)

/*
 * When converting the wakeup granularity to virtual time, do it such
 * that heavier tasks preempting a lighter task have an edge.
 */
SCHED_FEAT(ASYM_GRAN, 1)
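
/*
 * Illustrative sketch (an assumption, not necessarily the scheduler's
 * verbatim code): roughly how ADAPTIVE_GRAN and ASYM_GRAN shape the
 * granularity used by the wakeup-preemption test, in the style of
 * wakeup_gran() in kernel/sched_fair.c:
 *
 *	unsigned long gran = sysctl_sched_wakeup_granularity;
 *
 *	if (sched_feat(ADAPTIVE_GRAN))
 *		gran = adaptive_gran(curr, se);		// clipped to [0, sched_wakeup_gran_ns]
 *
 *	if (sched_feat(ASYM_GRAN))
 *		gran = calc_delta_fair(gran, se);	// scale by the wakee's weight: light
 *							// wakees see a larger gran, heavy a smaller
 *	else
 *		gran = calc_delta_fair(gran, curr);	// scale by current's weight
 *
 *	return gran;
 */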

/*
 * Always wakeup-preempt SYNC wakeups, see SYNC_WAKEUPS.
 */
SCHED_FEAT(WAKEUP_SYNC, 0)

/*
 * Wakeup preempt based on task behaviour. Tasks that do not overlap
 * don't get preempted.
 */
SCHED_FEAT(WAKEUP_OVERLAP, 0)

/*
 * Use the SYNC wakeup hint; pipes and the like use it to indicate that
 * the remote end is likely to consume the data we just wrote, and
 * therefore benefits cache-wise from being placed on the same cpu. See
 * also AFFINE_WAKEUPS.
 */
SCHED_FEAT(SYNC_WAKEUPS, 1)

/*
 * Based on load and program behaviour, see if it makes sense to place
 * a newly woken task on the same cpu as the task that woke it --
 * improve cache locality. Typically used with SYNC wakeups as
 * generated by pipes and the like, see also SYNC_WAKEUPS.
 */
SCHED_FEAT(AFFINE_WAKEUPS, 1)

/*
 * Weaken SYNC hint based on overlap
 */
SCHED_FEAT(SYNC_LESS, 1)

/*
 * Add SYNC hint based on overlap
 */
SCHED_FEAT(SYNC_MORE, 0)
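
/*
 * Illustrative sketch (an assumption, not necessarily the scheduler's
 * verbatim code): roughly how the sync/affine hints above interact when
 * choosing a cpu for a woken task, in the style of select_task_rq_fair()
 * and wake_affine() in kernel/sched_fair.c:
 *
 *	int sync = wake_flags & WF_SYNC;	// hint from the waker (pipes etc.)
 *
 *	if (!sched_feat(SYNC_WAKEUPS))
 *		sync = 0;			// ignore the hint entirely
 *
 *	if (sched_feat(SYNC_LESS) && tasks_overlap(curr, p))
 *		sync = 0;			// overlapping tasks: weaken the hint
 *
 *	if (sched_feat(SYNC_MORE) && !tasks_overlap(curr, p))
 *		sync = 1;			// non-overlapping tasks: add the hint
 *
 *	if (sched_feat(AFFINE_WAKEUPS) && wake_affine(sd, p, sync))
 *		cpu = this_cpu;			// wake on the waker's cpu
 *
 * Here tasks_overlap() is a stand-in for the avg_overlap vs.
 * sysctl_sched_migration_cost comparison, not a real kernel helper.
 */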

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it is likely going to consume data we
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, 0)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; this
 * increases cache locality.
 */
SCHED_FEAT(LAST_BUDDY, 1)

/*
 * Consider buddies to be cache hot; this decreases the likelihood of
 * a cache buddy being migrated away and increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, 1)
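
/*
 * Illustrative sketch (an assumption, not necessarily the scheduler's
 * verbatim code): roughly how the buddy hints above bias the next-task
 * choice, in the style of pick_next_entity() in kernel/sched_fair.c:
 *
 *	struct sched_entity *se = __pick_next_entity(cfs_rq);	// leftmost entity
 *
 *	if (sched_feat(NEXT_BUDDY) && cfs_rq->next &&
 *	    wakeup_preempt_entity(cfs_rq->next, se) < 1)
 *		se = cfs_rq->next;		// the task we woke last
 *
 *	if (sched_feat(LAST_BUDDY) && cfs_rq->last &&
 *	    wakeup_preempt_entity(cfs_rq->last, se) < 1)
 *		se = cfs_rq->last;		// the task that ran before the wakeup
 *
 *	return se;
 *
 * CACHE_HOT_BUDDY is consulted on the load-balance side: task_hot()
 * treats the cfs_rq->next / cfs_rq->last candidates as cache hot, making
 * them less likely to be migrated away.
 */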

/*
 * Use arch dependent cpu power functions
 */
SCHED_FEAT(ARCH_POWER, 0)

SCHED_FEAT(HRTICK, 0)
SCHED_FEAT(DOUBLE_TICK, 0)
SCHED_FEAT(LB_BIAS, 1)
SCHED_FEAT(LB_SHARES_UPDATE, 1)
SCHED_FEAT(ASYM_EFF_LOAD, 1)

/*
 * Spin-wait on mutex acquisition when the mutex owner is running on
 * another cpu -- assumes that when the owner is running, it will soon
 * release the lock. Decreases scheduling overhead.
 */
SCHED_FEAT(OWNER_SPIN, 1)
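
/*
 * Illustrative sketch (an assumption, not necessarily the kernel's
 * verbatim code): the SCHED_FEAT() entries above carry no definition of
 * their own; the including file (kernel/sched.c) typically expands this
 * header twice -- once to build an enum of feature bits and once to
 * build the default bitmask -- and then tests features at runtime:
 *
 *	#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
 *	enum {
 *	#include "sched_features.h"
 *	};
 *	#undef SCHED_FEAT
 *
 *	#define SCHED_FEAT(name, enabled)	(1UL << __SCHED_FEAT_##name) * enabled |
 *	const_debug unsigned int sysctl_sched_features =
 *	#include "sched_features.h"
 *		0;
 *	#undef SCHED_FEAT
 *
 *	#define sched_feat(x)	(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 *
 * The resulting bitmask can be flipped at runtime through
 * /sys/kernel/debug/sched_features on a CONFIG_SCHED_DEBUG kernel.
 */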