/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock_init(&mmio_debug->lock);
	mmio_debug->unclaimed_mmio_check = 1;
}

static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!mmio_debug->suspend_count++) {
		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
		mmio_debug->unclaimed_mmio_check = 0;
	}
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	if (!--mmio_debug->suspend_count)
		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}
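
/*
 * mmio_debug_suspend()/mmio_debug_resume() nest like a refcount: only the
 * outermost suspend saves and disables the unclaimed-mmio check, and only
 * the matching outermost resume restores it. An illustrative sketch of the
 * intended caller pattern (assuming mmio_debug->lock is held, as asserted
 * above):
 *
 *	mmio_debug_suspend(debug);	// first call: save + disable check
 *	mmio_debug_suspend(debug);	// nested call: just bumps the count
 *	mmio_debug_resume(debug);
 *	mmio_debug_resume(debug);	// last call: restore the saved check
 */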

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we
	 * are trying to reset here exists at this point (engines could be
	 * fused off in ICL+), so no waiting for acks.
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}

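/*
 * fw_domain_arm_timer() defers the actual forcewake release: the wakeref
 * taken here is only dropped ~1ms later from the hrtimer callback
 * (intel_uncore_fw_release_timer()), so back-to-back register accesses can
 * reuse a still-awake domain instead of toggling the powerwell for every
 * access.
 */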
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of the driver's wake request colliding
	 * with the hardware's own wake requests, which can cause the
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the GPU state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give the GT some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}
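
/*
 * Note the two-pass structure in fw_domains_get() above (and in the
 * fallback variant below): wake requests are posted to every requested
 * domain in the first loop, and the set-acks are only awaited in the
 * second, so the hardware wakeups of multiple domains can proceed in
 * parallel instead of serializing one ack wait per domain.
 */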

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to read
	 * FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
		container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

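	/*
	 * If the domain was marked active again after the timer was armed
	 * (i.e. a register access reused the wakeref in the meantime), keep
	 * the domain awake and run the timer again instead of dropping the
	 * powerwell.
	 */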
	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
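
/*
 * Illustrative usage sketch (not part of the driver): a sequence that must
 * keep the render powerwell up while poking registers would be bracketed as
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	... MMIO sequence that must not lose the powerwell ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */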

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		spin_lock(&uncore->debug->lock);
		mmio_debug_suspend(uncore->debug);
		spin_unlock(&uncore->debug->lock);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		spin_lock(&uncore->debug->lock);
		mmio_debug_resume(uncore->debug);

		if (check_for_unclaimed_mmio(uncore))
			dev_info(uncore->i915->drm.dev,
				 "Invalid mmio detected during user access\n");
		spin_unlock(&uncore->debug->lock);

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}
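
/*
 * Note the lock nesting in the user_get/user_put pair above: uncore->lock
 * is taken first with uncore->debug->lock nested inside it, so the
 * unclaimed-mmio bookkeeping is serialized against both the user bypass
 * accounting and ordinary forcewake traffic.
 */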

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

Mika Kuoppala | 59bad94 | 2015-01-16 11:34:40 +0200 | [diff] [blame] | 716 | /** |
| 717 | * intel_uncore_forcewake_put - release a forcewake domain reference |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 718 | * @uncore: the intel_uncore structure |
Mika Kuoppala | 59bad94 | 2015-01-16 11:34:40 +0200 | [diff] [blame] | 719 | * @fw_domains: forcewake domains to put references |
| 720 | * |
| 721 | * This function drops the device-level forcewakes for specified |
| 722 | * domains obtained by intel_uncore_forcewake_get(). |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 723 | */ |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 724 | void intel_uncore_forcewake_put(struct intel_uncore *uncore, |
Mika Kuoppala | 48c1026 | 2015-01-16 11:34:41 +0200 | [diff] [blame] | 725 | enum forcewake_domains fw_domains) |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 726 | { |
| 727 | unsigned long irqflags; |
| 728 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 729 | if (!uncore->funcs.force_wake_put) |
Ben Widawsky | ab484f8 | 2013-10-05 17:57:11 -0700 | [diff] [blame] | 730 | return; |
| 731 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 732 | spin_lock_irqsave(&uncore->lock, irqflags); |
| 733 | __intel_uncore_forcewake_put(uncore, fw_domains); |
| 734 | spin_unlock_irqrestore(&uncore->lock, irqflags); |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 735 | } |
| 736 | |
Chris Wilson | a6111f7 | 2015-04-07 16:21:02 +0100 | [diff] [blame] | 737 | /** |
| 738 | * intel_uncore_forcewake_put__locked - grab forcewake domain references |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 739 | * @uncore: the intel_uncore structure |
Chris Wilson | a6111f7 | 2015-04-07 16:21:02 +0100 | [diff] [blame] | 740 | * @fw_domains: forcewake domains to get reference on |
| 741 | * |
| 742 | * See intel_uncore_forcewake_put(). This variant places the onus |
| 743 | * on the caller to explicitly handle the dev_priv->uncore.lock spinlock. |
| 744 | */ |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 745 | void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, |
Chris Wilson | a6111f7 | 2015-04-07 16:21:02 +0100 | [diff] [blame] | 746 | enum forcewake_domains fw_domains) |
| 747 | { |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 748 | lockdep_assert_held(&uncore->lock); |
| 749 | |
| 750 | if (!uncore->funcs.force_wake_put) |
Chris Wilson | a6111f7 | 2015-04-07 16:21:02 +0100 | [diff] [blame] | 751 | return; |
| 752 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 753 | __intel_uncore_forcewake_put(uncore, fw_domains); |
Chris Wilson | a6111f7 | 2015-04-07 16:21:02 +0100 | [diff] [blame] | 754 | } |
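/*
 * Sketch of the __locked calling convention (assumed caller pattern):
 * the caller takes uncore->lock once around the whole sequence instead
 * of paying for it in every call:
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
 *	... critical MMIO sequence ...
 *	intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
 *	spin_unlock_irq(&uncore->lock);
 */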
| 755 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 756 | void assert_forcewakes_inactive(struct intel_uncore *uncore) |
Paulo Zanoni | e998c40 | 2014-02-21 13:52:26 -0300 | [diff] [blame] | 757 | { |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 758 | if (!uncore->funcs.force_wake_get) |
Paulo Zanoni | e998c40 | 2014-02-21 13:52:26 -0300 | [diff] [blame] | 759 | return; |
| 760 | |
Pankaj Bharadiya | a9f236d | 2020-01-15 09:14:54 +0530 | [diff] [blame] | 761 | drm_WARN(&uncore->i915->drm, uncore->fw_domains_active, |
| 762 | "Expected all fw_domains to be inactive, but %08x are still on\n", |
| 763 | uncore->fw_domains_active); |
Chris Wilson | 67e6456 | 2017-10-09 12:03:01 +0100 | [diff] [blame] | 764 | } |
| 765 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 766 | void assert_forcewakes_active(struct intel_uncore *uncore, |
Chris Wilson | 67e6456 | 2017-10-09 12:03:01 +0100 | [diff] [blame] | 767 | enum forcewake_domains fw_domains) |
| 768 | { |
Chris Wilson | b7dc939 | 2019-07-04 11:20:48 +0100 | [diff] [blame] | 769 | struct intel_uncore_forcewake_domain *domain; |
| 770 | unsigned int tmp; |
| 771 | |
| 772 | if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) |
| 773 | return; |
| 774 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 775 | if (!uncore->funcs.force_wake_get) |
Chris Wilson | 67e6456 | 2017-10-09 12:03:01 +0100 | [diff] [blame] | 776 | return; |
| 777 | |
Chris Wilson | 15e7fac | 2019-07-07 16:11:35 +0100 | [diff] [blame] | 778 | spin_lock_irq(&uncore->lock); |
| 779 | |
Daniele Ceraolo Spurio | 87b391b9 | 2019-06-13 16:21:50 -0700 | [diff] [blame] | 780 | assert_rpm_wakelock_held(uncore->rpm); |
Chris Wilson | 67e6456 | 2017-10-09 12:03:01 +0100 | [diff] [blame] | 781 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 782 | fw_domains &= uncore->fw_domains; |
Pankaj Bharadiya | a9f236d | 2020-01-15 09:14:54 +0530 | [diff] [blame] | 783 | drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active, |
| 784 | "Expected %08x fw_domains to be active, but %08x are off\n", |
| 785 | fw_domains, fw_domains & ~uncore->fw_domains_active); |
Chris Wilson | b7dc939 | 2019-07-04 11:20:48 +0100 | [diff] [blame] | 786 | |
| 787 | /* |
| 788 | * Check that the caller has an explicit wakeref and we don't mistake |
| 789 | * it for the auto wakeref. |
| 790 | */ |
Chris Wilson | b7dc939 | 2019-07-04 11:20:48 +0100 | [diff] [blame] | 791 | for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { |
Chris Wilson | badf1f2 | 2019-07-05 08:45:57 +0100 | [diff] [blame] | 792 | unsigned int actual = READ_ONCE(domain->wake_count); |
Chris Wilson | b7dc939 | 2019-07-04 11:20:48 +0100 | [diff] [blame] | 793 | unsigned int expect = 1; |
| 794 | |
Chris Wilson | 77adbd8 | 2019-07-08 16:49:14 +0100 | [diff] [blame] | 795 | if (uncore->fw_domains_timer & domain->mask) |
Chris Wilson | b7dc939 | 2019-07-04 11:20:48 +0100 | [diff] [blame] | 796 | expect++; /* pending automatic release */ |
| 797 | |
Pankaj Bharadiya | a9f236d | 2020-01-15 09:14:54 +0530 | [diff] [blame] | 798 | if (drm_WARN(&uncore->i915->drm, actual < expect, |
| 799 | "Expected domain %d to be held awake by caller, count=%d\n", |
| 800 | domain->id, actual)) |
Chris Wilson | b7dc939 | 2019-07-04 11:20:48 +0100 | [diff] [blame] | 801 | break; |
| 802 | } |
Chris Wilson | 15e7fac | 2019-07-07 16:11:35 +0100 | [diff] [blame] | 803 | |
| 804 | spin_unlock_irq(&uncore->lock); |
Paulo Zanoni | e998c40 | 2014-02-21 13:52:26 -0300 | [diff] [blame] | 805 | } |
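/*
 * Accounting sketch for the loop above: "expect" is one reference for
 * the caller's explicit wakeref, plus one more if the auto-release
 * timer still holds the domain; an "actual" below that means the
 * caller's reference has already been dropped.
 */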
| 806 | |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 807 | /* We give fast paths for the really cool registers */ |
Ville Syrjälä | 4018169 | 2015-10-22 15:34:57 +0300 | [diff] [blame] | 808 | #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 809 | |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 810 | #define __gen6_reg_read_fw_domains(uncore, offset) \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 811 | ({ \ |
| 812 | enum forcewake_domains __fwd; \ |
| 813 | if (NEEDS_FORCE_WAKE(offset)) \ |
| 814 | __fwd = FORCEWAKE_RENDER; \ |
| 815 | else \ |
| 816 | __fwd = 0; \ |
| 817 | __fwd; \ |
| 818 | }) |
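/*
 * Worked example (illustrative): __gen6_reg_read_fw_domains(uncore, 0x2030)
 * evaluates to FORCEWAKE_RENDER, while any offset at or above 0x40000
 * evaluates to 0 and is read without waking the GT.
 */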
| 819 | |
Tvrtko Ursulin | 9480dbf | 2016-10-04 09:29:29 +0100 | [diff] [blame] | 820 | static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry) |
Tvrtko Ursulin | 91e630b | 2016-10-04 09:29:21 +0100 | [diff] [blame] | 821 | { |
Tvrtko Ursulin | 91e630b | 2016-10-04 09:29:21 +0100 | [diff] [blame] | 822 | if (offset < entry->start) |
| 823 | return -1; |
| 824 | else if (offset > entry->end) |
| 825 | return 1; |
| 826 | else |
| 827 | return 0; |
| 828 | } |
| 829 | |
Tvrtko Ursulin | 9480dbf | 2016-10-04 09:29:29 +0100 | [diff] [blame] | 830 | /* Copied and "macroized" from lib/bsearch.c */ |
| 831 | #define BSEARCH(key, base, num, cmp) ({ \ |
| 832 | unsigned int start__ = 0, end__ = (num); \ |
| 833 | typeof(base) result__ = NULL; \ |
| 834 | while (start__ < end__) { \ |
| 835 | unsigned int mid__ = start__ + (end__ - start__) / 2; \ |
| 836 | int ret__ = (cmp)((key), (base) + mid__); \ |
| 837 | if (ret__ < 0) { \ |
| 838 | end__ = mid__; \ |
| 839 | } else if (ret__ > 0) { \ |
| 840 | start__ = mid__ + 1; \ |
| 841 | } else { \ |
| 842 | result__ = (base) + mid__; \ |
| 843 | break; \ |
| 844 | } \ |
| 845 | } \ |
| 846 | result__; \ |
| 847 | }) |
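/*
 * BSEARCH behaves like bsearch(3) but evaluates to a typed pointer,
 * NULL on a miss. Minimal sketch, mirroring find_fw_domain() below:
 *
 *	entry = BSEARCH(offset, uncore->fw_domains_table,
 *			uncore->fw_domains_table_entries, fw_range_cmp);
 *
 * where a NULL entry means the offset is not covered by any range and
 * so needs no forcewake.
 */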
| 848 | |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 849 | static enum forcewake_domains |
Daniele Ceraolo Spurio | cb7ee69 | 2019-03-19 11:35:38 -0700 | [diff] [blame] | 850 | find_fw_domain(struct intel_uncore *uncore, u32 offset) |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 851 | { |
Tvrtko Ursulin | 9480dbf | 2016-10-04 09:29:29 +0100 | [diff] [blame] | 852 | const struct intel_forcewake_range *entry; |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 853 | |
Tvrtko Ursulin | 9480dbf | 2016-10-04 09:29:29 +0100 | [diff] [blame] | 854 | entry = BSEARCH(offset, |
Daniele Ceraolo Spurio | cb7ee69 | 2019-03-19 11:35:38 -0700 | [diff] [blame] | 855 | uncore->fw_domains_table, |
| 856 | uncore->fw_domains_table_entries, |
Tvrtko Ursulin | 91e630b | 2016-10-04 09:29:21 +0100 | [diff] [blame] | 857 | fw_range_cmp); |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 858 | |
Joonas Lahtinen | 9919142 | 2016-12-07 16:22:39 +0200 | [diff] [blame] | 859 | if (!entry) |
| 860 | return 0; |
| 861 | |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 862 | /* |
| 863 | * The list of FW domains depends on the SKU in gen11+, so we |
| 864 | * can't determine it statically. We use FORCEWAKE_ALL and |
| 865 | * translate it here to the list of available domains. |
| 866 | */ |
| 867 | if (entry->domains == FORCEWAKE_ALL) |
Daniele Ceraolo Spurio | cb7ee69 | 2019-03-19 11:35:38 -0700 | [diff] [blame] | 868 | return uncore->fw_domains; |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 869 | |
Pankaj Bharadiya | a9f236d | 2020-01-15 09:14:54 +0530 | [diff] [blame] | 870 | drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains, |
| 871 | "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", |
| 872 | entry->domains & ~uncore->fw_domains, offset); |
Joonas Lahtinen | 9919142 | 2016-12-07 16:22:39 +0200 | [diff] [blame] | 873 | |
| 874 | return entry->domains; |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 875 | } |
| 876 | |
| 877 | #define GEN_FW_RANGE(s, e, d) \ |
| 878 | { .start = (s), .end = (e), .domains = (d) } |
| 879 | |
Tvrtko Ursulin | 895833b | 2016-10-04 09:29:24 +0100 | [diff] [blame] | 880 | #define HAS_FWTABLE(dev_priv) \ |
Rodrigo Vivi | 3d16ca5 | 2017-07-05 18:00:31 -0700 | [diff] [blame] | 881 | (INTEL_GEN(dev_priv) >= 9 || \ |
Tvrtko Ursulin | 895833b | 2016-10-04 09:29:24 +0100 | [diff] [blame] | 882 | IS_CHERRYVIEW(dev_priv) || \ |
| 883 | IS_VALLEYVIEW(dev_priv)) |
| 884 | |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 885 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 886 | static const struct intel_forcewake_range __vlv_fw_ranges[] = { |
| 887 | GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER), |
| 888 | GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER), |
| 889 | GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 890 | GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), |
| 891 | GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 892 | GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 893 | GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), |
| 894 | }; |
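/*
 * Worked example (illustrative): on VLV an access at offset 0x12080 falls
 * inside the 0x12000-0x13fff range above, so find_fw_domain() returns
 * FORCEWAKE_MEDIA; an offset in a gap between ranges (e.g. 0x4100)
 * matches no entry and needs no forcewake at all.
 */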
Deepak S | 1938e59 | 2014-05-23 21:00:16 +0530 | [diff] [blame] | 895 | |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 896 | #define __fwtable_reg_read_fw_domains(uncore, offset) \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 897 | ({ \ |
| 898 | enum forcewake_domains __fwd = 0; \ |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 899 | if (NEEDS_FORCE_WAKE((offset))) \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 900 | __fwd = find_fw_domain(uncore, offset); \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 901 | __fwd; \ |
| 902 | }) |
| 903 | |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 904 | #define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \ |
Mika Kuoppala | c9f8d187 | 2019-09-13 17:16:50 +0300 | [diff] [blame] | 905 | find_fw_domain(uncore, offset) |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 906 | |
Michel Thierry | cf82d9d | 2019-09-13 17:16:51 +0300 | [diff] [blame] | 907 | #define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \ |
| 908 | find_fw_domain(uncore, offset) |
| 909 | |
Tvrtko Ursulin | 4718857 | 2016-10-04 09:29:27 +0100 | [diff] [blame] | 910 | /* *Must* be sorted by offset! See intel_shadow_table_check(). */ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 911 | static const i915_reg_t gen8_shadowed_regs[] = { |
Tvrtko Ursulin | 4718857 | 2016-10-04 09:29:27 +0100 | [diff] [blame] | 912 | RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ |
| 913 | GEN6_RPNSWREQ, /* 0xA008 */ |
| 914 | GEN6_RC_VIDEO_FREQ, /* 0xA00C */ |
| 915 | RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */ |
| 916 | RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */ |
| 917 | RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 918 | /* TODO: Other registers are not yet used */ |
| 919 | }; |
| 920 | |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 921 | static const i915_reg_t gen11_shadowed_regs[] = { |
| 922 | RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ |
| 923 | GEN6_RPNSWREQ, /* 0xA008 */ |
| 924 | GEN6_RC_VIDEO_FREQ, /* 0xA00C */ |
| 925 | RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ |
| 926 | RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ |
| 927 | RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ |
| 928 | RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ |
| 929 | RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ |
| 930 | RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ |
| 931 | RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ |
| 932 | /* TODO: Other registers are not yet used */ |
| 933 | }; |
| 934 | |
Michel Thierry | cf82d9d | 2019-09-13 17:16:51 +0300 | [diff] [blame] | 935 | static const i915_reg_t gen12_shadowed_regs[] = { |
| 936 | RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ |
| 937 | GEN6_RPNSWREQ, /* 0xA008 */ |
| 938 | GEN6_RC_VIDEO_FREQ, /* 0xA00C */ |
| 939 | RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ |
| 940 | RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ |
| 941 | RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ |
| 942 | RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ |
| 943 | RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ |
| 944 | RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ |
| 945 | RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ |
| 946 | /* TODO: Other registers are not yet used */ |
| 947 | }; |
| 948 | |
Tvrtko Ursulin | 9480dbf | 2016-10-04 09:29:29 +0100 | [diff] [blame] | 949 | static int mmio_reg_cmp(u32 key, const i915_reg_t *reg) |
Tvrtko Ursulin | 5a65938 | 2016-10-04 09:29:28 +0100 | [diff] [blame] | 950 | { |
Tvrtko Ursulin | 9480dbf | 2016-10-04 09:29:29 +0100 | [diff] [blame] | 951 | u32 offset = i915_mmio_reg_offset(*reg); |
Tvrtko Ursulin | 5a65938 | 2016-10-04 09:29:28 +0100 | [diff] [blame] | 952 | |
Tvrtko Ursulin | 9480dbf | 2016-10-04 09:29:29 +0100 | [diff] [blame] | 953 | if (key < offset) |
Tvrtko Ursulin | 5a65938 | 2016-10-04 09:29:28 +0100 | [diff] [blame] | 954 | return -1; |
Tvrtko Ursulin | 9480dbf | 2016-10-04 09:29:29 +0100 | [diff] [blame] | 955 | else if (key > offset) |
Tvrtko Ursulin | 5a65938 | 2016-10-04 09:29:28 +0100 | [diff] [blame] | 956 | return 1; |
| 957 | else |
| 958 | return 0; |
| 959 | } |
| 960 | |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 961 | #define __is_genX_shadowed(x) \ |
| 962 | static bool is_gen##x##_shadowed(u32 offset) \ |
| 963 | { \ |
| 964 | const i915_reg_t *regs = gen##x##_shadowed_regs; \ |
| 965 | return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \ |
| 966 | mmio_reg_cmp); \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 967 | } |
| 968 | |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 969 | __is_genX_shadowed(8) |
| 970 | __is_genX_shadowed(11) |
Michel Thierry | cf82d9d | 2019-09-13 17:16:51 +0300 | [diff] [blame] | 971 | __is_genX_shadowed(12) |
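/*
 * Shadowed registers are latched by the hardware even while the GT is
 * asleep, so writes to them may skip the forcewake dance entirely.
 * Illustrative checks against the gen8 table above:
 *
 *	is_gen8_shadowed(0x2030)	RING_TAIL(RENDER_RING_BASE): true
 *	is_gen8_shadowed(0x9400)	not in the table: false
 */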
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 972 | |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 973 | static enum forcewake_domains |
| 974 | gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) |
| 975 | { |
| 976 | return FORCEWAKE_RENDER; |
| 977 | } |
| 978 | |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 979 | #define __gen8_reg_write_fw_domains(uncore, offset) \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 980 | ({ \ |
| 981 | enum forcewake_domains __fwd; \ |
| 982 | if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \ |
| 983 | __fwd = FORCEWAKE_RENDER; \ |
| 984 | else \ |
| 985 | __fwd = 0; \ |
| 986 | __fwd; \ |
| 987 | }) |
| 988 | |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 989 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 990 | static const struct intel_forcewake_range __chv_fw_ranges[] = { |
| 991 | GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 992 | GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 993 | GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 994 | GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 995 | GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 996 | GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 997 | GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 998 | GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
| 999 | GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1000 | GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 1001 | GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER), |
| 1002 | GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1003 | GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), |
| 1004 | GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA), |
| 1005 | GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA), |
| 1006 | GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1007 | }; |
Damien Lespiau | 38fb6a4 | 2014-03-28 16:54:26 +0000 | [diff] [blame] | 1008 | |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1009 | #define __fwtable_reg_write_fw_domains(uncore, offset) \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 1010 | ({ \ |
| 1011 | enum forcewake_domains __fwd = 0; \ |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1012 | if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1013 | __fwd = find_fw_domain(uncore, offset); \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 1014 | __fwd; \ |
| 1015 | }) |
| 1016 | |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1017 | #define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \ |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1018 | ({ \ |
| 1019 | enum forcewake_domains __fwd = 0; \ |
Mika Kuoppala | c9f8d187 | 2019-09-13 17:16:50 +0300 | [diff] [blame] | 1020 | const u32 __offset = (offset); \ |
| 1021 | if (!is_gen11_shadowed(__offset)) \ |
| 1022 | __fwd = find_fw_domain(uncore, __offset); \ |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1023 | __fwd; \ |
| 1024 | }) |
| 1025 | |
Michel Thierry | cf82d9d | 2019-09-13 17:16:51 +0300 | [diff] [blame] | 1026 | #define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \ |
| 1027 | ({ \ |
| 1028 | enum forcewake_domains __fwd = 0; \ |
| 1029 | const u32 __offset = (offset); \ |
| 1030 | if (!is_gen12_shadowed(__offset)) \ |
| 1031 | __fwd = find_fw_domain(uncore, __offset); \ |
| 1032 | __fwd; \ |
| 1033 | }) |
| 1034 | |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 1035 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1036 | static const struct intel_forcewake_range __gen9_fw_ranges[] = { |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1037 | GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1038 | GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ |
| 1039 | GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1040 | GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1041 | GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1042 | GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1043 | GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1044 | GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 1045 | GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1046 | GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1047 | GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1048 | GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1049 | GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1050 | GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1051 | GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 1052 | GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1053 | GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 1054 | GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1055 | GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 1056 | GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 78424c9 | 2016-11-17 09:02:43 +0000 | [diff] [blame] | 1057 | GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1058 | GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1059 | GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 1060 | GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1061 | GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1062 | GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1063 | GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1064 | GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1065 | GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | b008123 | 2016-10-04 09:29:20 +0100 | [diff] [blame] | 1066 | GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), |
Tvrtko Ursulin | 0dd356b | 2016-10-04 09:29:22 +0100 | [diff] [blame] | 1067 | GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER), |
Tvrtko Ursulin | 9fc1117 | 2016-10-04 09:29:19 +0100 | [diff] [blame] | 1068 | GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), |
| 1069 | }; |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 1070 | |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1071 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ |
| 1072 | static const struct intel_forcewake_range __gen11_fw_ranges[] = { |
| 1073 | GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), |
| 1074 | GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ |
| 1075 | GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), |
| 1076 | GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), |
| 1077 | GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), |
| 1078 | GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), |
| 1079 | GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), |
| 1080 | GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), |
| 1081 | GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), |
| 1082 | GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), |
| 1083 | GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), |
| 1084 | GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), |
| 1085 | GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), |
| 1086 | GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), |
| 1087 | GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), |
| 1088 | GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), |
| 1089 | GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), |
Mika Kuoppala | c9f8d187 | 2019-09-13 17:16:50 +0300 | [diff] [blame] | 1090 | GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER), |
| 1091 | GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER), |
| 1092 | GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER), |
| 1093 | GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER), |
| 1094 | GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER), |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1095 | GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), |
| 1096 | GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), |
| 1097 | GEN_FW_RANGE(0x40000, 0x1bffff, 0), |
| 1098 | GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), |
| 1099 | GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), |
| 1100 | GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), |
| 1101 | GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), |
| 1102 | GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), |
| 1103 | GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), |
| 1104 | GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) |
| 1105 | }; |
| 1106 | |
Michel Thierry | cf82d9d | 2019-09-13 17:16:51 +0300 | [diff] [blame] | 1107 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ |
| 1108 | static const struct intel_forcewake_range __gen12_fw_ranges[] = { |
| 1109 | GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), |
| 1110 | GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ |
| 1111 | GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), |
| 1112 | GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), |
| 1113 | GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), |
| 1114 | GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), |
| 1115 | GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), |
| 1116 | GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), |
| 1117 | GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), |
| 1118 | GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), |
| 1119 | GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), |
| 1120 | GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), |
| 1121 | GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), |
| 1122 | GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), |
| 1123 | GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), |
| 1124 | GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), |
| 1125 | GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), |
| 1126 | GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER), |
| 1127 | GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), |
| 1128 | GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER), |
| 1129 | GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER), |
| 1130 | GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER), |
| 1131 | GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER), |
| 1132 | GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER), |
| 1133 | GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER), |
| 1134 | GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER), |
| 1135 | GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), |
| 1136 | GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), |
| 1137 | GEN_FW_RANGE(0x40000, 0x1bffff, 0), |
| 1138 | GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), |
| 1139 | GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), |
| 1140 | GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), |
| 1141 | GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), |
| 1142 | GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), |
| 1143 | GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), |
| 1144 | GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) |
| 1145 | }; |
| 1146 | |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1147 | static void |
Daniele Ceraolo Spurio | 6ebc969 | 2019-03-19 11:35:41 -0700 | [diff] [blame] | 1148 | ilk_dummy_write(struct intel_uncore *uncore) |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1149 | { |
| 1150 | /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up |
| 1151 | * the chip from rc6 before touching it for real. MI_MODE is masked, |
| 1152 | * hence harmless to write 0 into. */ |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1153 | __raw_uncore_write32(uncore, MI_MODE, 0); |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1154 | } |
| 1155 | |
| 1156 | static void |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 1157 | __unclaimed_reg_debug(struct intel_uncore *uncore, |
Mika Kuoppala | 9c05350 | 2016-01-08 15:51:19 +0200 | [diff] [blame] | 1158 | const i915_reg_t reg, |
| 1159 | const bool read, |
| 1160 | const bool before) |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1161 | { |
Pankaj Bharadiya | a9f236d | 2020-01-15 09:14:54 +0530 | [diff] [blame] | 1162 | if (drm_WARN(&uncore->i915->drm, |
| 1163 | check_for_unclaimed_mmio(uncore) && !before, |
| 1164 | "Unclaimed %s register 0x%x\n", |
| 1165 | read ? "read from" : "write to", |
| 1166 | i915_mmio_reg_offset(reg))) |
Michal Wajdeczko | 4f044a8 | 2017-09-19 19:38:44 +0000 | [diff] [blame] | 1167 | /* Only report the first N failures */ |
| 1168 | i915_modparams.mmio_debug--; |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1169 | } |
| 1170 | |
Mika Kuoppala | 9c05350 | 2016-01-08 15:51:19 +0200 | [diff] [blame] | 1171 | static inline void |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 1172 | unclaimed_reg_debug(struct intel_uncore *uncore, |
Mika Kuoppala | 9c05350 | 2016-01-08 15:51:19 +0200 | [diff] [blame] | 1173 | const i915_reg_t reg, |
| 1174 | const bool read, |
| 1175 | const bool before) |
| 1176 | { |
Michal Wajdeczko | 4f044a8 | 2017-09-19 19:38:44 +0000 | [diff] [blame] | 1177 | if (likely(!i915_modparams.mmio_debug)) |
Mika Kuoppala | 9c05350 | 2016-01-08 15:51:19 +0200 | [diff] [blame] | 1178 | return; |
| 1179 | |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 1180 | /* interrupts are disabled and re-enabled around uncore->lock usage */ |
| 1181 | lockdep_assert_held(&uncore->lock); |
| 1182 | |
| 1183 | if (before) |
| 1184 | spin_lock(&uncore->debug->lock); |
| 1185 | |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 1186 | __unclaimed_reg_debug(uncore, reg, read, before); |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 1187 | |
| 1188 | if (!before) |
| 1189 | spin_unlock(&uncore->debug->lock); |
Mika Kuoppala | 9c05350 | 2016-01-08 15:51:19 +0200 | [diff] [blame] | 1190 | } |
| 1191 | |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1192 | #define GEN2_READ_HEADER(x) \ |
Ben Widawsky | 5d73879 | 2013-10-04 21:24:53 -0700 | [diff] [blame] | 1193 | u##x val = 0; \ |
Daniele Ceraolo Spurio | 87b391b9 | 2019-06-13 16:21:50 -0700 | [diff] [blame] | 1194 | assert_rpm_wakelock_held(uncore->rpm); |
Ben Widawsky | 5d73879 | 2013-10-04 21:24:53 -0700 | [diff] [blame] | 1195 | |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1196 | #define GEN2_READ_FOOTER \ |
Ben Widawsky | 5d73879 | 2013-10-04 21:24:53 -0700 | [diff] [blame] | 1197 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ |
| 1198 | return val |
| 1199 | |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1200 | #define __gen2_read(x) \ |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1201 | static u##x \ |
Daniele Ceraolo Spurio | a2b4abf | 2019-03-25 14:49:36 -0700 | [diff] [blame] | 1202 | gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1203 | GEN2_READ_HEADER(x); \ |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1204 | val = __raw_uncore_read##x(uncore, reg); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1205 | GEN2_READ_FOOTER; \ |
Ben Widawsky | 3967018 | 2013-10-04 21:22:53 -0700 | [diff] [blame] | 1206 | } |
| 1207 | |
| 1208 | #define __gen5_read(x) \ |
| 1209 | static u##x \ |
Daniele Ceraolo Spurio | a2b4abf | 2019-03-25 14:49:36 -0700 | [diff] [blame] | 1210 | gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1211 | GEN2_READ_HEADER(x); \ |
Daniele Ceraolo Spurio | 6ebc969 | 2019-03-19 11:35:41 -0700 | [diff] [blame] | 1212 | ilk_dummy_write(uncore); \ |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1213 | val = __raw_uncore_read##x(uncore, reg); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1214 | GEN2_READ_FOOTER; \ |
Ben Widawsky | 3967018 | 2013-10-04 21:22:53 -0700 | [diff] [blame] | 1215 | } |
| 1216 | |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1217 | __gen5_read(8) |
| 1218 | __gen5_read(16) |
| 1219 | __gen5_read(32) |
| 1220 | __gen5_read(64) |
| 1221 | __gen2_read(8) |
| 1222 | __gen2_read(16) |
| 1223 | __gen2_read(32) |
| 1224 | __gen2_read(64) |
| 1225 | |
| 1226 | #undef __gen5_read |
| 1227 | #undef __gen2_read |
| 1228 | |
| 1229 | #undef GEN2_READ_FOOTER |
| 1230 | #undef GEN2_READ_HEADER |
| 1231 | |
| 1232 | #define GEN6_READ_HEADER(x) \ |
Ville Syrjälä | f0f59a0 | 2015-11-18 15:33:26 +0200 | [diff] [blame] | 1233 | u32 offset = i915_mmio_reg_offset(reg); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1234 | unsigned long irqflags; \ |
| 1235 | u##x val = 0; \ |
Daniele Ceraolo Spurio | 87b391b9 | 2019-06-13 16:21:50 -0700 | [diff] [blame] | 1236 | assert_rpm_wakelock_held(uncore->rpm); \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1237 | spin_lock_irqsave(&uncore->lock, irqflags); \ |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 1238 | unclaimed_reg_debug(uncore, reg, true, true) |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1239 | |
| 1240 | #define GEN6_READ_FOOTER \ |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 1241 | unclaimed_reg_debug(uncore, reg, true, false); \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1242 | spin_unlock_irqrestore(&uncore->lock, irqflags); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1243 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ |
| 1244 | return val |
| 1245 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 1246 | static noinline void ___force_wake_auto(struct intel_uncore *uncore, |
Tvrtko Ursulin | c521b0c | 2016-10-04 09:29:18 +0100 | [diff] [blame] | 1247 | enum forcewake_domains fw_domains) |
Chris Wilson | b2cff0d | 2015-01-16 11:34:37 +0200 | [diff] [blame] | 1248 | { |
| 1249 | struct intel_uncore_forcewake_domain *domain; |
Chris Wilson | d2dc94b | 2017-03-23 10:19:41 +0000 | [diff] [blame] | 1250 | unsigned int tmp; |
Chris Wilson | b2cff0d | 2015-01-16 11:34:37 +0200 | [diff] [blame] | 1251 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 1252 | GEM_BUG_ON(fw_domains & ~uncore->fw_domains); |
Chris Wilson | d2dc94b | 2017-03-23 10:19:41 +0000 | [diff] [blame] | 1253 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 1254 | for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) |
Tvrtko Ursulin | c521b0c | 2016-10-04 09:29:18 +0100 | [diff] [blame] | 1255 | fw_domain_arm_timer(domain); |
| 1256 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 1257 | uncore->funcs.force_wake_get(uncore, fw_domains); |
Tvrtko Ursulin | c521b0c | 2016-10-04 09:29:18 +0100 | [diff] [blame] | 1258 | } |
| 1259 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 1260 | static inline void __force_wake_auto(struct intel_uncore *uncore, |
Tvrtko Ursulin | c521b0c | 2016-10-04 09:29:18 +0100 | [diff] [blame] | 1261 | enum forcewake_domains fw_domains) |
| 1262 | { |
Chris Wilson | 77adbd8 | 2019-07-08 16:49:14 +0100 | [diff] [blame] | 1263 | GEM_BUG_ON(!fw_domains); |
Chris Wilson | b2cff0d | 2015-01-16 11:34:37 +0200 | [diff] [blame] | 1264 | |
Tvrtko Ursulin | 003342a | 2016-10-04 09:29:17 +0100 | [diff] [blame] | 1265 | /* Turn on all requested but inactive supported forcewake domains. */ |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 1266 | fw_domains &= uncore->fw_domains; |
| 1267 | fw_domains &= ~uncore->fw_domains_active; |
Chris Wilson | b2cff0d | 2015-01-16 11:34:37 +0200 | [diff] [blame] | 1268 | |
Tvrtko Ursulin | c521b0c | 2016-10-04 09:29:18 +0100 | [diff] [blame] | 1269 | if (fw_domains) |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 1270 | ___force_wake_auto(uncore, fw_domains); |
Chris Wilson | b2cff0d | 2015-01-16 11:34:37 +0200 | [diff] [blame] | 1271 | } |
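/*
 * Sketch of the auto-release flow implemented above (the timer interval
 * is an implementation detail of fw_domain_arm_timer()): a plain
 * intel_uncore_read() of a forcewake-protected register takes a
 * temporary domain reference via __force_wake_auto(), and the armed
 * hrtimer drops that reference shortly afterwards, amortising the
 * wakeup cost over bursts of MMIO.
 */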
| 1272 | |
Daniele Ceraolo Spurio | ccfceda | 2017-02-03 17:23:29 -0800 | [diff] [blame] | 1273 | #define __gen_read(func, x) \ |
Ben Widawsky | 3967018 | 2013-10-04 21:22:53 -0700 | [diff] [blame] | 1274 | static u##x \ |
Daniele Ceraolo Spurio | a2b4abf | 2019-03-25 14:49:36 -0700 | [diff] [blame] | 1275 | func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 1276 | enum forcewake_domains fw_engine; \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1277 | GEN6_READ_HEADER(x); \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1278 | fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 1279 | if (fw_engine) \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1280 | __force_wake_auto(uncore, fw_engine); \ |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1281 | val = __raw_uncore_read##x(uncore, reg); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1282 | GEN6_READ_FOOTER; \ |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1283 | } |
Deepak S | 940aece | 2013-11-23 14:55:43 +0530 | [diff] [blame] | 1284 | |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 1285 | #define __gen_reg_read_funcs(func) \ |
| 1286 | static enum forcewake_domains \ |
| 1287 | func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ |
| 1288 | return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ |
| 1289 | } \ |
| 1290 | \ |
| 1291 | __gen_read(func, 8) \ |
| 1292 | __gen_read(func, 16) \ |
| 1293 | __gen_read(func, 32) \ |
| 1294 | __gen_read(func, 64) |
Ben Widawsky | 3967018 | 2013-10-04 21:22:53 -0700 | [diff] [blame] | 1295 | |
Michel Thierry | cf82d9d | 2019-09-13 17:16:51 +0300 | [diff] [blame] | 1296 | __gen_reg_read_funcs(gen12_fwtable); |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 1297 | __gen_reg_read_funcs(gen11_fwtable); |
| 1298 | __gen_reg_read_funcs(fwtable); |
| 1299 | __gen_reg_read_funcs(gen6); |
| 1300 | |
| 1301 | #undef __gen_reg_read_funcs |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1302 | #undef GEN6_READ_FOOTER |
| 1303 | #undef GEN6_READ_HEADER |
Ben Widawsky | 5d73879 | 2013-10-04 21:24:53 -0700 | [diff] [blame] | 1304 | |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1305 | #define GEN2_WRITE_HEADER \ |
Ben Widawsky | 5d73879 | 2013-10-04 21:24:53 -0700 | [diff] [blame] | 1306 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
Daniele Ceraolo Spurio | 87b391b9 | 2019-06-13 16:21:50 -0700 | [diff] [blame] | 1307 | assert_rpm_wakelock_held(uncore->rpm); \ |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1308 | |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1309 | #define GEN2_WRITE_FOOTER |
Ville Syrjälä | 0d96530 | 2013-12-02 14:23:02 +0200 | [diff] [blame] | 1310 | |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1311 | #define __gen2_write(x) \ |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1312 | static void \ |
Daniele Ceraolo Spurio | a2b4abf | 2019-03-25 14:49:36 -0700 | [diff] [blame] | 1313 | gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1314 | GEN2_WRITE_HEADER; \ |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1315 | __raw_uncore_write##x(uncore, reg, val); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1316 | GEN2_WRITE_FOOTER; \ |
Ben Widawsky | 4032ef4 | 2013-10-04 21:22:54 -0700 | [diff] [blame] | 1317 | } |
| 1318 | |
| 1319 | #define __gen5_write(x) \ |
| 1320 | static void \ |
Daniele Ceraolo Spurio | a2b4abf | 2019-03-25 14:49:36 -0700 | [diff] [blame] | 1321 | gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1322 | GEN2_WRITE_HEADER; \ |
Daniele Ceraolo Spurio | 6ebc969 | 2019-03-19 11:35:41 -0700 | [diff] [blame] | 1323 | ilk_dummy_write(uncore); \ |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1324 | __raw_uncore_write##x(uncore, reg, val); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1325 | GEN2_WRITE_FOOTER; \ |
Ben Widawsky | 4032ef4 | 2013-10-04 21:22:54 -0700 | [diff] [blame] | 1326 | } |
| 1327 | |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1328 | __gen5_write(8) |
| 1329 | __gen5_write(16) |
| 1330 | __gen5_write(32) |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1331 | __gen2_write(8) |
| 1332 | __gen2_write(16) |
| 1333 | __gen2_write(32) |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1334 | |
| 1335 | #undef __gen5_write |
| 1336 | #undef __gen2_write |
| 1337 | |
| 1338 | #undef GEN2_WRITE_FOOTER |
| 1339 | #undef GEN2_WRITE_HEADER |
| 1340 | |
| 1341 | #define GEN6_WRITE_HEADER \ |
Ville Syrjälä | f0f59a0 | 2015-11-18 15:33:26 +0200 | [diff] [blame] | 1342 | u32 offset = i915_mmio_reg_offset(reg); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1343 | unsigned long irqflags; \ |
| 1344 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
Daniele Ceraolo Spurio | 87b391b9 | 2019-06-13 16:21:50 -0700 | [diff] [blame] | 1345 | assert_rpm_wakelock_held(uncore->rpm); \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1346 | spin_lock_irqsave(&uncore->lock, irqflags); \ |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 1347 | unclaimed_reg_debug(uncore, reg, false, true) |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1348 | |
| 1349 | #define GEN6_WRITE_FOOTER \ |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 1350 | unclaimed_reg_debug(uncore, reg, false, false); \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1351 | spin_unlock_irqrestore(&uncore->lock, irqflags) |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1352 | |
Ben Widawsky | 4032ef4 | 2013-10-04 21:22:54 -0700 | [diff] [blame] | 1353 | #define __gen6_write(x) \ |
| 1354 | static void \ |
Daniele Ceraolo Spurio | a2b4abf | 2019-03-25 14:49:36 -0700 | [diff] [blame] | 1355 | gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1356 | GEN6_WRITE_HEADER; \ |
Mika Kuoppala | a338908 | 2017-04-06 18:39:42 +0300 | [diff] [blame] | 1357 | if (NEEDS_FORCE_WAKE(offset)) \ |
Daniele Ceraolo Spurio | 6ebc969 | 2019-03-19 11:35:41 -0700 | [diff] [blame] | 1358 | __gen6_gt_wait_for_fifo(uncore); \ |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1359 | __raw_uncore_write##x(uncore, reg, val); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1360 | GEN6_WRITE_FOOTER; \ |
Ben Widawsky | 4032ef4 | 2013-10-04 21:22:54 -0700 | [diff] [blame] | 1361 | } |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 1362 | __gen6_write(8) |
| 1363 | __gen6_write(16) |
| 1364 | __gen6_write(32) |
Ben Widawsky | 4032ef4 | 2013-10-04 21:22:54 -0700 | [diff] [blame] | 1365 | |
Daniele Ceraolo Spurio | ccfceda | 2017-02-03 17:23:29 -0800 | [diff] [blame] | 1366 | #define __gen_write(func, x) \ |
Ben Widawsky | ab2aa47 | 2013-11-02 21:07:00 -0700 | [diff] [blame] | 1367 | static void \ |
Daniele Ceraolo Spurio | a2b4abf | 2019-03-25 14:49:36 -0700 | [diff] [blame] | 1368 | func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 1369 | enum forcewake_domains fw_engine; \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1370 | GEN6_WRITE_HEADER; \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1371 | fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \ |
Tvrtko Ursulin | 6863b76 | 2016-04-12 14:37:29 +0100 | [diff] [blame] | 1372 | if (fw_engine) \ |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 1373 | __force_wake_auto(uncore, fw_engine); \ |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1374 | __raw_uncore_write##x(uncore, reg, val); \ |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1375 | GEN6_WRITE_FOOTER; \ |
Ben Widawsky | ab2aa47 | 2013-11-02 21:07:00 -0700 | [diff] [blame] | 1376 | } |
Deepak S | 1938e59 | 2014-05-23 21:00:16 +0530 | [diff] [blame] | 1377 | |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 1378 | #define __gen_reg_write_funcs(func) \ |
| 1379 | static enum forcewake_domains \ |
| 1380 | func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ |
| 1381 | return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ |
| 1382 | } \ |
| 1383 | \ |
| 1384 | __gen_write(func, 8) \ |
| 1385 | __gen_write(func, 16) \ |
| 1386 | __gen_write(func, 32) |
Ben Widawsky | 4032ef4 | 2013-10-04 21:22:54 -0700 | [diff] [blame] | 1387 | |
Michel Thierry | cf82d9d | 2019-09-13 17:16:51 +0300 | [diff] [blame] | 1388 | __gen_reg_write_funcs(gen12_fwtable); |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 1389 | __gen_reg_write_funcs(gen11_fwtable); |
| 1390 | __gen_reg_write_funcs(fwtable); |
| 1391 | __gen_reg_write_funcs(gen8); |
| 1392 | |
| 1393 | #undef __gen_reg_write_funcs |
Chris Wilson | 51f6788 | 2015-01-16 11:34:36 +0200 | [diff] [blame] | 1394 | #undef GEN6_WRITE_FOOTER |
| 1395 | #undef GEN6_WRITE_HEADER |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1396 | |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 1397 | #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \ |
Yu Zhang | 43d942a | 2014-10-23 15:28:24 +0800 | [diff] [blame] | 1398 | do { \ |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1399 | (uncore)->funcs.mmio_writeb = x##_write8; \ |
| 1400 | (uncore)->funcs.mmio_writew = x##_write16; \ |
| 1401 | (uncore)->funcs.mmio_writel = x##_write32; \ |
Yu Zhang | 43d942a | 2014-10-23 15:28:24 +0800 | [diff] [blame] | 1402 | } while (0) |
| 1403 | |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 1404 | #define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \ |
Yu Zhang | 43d942a | 2014-10-23 15:28:24 +0800 | [diff] [blame] | 1405 | do { \ |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1406 | (uncore)->funcs.mmio_readb = x##_read8; \ |
| 1407 | (uncore)->funcs.mmio_readw = x##_read16; \ |
| 1408 | (uncore)->funcs.mmio_readl = x##_read32; \ |
| 1409 | (uncore)->funcs.mmio_readq = x##_read64; \ |
Yu Zhang | 43d942a | 2014-10-23 15:28:24 +0800 | [diff] [blame] | 1410 | } while (0) |
| 1411 | |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 1412 | #define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \ |
| 1413 | do { \ |
| 1414 | ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \ |
| 1415 | (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \ |
| 1416 | } while (0) |
| 1417 | |
| 1418 | #define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \ |
| 1419 | do { \ |
| 1420 | ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \ |
| 1421 | (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \ |
| 1422 | } while (0) |
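/*
 * Illustrative use (assumed, matching how the platform init code
 * selects vfuncs later in this file):
 *
 *	ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
 *	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 *
 * which wires uncore->funcs.mmio_write{b,w,l} to fwtable_write{8,16,32}
 * and the fw_domains lookups to fwtable_reg_{read,write}_fw_domains.
 */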
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1423 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1424 | static int __fw_domain_init(struct intel_uncore *uncore, |
| 1425 | enum forcewake_domain_id domain_id, |
| 1426 | i915_reg_t reg_set, |
| 1427 | i915_reg_t reg_ack) |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1428 | { |
| 1429 | struct intel_uncore_forcewake_domain *d; |
| 1430 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1431 | GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); |
| 1432 | GEM_BUG_ON(uncore->fw_domain[domain_id]); |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1433 | |
Michal Wajdeczko | 50d8441 | 2019-08-02 18:40:50 +0000 | [diff] [blame] | 1434 | if (i915_inject_probe_failure(uncore->i915)) |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1435 | return -ENOMEM; |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1436 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1437 | d = kzalloc(sizeof(*d), GFP_KERNEL); |
| 1438 | if (!d) |
| 1439 | return -ENOMEM; |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1440 | |
Pankaj Bharadiya | a9f236d | 2020-01-15 09:14:54 +0530 | [diff] [blame] | 1441 | drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set)); |
| 1442 | drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack)); |
Chris Wilson | 6e3955a | 2017-03-23 10:19:43 +0000 | [diff] [blame] | 1443 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1444 | d->uncore = uncore; |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1445 | d->wake_count = 0; |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1446 | d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); |
| 1447 | d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1448 | |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1449 | d->id = domain_id; |
| 1450 | |
Tvrtko Ursulin | 33c582c | 2016-04-07 17:04:33 +0100 | [diff] [blame] | 1451 | BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); |
| 1452 | BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER)); |
| 1453 | BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1454 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0)); |
| 1455 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1)); |
| 1456 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2)); |
| 1457 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3)); |
| 1458 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0)); |
| 1459 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1)); |
| 1460 | |
Chris Wilson | d2dc94b | 2017-03-23 10:19:41 +0000 | [diff] [blame] | 1461 | d->mask = BIT(domain_id); |
Tvrtko Ursulin | 33c582c | 2016-04-07 17:04:33 +0100 | [diff] [blame] | 1462 | |
Tvrtko Ursulin | a57a4a6 | 2016-04-07 17:04:32 +0100 | [diff] [blame] | 1463 | hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 1464 | d->timer.function = intel_uncore_fw_release_timer; |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1465 | |
Daniele Ceraolo Spurio | 535d8d27 | 2019-03-16 10:00:45 +0000 | [diff] [blame] | 1466 | uncore->fw_domains |= BIT(domain_id); |
Mika Kuoppala | f9b3927 | 2015-01-28 14:43:24 +0200 | [diff] [blame] | 1467 | |
Daniele Ceraolo Spurio | 159367b | 2019-03-20 12:27:32 +0000 | [diff] [blame] | 1468 | fw_domain_reset(d); |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1469 | |
| 1470 | uncore->fw_domain[domain_id] = d; |
| 1471 | |
| 1472 | return 0; |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1473 | } |
| 1474 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1475 | static void fw_domain_fini(struct intel_uncore *uncore, |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1476 | enum forcewake_domain_id domain_id) |
| 1477 | { |
| 1478 | struct intel_uncore_forcewake_domain *d; |
| 1479 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1480 | GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); |
| 1481 | |
| 1482 | d = fetch_and_zero(&uncore->fw_domain[domain_id]); |
| 1483 | if (!d) |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1484 | return; |
| 1485 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1486 | uncore->fw_domains &= ~BIT(domain_id); |
Pankaj Bharadiya | a9f236d | 2020-01-15 09:14:54 +0530 | [diff] [blame] | 1487 | drm_WARN_ON(&uncore->i915->drm, d->wake_count); |
| 1488 | drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer)); |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1489 | kfree(d); |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1490 | } |
| 1491 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1492 | static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore) |
| 1493 | { |
| 1494 | struct intel_uncore_forcewake_domain *d; |
| 1495 | int tmp; |
| 1496 | |
| 1497 | for_each_fw_domain(d, uncore, tmp) |
| 1498 | fw_domain_fini(uncore, d->id); |
| 1499 | } |
| 1500 | |
| 1501 | static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1502 | { |
Daniele Ceraolo Spurio | 0138575 | 2019-06-19 18:00:18 -0700 | [diff] [blame] | 1503 | struct drm_i915_private *i915 = uncore->i915; |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1504 | int ret = 0; |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1505 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1506 | GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); |
Mika Kuoppala | 3225b2f | 2015-02-05 17:45:42 +0200 | [diff] [blame] | 1507 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1508 | #define fw_domain_init(uncore__, id__, set__, ack__) \ |
| 1509 | (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__)))) |
| 1510 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1511 | if (INTEL_GEN(i915) >= 11) { |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1512 | int i; |
| 1513 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1514 | uncore->funcs.force_wake_get = fw_domains_get_with_fallback; |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1515 | uncore->funcs.force_wake_put = fw_domains_put; |
| 1516 | fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1517 | FORCEWAKE_RENDER_GEN9, |
| 1518 | FORCEWAKE_ACK_RENDER_GEN9); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1519 | fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1520 | FORCEWAKE_BLITTER_GEN9, |
| 1521 | FORCEWAKE_ACK_BLITTER_GEN9); |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1522 | |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1523 | for (i = 0; i < I915_MAX_VCS; i++) { |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1524 | if (!HAS_ENGINE(i915, _VCS(i))) |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1525 | continue; |
| 1526 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1527 | fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1528 | FORCEWAKE_MEDIA_VDBOX_GEN11(i), |
| 1529 | FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)); |
| 1530 | } |
| 1531 | for (i = 0; i < I915_MAX_VECS; i++) { |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1532 | if (!HAS_ENGINE(i915, _VECS(i))) |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1533 | continue; |
| 1534 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1535 | fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, |
Daniele Ceraolo Spurio | a89a70a | 2018-03-02 18:15:01 +0200 | [diff] [blame] | 1536 | FORCEWAKE_MEDIA_VEBOX_GEN11(i), |
| 1537 | FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); |
| 1538 | } |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1539 | } else if (IS_GEN_RANGE(i915, 9, 10)) { |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1540 | uncore->funcs.force_wake_get = fw_domains_get_with_fallback; |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1541 | uncore->funcs.force_wake_put = fw_domains_put; |
| 1542 | fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1543 | FORCEWAKE_RENDER_GEN9, |
| 1544 | FORCEWAKE_ACK_RENDER_GEN9); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1545 | fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1546 | FORCEWAKE_BLITTER_GEN9, |
| 1547 | FORCEWAKE_ACK_BLITTER_GEN9); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1548 | fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1549 | FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1550 | } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { |
| 1551 | uncore->funcs.force_wake_get = fw_domains_get; |
| 1552 | uncore->funcs.force_wake_put = fw_domains_put; |
| 1553 | fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1554 | FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1555 | fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1556 | FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1557 | } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { |
| 1558 | uncore->funcs.force_wake_get = |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1559 | fw_domains_get_with_thread_status; |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1560 | uncore->funcs.force_wake_put = fw_domains_put; |
| 1561 | fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1562 | FORCEWAKE_MT, FORCEWAKE_ACK_HSW); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1563 | } else if (IS_IVYBRIDGE(i915)) { |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1564 | u32 ecobus; |
| 1565 | |
| 1566 | /* IVB configs may use multi-threaded forcewake */ |
| 1567 | |
| 1568 | /* A small trick here - if the BIOS hasn't configured |
| 1569 | * MT forcewake, and if the device is in RC6, then |
| 1570 | * force_wake_mt_get will not wake the device and the |
| 1571 | * ECOBUS read will return zero, which is (correctly) |
| 1572 | * interpreted by the test below as MT forcewake |
| 1573 | * being disabled. |
| 1574 | */ |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1575 | uncore->funcs.force_wake_get = |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1576 | fw_domains_get_with_thread_status; |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1577 | uncore->funcs.force_wake_put = fw_domains_put; |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1578 | |
Mika Kuoppala | f9b3927 | 2015-01-28 14:43:24 +0200 | [diff] [blame] | 1579 | /* We need to init first for ECOBUS access and then |
| 1580 | * determine later if we want to reinit, in case MT access is |
Mika Kuoppala | 6ea2556 | 2015-02-27 18:11:09 +0200 | [diff] [blame] | 1581 | * not working. At this stage we don't know which flavour this |
| 1582 | * ivb is, so it is better to also reset the gen6 fw registers |
| 1583 | * before the ecobus check. |
Mika Kuoppala | f9b3927 | 2015-01-28 14:43:24 +0200 | [diff] [blame] | 1584 | */ |
Mika Kuoppala | 6ea2556 | 2015-02-27 18:11:09 +0200 | [diff] [blame] | 1585 | |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1586 | __raw_uncore_write32(uncore, FORCEWAKE, 0); |
Daniele Ceraolo Spurio | 6ebc969 | 2019-03-19 11:35:41 -0700 | [diff] [blame] | 1587 | __raw_posting_read(uncore, ECOBUS); |
Mika Kuoppala | 6ea2556 | 2015-02-27 18:11:09 +0200 | [diff] [blame] | 1588 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1589 | ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
| 1590 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); |
| 1591 | if (ret) |
| 1592 | goto out; |
Mika Kuoppala | f9b3927 | 2015-01-28 14:43:24 +0200 | [diff] [blame] | 1593 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1594 | spin_lock_irq(&uncore->lock); |
| 1595 | fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER); |
Daniele Ceraolo Spurio | 6cc5ca7 | 2019-03-25 14:49:32 -0700 | [diff] [blame] | 1596 | ecobus = __raw_uncore_read32(uncore, ECOBUS); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1597 | fw_domains_put(uncore, FORCEWAKE_RENDER); |
| 1598 | spin_unlock_irq(&uncore->lock); |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1599 | |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1600 | if (!(ecobus & FORCEWAKE_MT_ENABLE)) { |
Wambui Karuga | d0208cf | 2020-01-07 18:13:33 +0300 | [diff] [blame] | 1601 | drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n"); |
| 1602 | drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n"); |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1603 | fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1604 | fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1605 | FORCEWAKE, FORCEWAKE_ACK); |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1606 | } |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1607 | } else if (IS_GEN(i915, 6)) { |
| 1608 | uncore->funcs.force_wake_get = |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1609 | fw_domains_get_with_thread_status; |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1610 | uncore->funcs.force_wake_put = fw_domains_put; |
| 1611 | fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
Mika Kuoppala | 05a2fb1 | 2015-01-19 16:20:43 +0200 | [diff] [blame] | 1612 | FORCEWAKE, FORCEWAKE_ACK); |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1613 | } |
Mika Kuoppala | 3225b2f | 2015-02-05 17:45:42 +0200 | [diff] [blame] | 1614 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1615 | #undef fw_domain_init |
| 1616 | |
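/*
 * Platforms without forcewake (gen5 and earlier, or when running as a
 * vgpu) never reach this function (see the UNCORE_HAS_FORCEWAKE check
 * in intel_uncore_init_mmio()), so every branch above should have
 * registered at least one domain.
 */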
Mika Kuoppala | 3225b2f | 2015-02-05 17:45:42 +0200 | [diff] [blame] | 1617 | /* All future platforms are expected to require complex power gating */ |
Pankaj Bharadiya | 48a1b8d | 2020-01-15 09:14:53 +0530 | [diff] [blame] | 1618 | drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0); |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1619 | |
| 1620 | out: |
| 1621 | if (ret) |
| 1622 | intel_uncore_fw_domains_fini(uncore); |
| 1623 | |
| 1624 | return ret; |
Mika Kuoppala | f9b3927 | 2015-01-28 14:43:24 +0200 | [diff] [blame] | 1625 | } |
| 1626 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1627 | #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \ |
Tvrtko Ursulin | 1515797 | 2016-10-04 09:29:23 +0100 | [diff] [blame] | 1628 | { \ |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1629 | (uncore)->fw_domains_table = \ |
Tvrtko Ursulin | 1515797 | 2016-10-04 09:29:23 +0100 | [diff] [blame] | 1630 | (struct intel_forcewake_range *)(d); \ |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1631 | (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \ |
Tvrtko Ursulin | 1515797 | 2016-10-04 09:29:23 +0100 | [diff] [blame] | 1632 | } |
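
/*
 * Example use, matching the calls in uncore_forcewake_init() below:
 *
 *	ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
 */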
| 1633 | |
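/*
 * Registered from uncore_forcewake_init() below via
 * iosf_mbi_register_pmic_bus_access_notifier(), i.e. only on
 * forcewake-aware platforms.
 */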
Hans de Goede | 264ec1a | 2017-02-10 11:28:02 +0100 | [diff] [blame] | 1634 | static int i915_pmic_bus_access_notifier(struct notifier_block *nb, |
| 1635 | unsigned long action, void *data) |
| 1636 | { |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1637 | struct intel_uncore *uncore = container_of(nb, |
| 1638 | struct intel_uncore, pmic_bus_access_nb); |
Hans de Goede | 264ec1a | 2017-02-10 11:28:02 +0100 | [diff] [blame] | 1639 | |
| 1640 | switch (action) { |
| 1641 | case MBI_PMIC_BUS_ACCESS_BEGIN: |
| 1642 | /* |
| 1643 | * Forcewake all now to make sure that we don't need to do a |
| 1644 | * forcewake later, which on systems where this notifier gets |
| 1645 | * called requires the punit to access the shared pmic i2c |
| 1646 | * bus; the bus will be busy after this notification, leading to |
| 1647 | * "render: timed out waiting for forcewake ack request." |
| 1648 | * errors. |
Hans de Goede | ce30560 | 2017-11-10 16:03:01 +0100 | [diff] [blame] | 1649 | * |
| 1650 | * The notifier is unregistered during intel_runtime_suspend(), |
| 1651 | * so it's ok to access the HW here without holding an RPM |
| 1652 | * wake reference -> disable wakeref asserts for the duration |
| 1653 | * of the access. |
Hans de Goede | 264ec1a | 2017-02-10 11:28:02 +0100 | [diff] [blame] | 1654 | */ |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1655 | disable_rpm_wakeref_asserts(uncore->rpm); |
| 1656 | intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); |
| 1657 | enable_rpm_wakeref_asserts(uncore->rpm); |
Hans de Goede | 264ec1a | 2017-02-10 11:28:02 +0100 | [diff] [blame] | 1658 | break; |
| 1659 | case MBI_PMIC_BUS_ACCESS_END: |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1660 | intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); |
Hans de Goede | 264ec1a | 2017-02-10 11:28:02 +0100 | [diff] [blame] | 1661 | break; |
| 1662 | } |
| 1663 | |
| 1664 | return NOTIFY_OK; |
| 1665 | } |
| 1666 | |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1667 | static int uncore_mmio_setup(struct intel_uncore *uncore) |
Mika Kuoppala | f9b3927 | 2015-01-28 14:43:24 +0200 | [diff] [blame] | 1668 | { |
Daniele Ceraolo Spurio | 0138575 | 2019-06-19 18:00:18 -0700 | [diff] [blame] | 1669 | struct drm_i915_private *i915 = uncore->i915; |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1670 | struct pci_dev *pdev = i915->drm.pdev; |
| 1671 | int mmio_bar; |
| 1672 | int mmio_size; |
| 1673 | |
| 1674 | mmio_bar = IS_GEN(i915, 2) ? 1 : 0; |
| 1675 | /* |
| 1676 | * Before gen4, the registers and the GTT are behind different BARs. |
| 1677 | * However, from gen4 onwards, the registers and the GTT are shared |
| 1678 | * in the same BAR, so we want to restrict this ioremap from |
| 1679 | * clobbering the GTT, for which we want ioremap_wc instead. Fortunately, |
| 1680 | * the register BAR remains the same size for all the earlier |
| 1681 | * generations up to Ironlake. |
| 1682 | */ |
| 1683 | if (INTEL_GEN(i915) < 5) |
| 1684 | mmio_size = 512 * 1024; |
| 1685 | else |
| 1686 | mmio_size = 2 * 1024 * 1024; |
| 1687 | uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size); |
| 1688 | if (uncore->regs == NULL) { |
Wambui Karuga | d0208cf | 2020-01-07 18:13:33 +0300 | [diff] [blame] | 1689 | drm_err(&i915->drm, "failed to map registers\n"); |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1690 | return -EIO; |
| 1691 | } |
| 1692 | |
| 1693 | return 0; |
| 1694 | } |
| 1695 | |
| 1696 | static void uncore_mmio_cleanup(struct intel_uncore *uncore) |
| 1697 | { |
Daniele Ceraolo Spurio | 0138575 | 2019-06-19 18:00:18 -0700 | [diff] [blame] | 1698 | struct pci_dev *pdev = uncore->i915->drm.pdev; |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1699 | |
| 1700 | pci_iounmap(pdev, uncore->regs); |
| 1701 | } |
| 1702 | |
Daniele Ceraolo Spurio | 0138575 | 2019-06-19 18:00:18 -0700 | [diff] [blame] | 1703 | void intel_uncore_init_early(struct intel_uncore *uncore, |
| 1704 | struct drm_i915_private *i915) |
Daniele Ceraolo Spurio | 6cbe8830 | 2019-04-02 13:10:31 -0700 | [diff] [blame] | 1705 | { |
| 1706 | spin_lock_init(&uncore->lock); |
Daniele Ceraolo Spurio | 0138575 | 2019-06-19 18:00:18 -0700 | [diff] [blame] | 1707 | uncore->i915 = i915; |
| 1708 | uncore->rpm = &i915->runtime_pm; |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 1709 | uncore->debug = &i915->mmio_debug; |
Daniele Ceraolo Spurio | 6cbe8830 | 2019-04-02 13:10:31 -0700 | [diff] [blame] | 1710 | } |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1711 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1712 | static void uncore_raw_init(struct intel_uncore *uncore) |
| 1713 | { |
| 1714 | GEM_BUG_ON(intel_uncore_has_forcewake(uncore)); |
| 1715 | |
| 1716 | if (IS_GEN(uncore->i915, 5)) { |
| 1717 | ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); |
| 1718 | ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); |
| 1719 | } else { |
| 1720 | ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); |
| 1721 | ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2); |
| 1722 | } |
| 1723 | } |
| 1724 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1725 | static int uncore_forcewake_init(struct intel_uncore *uncore) |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1726 | { |
| 1727 | struct drm_i915_private *i915 = uncore->i915; |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1728 | int ret; |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1729 | |
| 1730 | GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); |
| 1731 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1732 | ret = intel_uncore_fw_domains_init(uncore); |
| 1733 | if (ret) |
| 1734 | return ret; |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1735 | forcewake_early_sanitize(uncore, 0); |
| 1736 | |
| 1737 | if (IS_GEN_RANGE(i915, 6, 7)) { |
| 1738 | ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); |
| 1739 | |
| 1740 | if (IS_VALLEYVIEW(i915)) { |
| 1741 | ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges); |
| 1742 | ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); |
| 1743 | } else { |
| 1744 | ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); |
| 1745 | } |
| 1746 | } else if (IS_GEN(i915, 8)) { |
| 1747 | if (IS_CHERRYVIEW(i915)) { |
| 1748 | ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges); |
| 1749 | ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); |
| 1750 | ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); |
| 1751 | } else { |
| 1752 | ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8); |
| 1753 | ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); |
| 1754 | } |
| 1755 | } else if (IS_GEN_RANGE(i915, 9, 10)) { |
| 1756 | ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges); |
| 1757 | ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); |
| 1758 | ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); |
Michel Thierry | cf82d9d | 2019-09-13 17:16:51 +0300 | [diff] [blame] | 1759 | } else if (IS_GEN(i915, 11)) { |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1760 | ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges); |
| 1761 | ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable); |
| 1762 | ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); |
Michel Thierry | cf82d9d | 2019-09-13 17:16:51 +0300 | [diff] [blame] | 1763 | } else { |
| 1764 | ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges); |
| 1765 | ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable); |
| 1766 | ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable); |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1767 | } |
| 1768 | |
| 1769 | uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; |
| 1770 | iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1771 | |
| 1772 | return 0; |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1773 | } |
| 1774 | |
Daniele Ceraolo Spurio | 3de6f85 | 2019-04-02 13:10:32 -0700 | [diff] [blame] | 1775 | int intel_uncore_init_mmio(struct intel_uncore *uncore) |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1776 | { |
Daniele Ceraolo Spurio | 0138575 | 2019-06-19 18:00:18 -0700 | [diff] [blame] | 1777 | struct drm_i915_private *i915 = uncore->i915; |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1778 | int ret; |
| 1779 | |
| 1780 | ret = uncore_mmio_setup(uncore); |
| 1781 | if (ret) |
| 1782 | return ret; |
Yu Zhang | cf9d289 | 2015-02-10 19:05:47 +0800 | [diff] [blame] | 1783 | |
Daniele Ceraolo Spurio | 5a0ba77 | 2019-03-25 14:49:33 -0700 | [diff] [blame] | 1784 | if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) |
| 1785 | uncore->flags |= UNCORE_HAS_FORCEWAKE; |
| 1786 | |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1787 | if (!intel_uncore_has_forcewake(uncore)) { |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1788 | uncore_raw_init(uncore); |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1789 | } else { |
| 1790 | ret = uncore_forcewake_init(uncore); |
| 1791 | if (ret) |
| 1792 | goto out_mmio_cleanup; |
| 1793 | } |
Imre Deak | ed49388 | 2014-10-23 19:23:21 +0300 | [diff] [blame] | 1794 | |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 1795 | /* make sure fw funcs are set if and only if we have fw */ |
| 1796 | GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get); |
| 1797 | GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put); |
| 1798 | GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains); |
| 1799 | GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains); |
| 1800 | |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 1801 | if (HAS_FPGA_DBG_UNCLAIMED(i915)) |
| 1802 | uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED; |
| 1803 | |
| 1804 | if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) |
| 1805 | uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED; |
| 1806 | |
| 1807 | if (IS_GEN_RANGE(i915, 6, 7)) |
| 1808 | uncore->flags |= UNCORE_HAS_FIFO; |
| 1809 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1810 | /* clear out unclaimed reg detection bit */ |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 1811 | if (intel_uncore_unclaimed_mmio(uncore)) |
Wambui Karuga | d0208cf | 2020-01-07 18:13:33 +0300 | [diff] [blame] | 1812 | drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n"); |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1813 | |
| 1814 | return 0; |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1815 | |
| 1816 | out_mmio_cleanup: |
| 1817 | uncore_mmio_cleanup(uncore); |
| 1818 | |
| 1819 | return ret; |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1820 | } |
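
/*
 * Illustrative call order during driver load (a sketch, not the actual
 * probe code): intel_uncore_init_early() first, then
 * intel_uncore_init_mmio() once the PCI device is usable, with
 * intel_uncore_fini_mmio() as the teardown counterpart.
 */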
| 1821 | |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1822 | /* |
| 1823 | * We might have detected that some engines are fused off after we initialized |
| 1824 | * the forcewake domains. Prune the domains to make sure they only reference |
| 1825 | * existing engines. |
| 1826 | */ |
Daniele Ceraolo Spurio | 3de6f85 | 2019-04-02 13:10:32 -0700 | [diff] [blame] | 1827 | void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1828 | { |
Daniele Ceraolo Spurio | 0138575 | 2019-06-19 18:00:18 -0700 | [diff] [blame] | 1829 | struct drm_i915_private *i915 = uncore->i915; |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1830 | enum forcewake_domains fw_domains = uncore->fw_domains; |
| 1831 | enum forcewake_domain_id domain_id; |
| 1832 | int i; |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1833 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1834 | if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11) |
| 1835 | return; |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1836 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1837 | for (i = 0; i < I915_MAX_VCS; i++) { |
| 1838 | domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1839 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1840 | if (HAS_ENGINE(i915, _VCS(i))) |
| 1841 | continue; |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1842 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1843 | if (fw_domains & BIT(domain_id)) |
| 1844 | fw_domain_fini(uncore, domain_id); |
| 1845 | } |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1846 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1847 | for (i = 0; i < I915_MAX_VECS; i++) { |
| 1848 | domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1849 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1850 | if (HAS_ENGINE(i915, _VECS(i))) |
| 1851 | continue; |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1852 | |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1853 | if (fw_domains & BIT(domain_id)) |
| 1854 | fw_domain_fini(uncore, domain_id); |
Oscar Mateo | 26376a7 | 2018-03-16 14:14:49 +0200 | [diff] [blame] | 1855 | } |
| 1856 | } |
| 1857 | |
Daniele Ceraolo Spurio | 3de6f85 | 2019-04-02 13:10:32 -0700 | [diff] [blame] | 1858 | void intel_uncore_fini_mmio(struct intel_uncore *uncore) |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1859 | { |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1860 | if (intel_uncore_has_forcewake(uncore)) { |
| 1861 | iosf_mbi_punit_acquire(); |
| 1862 | iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( |
| 1863 | &uncore->pmic_bus_access_nb); |
| 1864 | intel_uncore_forcewake_reset(uncore); |
Daniele Ceraolo Spurio | f833cdb | 2019-06-19 18:00:20 -0700 | [diff] [blame] | 1865 | intel_uncore_fw_domains_fini(uncore); |
Daniele Ceraolo Spurio | 2e81bc6 | 2019-06-19 18:00:19 -0700 | [diff] [blame] | 1866 | iosf_mbi_punit_release(); |
| 1867 | } |
| 1868 | |
Daniele Ceraolo Spurio | 25286aa | 2019-03-19 11:35:40 -0700 | [diff] [blame] | 1869 | uncore_mmio_cleanup(uncore); |
Ben Widawsky | 0b27448 | 2013-10-04 21:22:51 -0700 | [diff] [blame] | 1870 | } |
| 1871 | |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1872 | static const struct reg_whitelist { |
| 1873 | i915_reg_t offset_ldw; |
| 1874 | i915_reg_t offset_udw; |
| 1875 | u16 gen_mask; |
| 1876 | u8 size; |
| 1877 | } reg_read_whitelist[] = { { |
| 1878 | .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), |
| 1879 | .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), |
Jordan Justen | 2b92a82 | 2019-07-25 17:24:11 -0700 | [diff] [blame] | 1880 | .gen_mask = INTEL_GEN_MASK(4, 12), |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1881 | .size = 8 |
| 1882 | } }; |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1883 | |
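/*
 * Userspace encodes flags in the low bits of drm_i915_reg_read.offset:
 * because every whitelist entry size is a power of two,
 * (offset & -entry->size) recovers the aligned register offset while
 * (offset & (entry->size - 1)) yields the flags, e.g. I915_REG_READ_8B_WA.
 */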
| 1884 | int i915_reg_read_ioctl(struct drm_device *dev, |
| 1885 | void *data, struct drm_file *file) |
| 1886 | { |
Tvrtko Ursulin | 8ed3a62 | 2019-06-10 13:06:04 +0100 | [diff] [blame] | 1887 | struct drm_i915_private *i915 = to_i915(dev); |
| 1888 | struct intel_uncore *uncore = &i915->uncore; |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1889 | struct drm_i915_reg_read *reg = data; |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1890 | struct reg_whitelist const *entry; |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 1891 | intel_wakeref_t wakeref; |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1892 | unsigned int flags; |
| 1893 | int remain; |
| 1894 | int ret = 0; |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1895 | |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1896 | entry = reg_read_whitelist; |
| 1897 | remain = ARRAY_SIZE(reg_read_whitelist); |
| 1898 | while (remain) { |
| 1899 | u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); |
| 1900 | |
| 1901 | GEM_BUG_ON(!is_power_of_2(entry->size)); |
| 1902 | GEM_BUG_ON(entry->size > 8); |
| 1903 | GEM_BUG_ON(entry_offset & (entry->size - 1)); |
| 1904 | |
Tvrtko Ursulin | 8ed3a62 | 2019-06-10 13:06:04 +0100 | [diff] [blame] | 1905 | if (INTEL_INFO(i915)->gen_mask & entry->gen_mask && |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1906 | entry_offset == (reg->offset & -entry->size)) |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1907 | break; |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1908 | entry++; |
| 1909 | remain--; |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1910 | } |
| 1911 | |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1912 | if (!remain) |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1913 | return -EINVAL; |
| 1914 | |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1915 | flags = reg->offset & (entry->size - 1); |
Chris Wilson | 648a9bc | 2015-07-16 12:37:56 +0100 | [diff] [blame] | 1916 | |
Daniele Ceraolo Spurio | c447ff7 | 2019-06-13 16:21:55 -0700 | [diff] [blame] | 1917 | with_intel_runtime_pm(&i915->runtime_pm, wakeref) { |
Chris Wilson | d4225a5 | 2019-01-14 14:21:23 +0000 | [diff] [blame] | 1918 | if (entry->size == 8 && flags == I915_REG_READ_8B_WA) |
Tvrtko Ursulin | 8ed3a62 | 2019-06-10 13:06:04 +0100 | [diff] [blame] | 1919 | reg->val = intel_uncore_read64_2x32(uncore, |
| 1920 | entry->offset_ldw, |
| 1921 | entry->offset_udw); |
Chris Wilson | d4225a5 | 2019-01-14 14:21:23 +0000 | [diff] [blame] | 1922 | else if (entry->size == 8 && flags == 0) |
Tvrtko Ursulin | 8ed3a62 | 2019-06-10 13:06:04 +0100 | [diff] [blame] | 1923 | reg->val = intel_uncore_read64(uncore, |
| 1924 | entry->offset_ldw); |
Chris Wilson | d4225a5 | 2019-01-14 14:21:23 +0000 | [diff] [blame] | 1925 | else if (entry->size == 4 && flags == 0) |
Tvrtko Ursulin | 8ed3a62 | 2019-06-10 13:06:04 +0100 | [diff] [blame] | 1926 | reg->val = intel_uncore_read(uncore, entry->offset_ldw); |
Chris Wilson | d4225a5 | 2019-01-14 14:21:23 +0000 | [diff] [blame] | 1927 | else if (entry->size == 2 && flags == 0) |
Tvrtko Ursulin | 8ed3a62 | 2019-06-10 13:06:04 +0100 | [diff] [blame] | 1928 | reg->val = intel_uncore_read16(uncore, |
| 1929 | entry->offset_ldw); |
Chris Wilson | d4225a5 | 2019-01-14 14:21:23 +0000 | [diff] [blame] | 1930 | else if (entry->size == 1 && flags == 0) |
Tvrtko Ursulin | 8ed3a62 | 2019-06-10 13:06:04 +0100 | [diff] [blame] | 1931 | reg->val = intel_uncore_read8(uncore, |
| 1932 | entry->offset_ldw); |
Chris Wilson | d4225a5 | 2019-01-14 14:21:23 +0000 | [diff] [blame] | 1933 | else |
| 1934 | ret = -EINVAL; |
| 1935 | } |
Joonas Lahtinen | 3fd3a6f | 2017-09-13 14:52:55 +0300 | [diff] [blame] | 1936 | |
Paulo Zanoni | cf67c70 | 2014-04-01 14:55:08 -0300 | [diff] [blame] | 1937 | return ret; |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1938 | } |
| 1939 | |
Michel Thierry | e34b034 | 2018-04-05 17:00:48 +0300 | [diff] [blame] | 1940 | /** |
Michal Wajdeczko | 1d1a977 | 2017-04-07 16:01:44 +0000 | [diff] [blame] | 1941 | * __intel_wait_for_register_fw - wait until register matches expected state |
Daniele Ceraolo Spurio | d2d551c | 2019-03-25 14:49:38 -0700 | [diff] [blame] | 1942 | * @uncore: the struct intel_uncore |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 1943 | * @reg: the register to read |
| 1944 | * @mask: mask to apply to register value |
| 1945 | * @value: expected value |
Michal Wajdeczko | 1d1a977 | 2017-04-07 16:01:44 +0000 | [diff] [blame] | 1946 | * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait |
| 1947 | * @slow_timeout_ms: slow timeout in milliseconds |
| 1948 | * @out_value: optional placeholder to hold register value |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 1949 | * |
| 1950 | * This routine waits until the target register @reg contains the expected |
Daniel Vetter | 3d466cd | 2016-07-15 21:48:05 +0200 | [diff] [blame] | 1951 | * @value after applying the @mask, i.e. it waits until :: |
| 1952 | * |
| 1953 | * (I915_READ_FW(reg) & mask) == value |
| 1954 | * |
Michal Wajdeczko | 1d1a977 | 2017-04-07 16:01:44 +0000 | [diff] [blame] | 1955 | * Otherwise, the wait will time out after @slow_timeout_ms milliseconds. |
Michal Wajdeczko | 6976e74 | 2017-04-10 12:17:47 +0000 | [diff] [blame] | 1956 | * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us |
Chris Wilson | 84d84cb | 2017-04-11 12:27:05 +0100 | [diff] [blame] | 1957 | * must not be larger than 20,000 microseconds. |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 1958 | * |
| 1959 | * Note that this routine assumes the caller holds forcewake asserted; it is |
| 1960 | * not suitable for very long waits. See intel_wait_for_register() if you |
| 1961 | * wish to wait without holding forcewake for the duration (i.e. you expect |
| 1962 | * the wait to be slow). |
| 1963 | * |
Michal Wajdeczko | e4661f1 | 2019-08-02 12:47:39 +0000 | [diff] [blame] | 1964 | * Return: 0 if the register matches the desired condition, or -ETIMEDOUT. |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 1965 | */ |
Daniele Ceraolo Spurio | d2d551c | 2019-03-25 14:49:38 -0700 | [diff] [blame] | 1966 | int __intel_wait_for_register_fw(struct intel_uncore *uncore, |
Michal Wajdeczko | 1d1a977 | 2017-04-07 16:01:44 +0000 | [diff] [blame] | 1967 | i915_reg_t reg, |
Michal Wajdeczko | 3fc7d86b | 2017-04-10 09:38:17 +0000 | [diff] [blame] | 1968 | u32 mask, |
| 1969 | u32 value, |
| 1970 | unsigned int fast_timeout_us, |
| 1971 | unsigned int slow_timeout_ms, |
Michal Wajdeczko | 1d1a977 | 2017-04-07 16:01:44 +0000 | [diff] [blame] | 1972 | u32 *out_value) |
Mika Kuoppala | 7fd2d26 | 2015-06-18 12:51:40 +0300 | [diff] [blame] | 1973 | { |
Daniel Vetter | ff26ffa | 2017-05-10 17:19:32 +0200 | [diff] [blame] | 1974 | u32 uninitialized_var(reg_value); |
Daniele Ceraolo Spurio | d2d551c | 2019-03-25 14:49:38 -0700 | [diff] [blame] | 1975 | #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value) |
Michal Wajdeczko | 1d1a977 | 2017-04-07 16:01:44 +0000 | [diff] [blame] | 1976 | int ret; |
| 1977 | |
Michal Wajdeczko | 6976e74 | 2017-04-10 12:17:47 +0000 | [diff] [blame] | 1978 | /* Catch any overuse of this function */ |
Chris Wilson | 84d84cb | 2017-04-11 12:27:05 +0100 | [diff] [blame] | 1979 | might_sleep_if(slow_timeout_ms); |
| 1980 | GEM_BUG_ON(fast_timeout_us > 20000); |
Michal Wajdeczko | 6976e74 | 2017-04-10 12:17:47 +0000 | [diff] [blame] | 1981 | |
Chris Wilson | 84d84cb | 2017-04-11 12:27:05 +0100 | [diff] [blame] | 1982 | ret = -ETIMEDOUT; |
| 1983 | if (fast_timeout_us && fast_timeout_us <= 20000) |
Michal Wajdeczko | 1d1a977 | 2017-04-07 16:01:44 +0000 | [diff] [blame] | 1984 | ret = _wait_for_atomic(done, fast_timeout_us, 0); |
Daniel Vetter | ff26ffa | 2017-05-10 17:19:32 +0200 | [diff] [blame] | 1985 | if (ret && slow_timeout_ms) |
Michal Wajdeczko | 1d1a977 | 2017-04-07 16:01:44 +0000 | [diff] [blame] | 1986 | ret = wait_for(done, slow_timeout_ms); |
Chris Wilson | 84d84cb | 2017-04-11 12:27:05 +0100 | [diff] [blame] | 1987 | |
Michal Wajdeczko | 1d1a977 | 2017-04-07 16:01:44 +0000 | [diff] [blame] | 1988 | if (out_value) |
| 1989 | *out_value = reg_value; |
Chris Wilson | 84d84cb | 2017-04-11 12:27:05 +0100 | [diff] [blame] | 1990 | |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 1991 | return ret; |
| 1992 | #undef done |
| 1993 | } |
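
/*
 * Illustrative usage (a hypothetical caller; forcewake for the register
 * must already be held, and the 500us budget keeps the wait atomic-safe):
 *
 *	err = __intel_wait_for_register_fw(uncore, GEN6_GT_THREAD_STATUS_REG,
 *					   GEN6_GT_THREAD_STATUS_CORE_MASK, 0,
 *					   500, 0, NULL);
 */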
| 1994 | |
| 1995 | /** |
Sean Paul | 23fdbdd | 2018-01-08 14:55:36 -0500 | [diff] [blame] | 1996 | * __intel_wait_for_register - wait until register matches expected state |
Daniele Ceraolo Spurio | baba6e5 | 2019-03-25 14:49:40 -0700 | [diff] [blame] | 1997 | * @uncore: the struct intel_uncore |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 1998 | * @reg: the register to read |
| 1999 | * @mask: mask to apply to register value |
| 2000 | * @value: expected value |
Sean Paul | 23fdbdd | 2018-01-08 14:55:36 -0500 | [diff] [blame] | 2001 | * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait |
| 2002 | * @slow_timeout_ms: slow timeout in milliseconds |
| 2003 | * @out_value: optional placeholder to hold register value |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 2004 | * |
| 2005 | * This routine waits until the target register @reg contains the expected |
Daniel Vetter | 3d466cd | 2016-07-15 21:48:05 +0200 | [diff] [blame] | 2006 | * @value after applying the @mask, i.e. it waits until :: |
| 2007 | * |
| 2008 | * (I915_READ(reg) & mask) == value |
| 2009 | * |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 2010 | * Otherwise, the wait will time out after @slow_timeout_ms milliseconds. |
| 2011 | * |
Michal Wajdeczko | e4661f1 | 2019-08-02 12:47:39 +0000 | [diff] [blame] | 2012 | * Return: 0 if the register matches the desired condition, or -ETIMEDOUT. |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 2013 | */ |
Daniele Ceraolo Spurio | 97a04e0 | 2019-03-25 14:49:39 -0700 | [diff] [blame] | 2014 | int __intel_wait_for_register(struct intel_uncore *uncore, |
| 2015 | i915_reg_t reg, |
| 2016 | u32 mask, |
| 2017 | u32 value, |
| 2018 | unsigned int fast_timeout_us, |
| 2019 | unsigned int slow_timeout_ms, |
| 2020 | u32 *out_value) |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 2021 | { |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 2022 | unsigned fw = |
Daniele Ceraolo Spurio | 4319382 | 2019-03-25 14:49:37 -0700 | [diff] [blame] | 2023 | intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); |
Sean Paul | 23fdbdd | 2018-01-08 14:55:36 -0500 | [diff] [blame] | 2024 | u32 reg_value; |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 2025 | int ret; |
| 2026 | |
Chris Wilson | 3df82dd4 | 2018-03-29 23:45:19 +0100 | [diff] [blame] | 2027 | might_sleep_if(slow_timeout_ms); |
Chris Wilson | 0564654 | 2017-04-11 11:13:38 +0100 | [diff] [blame] | 2028 | |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 2029 | spin_lock_irq(&uncore->lock); |
| 2030 | intel_uncore_forcewake_get__locked(uncore, fw); |
Chris Wilson | 0564654 | 2017-04-11 11:13:38 +0100 | [diff] [blame] | 2031 | |
Daniele Ceraolo Spurio | d2d551c | 2019-03-25 14:49:38 -0700 | [diff] [blame] | 2032 | ret = __intel_wait_for_register_fw(uncore, |
Chris Wilson | 0564654 | 2017-04-11 11:13:38 +0100 | [diff] [blame] | 2033 | reg, mask, value, |
Sean Paul | 23fdbdd | 2018-01-08 14:55:36 -0500 | [diff] [blame] | 2034 | fast_timeout_us, 0, ®_value); |
Chris Wilson | 0564654 | 2017-04-11 11:13:38 +0100 | [diff] [blame] | 2035 | |
Daniele Ceraolo Spurio | 272c7e5 | 2019-03-19 11:35:39 -0700 | [diff] [blame] | 2036 | intel_uncore_forcewake_put__locked(uncore, fw); |
| 2037 | spin_unlock_irq(&uncore->lock); |
Chris Wilson | 0564654 | 2017-04-11 11:13:38 +0100 | [diff] [blame] | 2038 | |
Chris Wilson | 3df82dd4 | 2018-03-29 23:45:19 +0100 | [diff] [blame] | 2039 | if (ret && slow_timeout_ms) |
Daniele Ceraolo Spurio | d2d551c | 2019-03-25 14:49:38 -0700 | [diff] [blame] | 2040 | ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore, |
| 2041 | reg), |
Sean Paul | 23fdbdd | 2018-01-08 14:55:36 -0500 | [diff] [blame] | 2042 | (reg_value & mask) == value, |
| 2043 | slow_timeout_ms * 1000, 10, 1000); |
| 2044 | |
Ville Syrjälä | 39806c3f | 2019-02-04 23:16:44 +0200 | [diff] [blame] | 2045 | /* just trace the final value */ |
| 2046 | trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true); |
| 2047 | |
Sean Paul | 23fdbdd | 2018-01-08 14:55:36 -0500 | [diff] [blame] | 2048 | if (out_value) |
| 2049 | *out_value = reg_value; |
Chris Wilson | 1758b90 | 2016-06-30 15:32:44 +0100 | [diff] [blame] | 2050 | |
| 2051 | return ret; |
Tomas Elf | d431440 | 2016-03-02 16:46:24 +0200 | [diff] [blame] | 2052 | } |
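
/*
 * Illustrative usage (a hypothetical caller): sleep for up to ~10ms
 * waiting for a register bit to clear, with forcewake handled
 * internally:
 *
 *	if (__intel_wait_for_register(uncore, reg, mask, 0, 2, 10, NULL))
 *		DRM_ERROR("register never cleared\n");
 */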
| 2053 | |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 2054 | bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore) |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 2055 | { |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 2056 | bool ret; |
| 2057 | |
| 2058 | spin_lock_irq(&uncore->debug->lock); |
| 2059 | ret = check_for_unclaimed_mmio(uncore); |
| 2060 | spin_unlock_irq(&uncore->debug->lock); |
| 2061 | |
| 2062 | return ret; |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 2063 | } |
Mika Kuoppala | 7571494 | 2015-12-16 09:26:48 +0200 | [diff] [blame] | 2064 | |
Mika Kuoppala | bc3b934 | 2016-01-08 15:51:20 +0200 | [diff] [blame] | 2065 | bool |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 2066 | intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) |
Mika Kuoppala | 7571494 | 2015-12-16 09:26:48 +0200 | [diff] [blame] | 2067 | { |
Chris Wilson | a167b1e | 2018-09-04 14:12:07 +0100 | [diff] [blame] | 2068 | bool ret = false; |
| 2069 | |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 2070 | spin_lock_irq(&uncore->debug->lock); |
Chris Wilson | a167b1e | 2018-09-04 14:12:07 +0100 | [diff] [blame] | 2071 | |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 2072 | if (unlikely(uncore->debug->unclaimed_mmio_check <= 0)) |
Chris Wilson | a167b1e | 2018-09-04 14:12:07 +0100 | [diff] [blame] | 2073 | goto out; |
Mika Kuoppala | 7571494 | 2015-12-16 09:26:48 +0200 | [diff] [blame] | 2074 | |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 2075 | if (unlikely(check_for_unclaimed_mmio(uncore))) { |
Chris Wilson | 7ef4ac6e | 2018-09-04 12:17:32 +0100 | [diff] [blame] | 2076 | if (!i915_modparams.mmio_debug) { |
Wambui Karuga | d0208cf | 2020-01-07 18:13:33 +0300 | [diff] [blame] | 2077 | drm_dbg(&uncore->i915->drm, |
| 2078 | "Unclaimed register detected, " |
| 2079 | "enabling oneshot unclaimed register reporting. " |
| 2080 | "Please use i915.mmio_debug=N for more information.\n"); |
Chris Wilson | 7ef4ac6e | 2018-09-04 12:17:32 +0100 | [diff] [blame] | 2081 | i915_modparams.mmio_debug++; |
| 2082 | } |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 2083 | uncore->debug->unclaimed_mmio_check--; |
Chris Wilson | a167b1e | 2018-09-04 14:12:07 +0100 | [diff] [blame] | 2084 | ret = true; |
Mika Kuoppala | 7571494 | 2015-12-16 09:26:48 +0200 | [diff] [blame] | 2085 | } |
Mika Kuoppala | bc3b934 | 2016-01-08 15:51:20 +0200 | [diff] [blame] | 2086 | |
Chris Wilson | a167b1e | 2018-09-04 14:12:07 +0100 | [diff] [blame] | 2087 | out: |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 2088 | spin_unlock_irq(&uncore->debug->lock); |
Chris Wilson | a167b1e | 2018-09-04 14:12:07 +0100 | [diff] [blame] | 2089 | |
| 2090 | return ret; |
Mika Kuoppala | 7571494 | 2015-12-16 09:26:48 +0200 | [diff] [blame] | 2091 | } |
Tvrtko Ursulin | 3756685 | 2016-04-12 14:37:31 +0100 | [diff] [blame] | 2092 | |
Tvrtko Ursulin | 3756685 | 2016-04-12 14:37:31 +0100 | [diff] [blame] | 2093 | /** |
| 2094 | * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access |
| 2095 | * a register |
Daniele Ceraolo Spurio | 4319382 | 2019-03-25 14:49:37 -0700 | [diff] [blame] | 2096 | * @uncore: pointer to struct intel_uncore |
Tvrtko Ursulin | 3756685 | 2016-04-12 14:37:31 +0100 | [diff] [blame] | 2097 | * @reg: register in question |
| 2098 | * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE |
| 2099 | * |
| 2100 | * Returns the set of forcewake domains required to be taken with, for example, |
| 2101 | * intel_uncore_forcewake_get() for the specified register to be accessible in the |
| 2102 | * specified mode (read, write or read/write) with raw mmio accessors. |
| 2103 | * |
| 2104 | * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires |
| 2105 | * the callers to do FIFO management on their own or risk losing writes. |
| 2106 | */ |
| 2107 | enum forcewake_domains |
Daniele Ceraolo Spurio | 4319382 | 2019-03-25 14:49:37 -0700 | [diff] [blame] | 2108 | intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, |
Tvrtko Ursulin | 3756685 | 2016-04-12 14:37:31 +0100 | [diff] [blame] | 2109 | i915_reg_t reg, unsigned int op) |
| 2110 | { |
| 2111 | enum forcewake_domains fw_domains = 0; |
| 2112 | |
Pankaj Bharadiya | a9f236d | 2020-01-15 09:14:54 +0530 | [diff] [blame] | 2113 | drm_WARN_ON(&uncore->i915->drm, !op); |
Tvrtko Ursulin | 3756685 | 2016-04-12 14:37:31 +0100 | [diff] [blame] | 2114 | |
Daniele Ceraolo Spurio | 4319382 | 2019-03-25 14:49:37 -0700 | [diff] [blame] | 2115 | if (!intel_uncore_has_forcewake(uncore)) |
Tvrtko Ursulin | 895833b | 2016-10-04 09:29:24 +0100 | [diff] [blame] | 2116 | return 0; |
| 2117 | |
Tvrtko Ursulin | 3756685 | 2016-04-12 14:37:31 +0100 | [diff] [blame] | 2118 | if (op & FW_REG_READ) |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 2119 | fw_domains = uncore->funcs.read_fw_domains(uncore, reg); |
Tvrtko Ursulin | 3756685 | 2016-04-12 14:37:31 +0100 | [diff] [blame] | 2120 | |
| 2121 | if (op & FW_REG_WRITE) |
Daniele Ceraolo Spurio | ccb2ace | 2019-06-19 18:00:16 -0700 | [diff] [blame] | 2122 | fw_domains |= uncore->funcs.write_fw_domains(uncore, reg); |
| 2123 | |
Pankaj Bharadiya | a9f236d | 2020-01-15 09:14:54 +0530 | [diff] [blame] | 2124 | drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains); |
Tvrtko Ursulin | 3756685 | 2016-04-12 14:37:31 +0100 | [diff] [blame] | 2125 | |
| 2126 | return fw_domains; |
| 2127 | } |
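
/*
 * Illustrative raw-access pattern this helper enables (a hypothetical
 * caller, mirroring __intel_wait_for_register() above):
 *
 *	fw = intel_uncore_forcewake_for_reg(uncore, reg,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, fw);
 *	intel_uncore_write_fw(uncore, reg,
 *			      intel_uncore_read_fw(uncore, reg) | bit);
 *	intel_uncore_forcewake_put__locked(uncore, fw);
 *	spin_unlock_irq(&uncore->lock);
 */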
Chris Wilson | 26e7a2a | 2017-02-13 17:15:33 +0000 | [diff] [blame] | 2128 | |
| 2129 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
Chris Wilson | 0757ac8 | 2017-04-12 10:21:43 +0100 | [diff] [blame] | 2130 | #include "selftests/mock_uncore.c" |
Chris Wilson | 26e7a2a | 2017-02-13 17:15:33 +0000 | [diff] [blame] | 2131 | #include "selftests/intel_uncore.c" |
| 2132 | #endif |