/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
        spin_lock_init(&mmio_debug->lock);
        mmio_debug->unclaimed_mmio_check = 1;
}

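/*
 * mmio_debug_suspend/resume() pause the unclaimed-mmio checks while
 * userspace has forcewake claimed (the "user bypass"). suspend_count
 * nests, so checking is only re-enabled once the last bypass ends.
 */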
static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
        lockdep_assert_held(&mmio_debug->lock);

        /* Save and disable mmio debugging for the user bypass */
        if (!mmio_debug->suspend_count++) {
                mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
                mmio_debug->unclaimed_mmio_check = 0;
        }
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
        lockdep_assert_held(&mmio_debug->lock);

        if (!--mmio_debug->suspend_count)
                mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}

static const char * const forcewake_domain_names[] = {
        "render",
        "blitter",
        "media",
        "vdbox0",
        "vdbox1",
        "vdbox2",
        "vdbox3",
        "vebox0",
        "vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
                return forcewake_domain_names[id];

        WARN_ON(id);

        return "unknown";
}

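/*
 * Helpers for the per-domain forcewake registers: fw_ack() reads the
 * domain's ack register, while fw_set()/fw_clear() write the request
 * register using masked bits (the upper 16 bits select which of the
 * lower 16 bits are actually updated).
 */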
#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
        /*
         * We don't really know if the powerwell for the forcewake domain we are
         * trying to reset here does exist at this point (engines could be fused
         * off in ICL+), so no waiting for acks
         */
        /* WaRsClearFWBitsAtReset:bdw,skl */
        fw_clear(d, 0xffff);
}

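/*
 * Arm the auto-release: instead of dropping the domain immediately, take
 * a wake_count reference and release it from a 1ms hrtimer, so that
 * back-to-back register accesses don't bounce the powerwell.
 */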
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
        GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
        d->uncore->fw_domains_timer |= d->mask;
        d->wake_count++;
        hrtimer_start_range_ns(&d->timer,
                               NSEC_PER_MSEC,
                               NSEC_PER_MSEC,
                               HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
               const u32 ack,
               const u32 value)
{
        return wait_for_atomic((fw_ack(d) & ack) == value,
                               FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
               const u32 ack)
{
        return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
             const u32 ack)
{
        return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
                add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
        }
}

enum ack_type {
        ACK_CLEAR = 0,
        ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
                                 const enum ack_type type)
{
        const u32 ack_bit = FORCEWAKE_KERNEL;
        const u32 value = type == ACK_SET ? ack_bit : 0;
        unsigned int pass;
        bool ack_detected;

        /*
         * There is a possibility of driver's wake request colliding
         * with hardware's own wake requests and that can cause
         * hardware to not deliver the driver's ack message.
         *
         * Use a fallback bit toggle to kick the gpu state machine
         * in the hope that the original ack will be delivered along with
         * the fallback ack.
         *
         * This workaround is described in HSDES #1604254524 and it's known as:
         * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
         * although the name is a bit misleading.
         */

        pass = 1;
        do {
                wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

                fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
                /* Give gt some time to relax before the polling frenzy */
                udelay(10 * pass);
                wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

                ack_detected = (fw_ack(d) & ack_bit) == value;

                fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
        } while (!ack_detected && pass++ < 10);

        DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
                         intel_uncore_forcewake_domain_to_str(d->id),
                         type == ACK_SET ? "set" : "clear",
                         fw_ack(d),
                         pass);

        return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
        if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
                return;

        if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
                fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
        fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
                add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
        }
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
        if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
                return;

        if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
                fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
        fw_clear(d, FORCEWAKE_KERNEL);
}

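/*
 * Wake a set of domains in two passes: first request the wake for every
 * domain (after making sure any previous ack has cleared), then wait for
 * all the acks. Overlapping the hardware wake-ups is faster than waking
 * each domain to completion in turn.
 */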
static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        unsigned int tmp;

        GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
                fw_domain_wait_ack_clear(d);
                fw_domain_get(d);
        }

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
                fw_domain_wait_ack_set(d);

        uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
                             enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        unsigned int tmp;

        GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
                fw_domain_wait_ack_clear_fallback(d);
                fw_domain_get(d);
        }

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
                fw_domain_wait_ack_set_fallback(d);

        uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        unsigned int tmp;

        GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
                fw_domain_put(d);

        uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
                 enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        unsigned int tmp;

        if (!fw_domains)
                return;

        GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
                fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
        u32 val;

        val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
        val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

        return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
        /*
         * w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        drm_WARN_ONCE(&uncore->i915->drm,
                      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
                      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
                                              enum forcewake_domains fw_domains)
{
        fw_domains_get(uncore, fw_domains);

        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
        __gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
        u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

        return count & GT_FIFO_FREE_ENTRIES_MASK;
}

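/*
 * Each MMIO write consumes an entry in the shared GT FIFO; stall here
 * until more than GT_FIFO_NUM_RESERVED_ENTRIES slots are free so the
 * FIFO is never pushed into its reserved space.
 */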
static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
        u32 n;

        /*
         * On VLV, the FIFO is shared by both SW and HW, so we need to
         * read FREE_ENTRIES every time.
         */
        if (IS_VALLEYVIEW(uncore->i915))
                n = fifo_free_entries(uncore);
        else
                n = uncore->fifo_count;

        if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
                if (wait_for_atomic((n = fifo_free_entries(uncore)) >
                                    GT_FIFO_NUM_RESERVED_ENTRIES,
                                    GT_FIFO_TIMEOUT_MS)) {
                        drm_dbg(&uncore->i915->drm,
                                "GT_FIFO timeout, entries: %u\n", n);
                        return;
                }
        }

        uncore->fifo_count = n - 1;
}

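/*
 * hrtimer callback backing the auto-release armed above: if the domain
 * was touched since the timer was armed (domain->active), keep it alive
 * for another tick; otherwise drop the timer's wake_count reference and
 * power the domain back down once the count reaches zero.
 */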
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
        struct intel_uncore_forcewake_domain *domain =
                container_of(timer, struct intel_uncore_forcewake_domain, timer);
        struct intel_uncore *uncore = domain->uncore;
        unsigned long irqflags;

        assert_rpm_device_not_suspended(uncore->rpm);

        if (xchg(&domain->active, false))
                return HRTIMER_RESTART;

        spin_lock_irqsave(&uncore->lock, irqflags);

        uncore->fw_domains_timer &= ~domain->mask;

        GEM_BUG_ON(!domain->wake_count);
        if (--domain->wake_count == 0)
                uncore->funcs.force_wake_put(uncore, domain->mask);

        spin_unlock_irqrestore(&uncore->lock, irqflags);

        return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
        enum forcewake_domains fw, active_domains;

        iosf_mbi_assert_punit_acquired();

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly. Wait until all pending
         * timers are run before holding.
         */
        while (1) {
                unsigned int tmp;

                active_domains = 0;

                for_each_fw_domain(domain, uncore, tmp) {
                        smp_store_mb(domain->active, false);
                        if (hrtimer_cancel(&domain->timer) == 0)
                                continue;

                        intel_uncore_fw_release_timer(&domain->timer);
                }

                spin_lock_irqsave(&uncore->lock, irqflags);

                for_each_fw_domain(domain, uncore, tmp) {
                        if (hrtimer_active(&domain->timer))
                                active_domains |= domain->mask;
                }

                if (active_domains == 0)
                        break;

                if (--retry_count == 0) {
                        drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
                        break;
                }

                spin_unlock_irqrestore(&uncore->lock, irqflags);
                cond_resched();
        }

        drm_WARN_ON(&uncore->i915->drm, active_domains);

        fw = uncore->fw_domains_active;
        if (fw)
                uncore->funcs.force_wake_put(uncore, fw);

        fw_domains_reset(uncore, uncore->fw_domains);
        assert_forcewakes_inactive(uncore);

        spin_unlock_irqrestore(&uncore->lock, irqflags);

        return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
        u32 dbg;

        dbg = __raw_uncore_read32(uncore, FPGA_DBG);
        if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
                return false;

        __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
        u32 cer;

        cer = __raw_uncore_read32(uncore, CLAIM_ER);
        if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
                return false;

        __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

        return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
        u32 fifodbg;

        fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

        if (unlikely(fifodbg)) {
                drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
                __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
        }

        return fifodbg;
}

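/*
 * Poll every unclaimed-access detector the platform provides (FPGA_DBG,
 * the VLV/CHV claim counter, the gen6 GT FIFO debug register), clearing
 * whatever was flagged. Checks are skipped while debugging is suspended
 * for a user bypass.
 */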
static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
        bool ret = false;

        lockdep_assert_held(&uncore->debug->lock);

        if (uncore->debug->suspend_count)
                return false;

        if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
                ret |= fpga_check_for_unclaimed_mmio(uncore);

        if (intel_uncore_has_dbg_unclaimed(uncore))
                ret |= vlv_check_for_unclaimed_mmio(uncore);

        if (intel_uncore_has_fifo(uncore))
                ret |= gen6_check_for_fifo_debug(uncore);

        return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
                                     unsigned int restore_forcewake)
{
        GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

        /* WaDisableShadowRegForCpd:chv */
        if (IS_CHERRYVIEW(uncore->i915)) {
                __raw_uncore_write32(uncore, GTFIFOCTL,
                                     __raw_uncore_read32(uncore, GTFIFOCTL) |
                                     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
                                     GT_FIFO_CTL_RC6_POLICY_STALL);
        }

        iosf_mbi_punit_acquire();
        intel_uncore_forcewake_reset(uncore);
        if (restore_forcewake) {
                spin_lock_irq(&uncore->lock);
                uncore->funcs.force_wake_get(uncore, restore_forcewake);

                if (intel_uncore_has_fifo(uncore))
                        uncore->fifo_count = fifo_free_entries(uncore);
                spin_unlock_irq(&uncore->lock);
        }
        iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
        if (!intel_uncore_has_forcewake(uncore))
                return;

        iosf_mbi_punit_acquire();
        iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
                &uncore->pmic_bus_access_nb);
        uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
        iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
        unsigned int restore_forcewake;

        if (intel_uncore_unclaimed_mmio(uncore))
                drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

        if (!intel_uncore_has_forcewake(uncore))
                return;

        restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
        forcewake_early_sanitize(uncore, restore_forcewake);

        iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
        if (!intel_uncore_has_forcewake(uncore))
                return;

        iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

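/*
 * Take a reference on each requested domain, but only issue the hardware
 * wake for domains whose refcount was zero; domains already held are
 * simply marked active so a pending auto-release timer keeps them on.
 */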
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        unsigned int tmp;

        fw_domains &= uncore->fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                if (domain->wake_count++) {
                        fw_domains &= ~domain->mask;
                        domain->active = true;
                }
        }

        if (fw_domains)
                uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and the reference should subsequently be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all domains to be kept awake, in which case @fw_domains would be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!uncore->funcs.force_wake_get)
                return;

        assert_rpm_wakelock_held(uncore->rpm);

        spin_lock_irqsave(&uncore->lock, irqflags);
        __intel_uncore_forcewake_get(uncore, fw_domains);
        spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
        spin_lock_irq(&uncore->lock);
        if (!uncore->user_forcewake_count++) {
                intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
                spin_lock(&uncore->debug->lock);
                mmio_debug_suspend(uncore->debug);
                spin_unlock(&uncore->debug->lock);
        }
        spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
        spin_lock_irq(&uncore->lock);
        if (!--uncore->user_forcewake_count) {
                spin_lock(&uncore->debug->lock);
                mmio_debug_resume(uncore->debug);

                if (check_for_unclaimed_mmio(uncore))
                        dev_info(uncore->i915->drm.dev,
                                 "Invalid mmio detected during user access\n");
                spin_unlock(&uncore->debug->lock);

                intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
        }
        spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
                                        enum forcewake_domains fw_domains)
{
        lockdep_assert_held(&uncore->lock);

        if (!uncore->funcs.force_wake_get)
                return;

        __intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        unsigned int tmp;

        fw_domains &= uncore->fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                GEM_BUG_ON(!domain->wake_count);

                if (--domain->wake_count) {
                        domain->active = true;
                        continue;
                }

                fw_domain_arm_timer(domain);
        }
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!uncore->funcs.force_wake_put)
                return;

        spin_lock_irqsave(&uncore->lock, irqflags);
        __intel_uncore_forcewake_put(uncore, fw_domains);
        spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
                                        enum forcewake_domains fw_domains)
{
        lockdep_assert_held(&uncore->lock);

        if (!uncore->funcs.force_wake_put)
                return;

        __intel_uncore_forcewake_put(uncore, fw_domains);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
        if (!uncore->funcs.force_wake_get)
                return;

        drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
                 "Expected all fw_domains to be inactive, but %08x are still on\n",
                 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
                              enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        unsigned int tmp;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
                return;

        if (!uncore->funcs.force_wake_get)
                return;

        spin_lock_irq(&uncore->lock);

        assert_rpm_wakelock_held(uncore->rpm);

        fw_domains &= uncore->fw_domains;
        drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
                 "Expected %08x fw_domains to be active, but %08x are off\n",
                 fw_domains, fw_domains & ~uncore->fw_domains_active);

        /*
         * Check that the caller has an explicit wakeref and we don't mistake
         * it for the auto wakeref.
         */
        for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                unsigned int actual = READ_ONCE(domain->wake_count);
                unsigned int expect = 1;

                if (uncore->fw_domains_timer & domain->mask)
                        expect++; /* pending automatic release */

                if (drm_WARN(&uncore->i915->drm, actual < expect,
                             "Expected domain %d to be held awake by caller, count=%d\n",
                             domain->id, actual))
                        break;
        }

        spin_unlock_irq(&uncore->lock);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
        if (offset < entry->start)
                return -1;
        else if (offset > entry->end)
                return 1;
        else
                return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
        unsigned int start__ = 0, end__ = (num); \
        typeof(base) result__ = NULL; \
        while (start__ < end__) { \
                unsigned int mid__ = start__ + (end__ - start__) / 2; \
                int ret__ = (cmp)((key), (base) + mid__); \
                if (ret__ < 0) { \
                        end__ = mid__; \
                } else if (ret__ > 0) { \
                        start__ = mid__ + 1; \
                } else { \
                        result__ = (base) + mid__; \
                        break; \
                } \
        } \
        result__; \
})

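/*
 * Map an MMIO offset to the forcewake domains it needs via binary search
 * of the platform's sorted range table.
 */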
static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
        const struct intel_forcewake_range *entry;

        entry = BSEARCH(offset,
                        uncore->fw_domains_table,
                        uncore->fw_domains_table_entries,
                        fw_range_cmp);

        if (!entry)
                return 0;

        /*
         * The list of FW domains depends on the SKU in gen11+ so we
         * can't determine it statically. We use FORCEWAKE_ALL and
         * translate it here to the list of available domains.
         */
        if (entry->domains == FORCEWAKE_ALL)
                return uncore->fw_domains;

        drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
                 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
                 entry->domains & ~uncore->fw_domains, offset);

        return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
        { .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
        (INTEL_GEN(dev_priv) >= 9 || \
         IS_CHERRYVIEW(dev_priv) || \
         IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset))) \
                __fwd = find_fw_domain(uncore, offset); \
        __fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
        find_fw_domain(uncore, offset)

#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
        find_fw_domain(uncore, offset)

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),    /* 0x2000 (base) */
        GEN6_RPNSWREQ,                  /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,             /* 0xA00C */
        RING_TAIL(GEN6_BSD_RING_BASE),  /* 0x12000 (base) */
        RING_TAIL(VEBOX_RING_BASE),     /* 0x1a000 (base) */
        RING_TAIL(BLT_RING_BASE),       /* 0x22000 (base) */
        /* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),            /* 0x2000 (base) */
        GEN6_RPNSWREQ,                          /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,                     /* 0xA00C */
        RING_TAIL(BLT_RING_BASE),               /* 0x22000 (base) */
        RING_TAIL(GEN11_BSD_RING_BASE),         /* 0x1C0000 (base) */
        RING_TAIL(GEN11_BSD2_RING_BASE),        /* 0x1C4000 (base) */
        RING_TAIL(GEN11_VEBOX_RING_BASE),       /* 0x1C8000 (base) */
        RING_TAIL(GEN11_BSD3_RING_BASE),        /* 0x1D0000 (base) */
        RING_TAIL(GEN11_BSD4_RING_BASE),        /* 0x1D4000 (base) */
        RING_TAIL(GEN11_VEBOX2_RING_BASE),      /* 0x1D8000 (base) */
        /* TODO: Other registers are not yet used */
};

static const i915_reg_t gen12_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),            /* 0x2000 (base) */
        GEN6_RPNSWREQ,                          /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,                     /* 0xA00C */
        RING_TAIL(BLT_RING_BASE),               /* 0x22000 (base) */
        RING_TAIL(GEN11_BSD_RING_BASE),         /* 0x1C0000 (base) */
        RING_TAIL(GEN11_BSD2_RING_BASE),        /* 0x1C4000 (base) */
        RING_TAIL(GEN11_VEBOX_RING_BASE),       /* 0x1C8000 (base) */
        RING_TAIL(GEN11_BSD3_RING_BASE),        /* 0x1D0000 (base) */
        RING_TAIL(GEN11_BSD4_RING_BASE),        /* 0x1D4000 (base) */
        RING_TAIL(GEN11_VEBOX2_RING_BASE),      /* 0x1D8000 (base) */
        /* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
        u32 offset = i915_mmio_reg_offset(*reg);

        if (key < offset)
                return -1;
        else if (key > offset)
                return 1;
        else
                return 0;
}

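/*
 * Generate the is_genN_shadowed() predicates: shadowed registers can be
 * written without grabbing forcewake, so the write paths first bsearch
 * the sorted per-gen tables above.
 */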
#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
{ \
        const i915_reg_t *regs = gen##x##_shadowed_regs; \
        return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
                       mmio_reg_cmp); \
}

__is_genX_shadowed(8)
__is_genX_shadowed(11)
__is_genX_shadowed(12)

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
        return FORCEWAKE_RENDER;
}

#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
                __fwd = find_fw_domain(uncore, offset); \
        __fwd; \
})

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        const u32 __offset = (offset); \
        if (!is_gen11_shadowed(__offset)) \
                __fwd = find_fw_domain(uncore, __offset); \
        __fwd; \
})

#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        const u32 __offset = (offset); \
        if (!is_gen12_shadowed(__offset)) \
                __fwd = find_fw_domain(uncore, __offset); \
        __fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
        GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
Tvrtko Ursulin9fc11172016-10-04 09:29:19 +01001068 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1069};
Tvrtko Ursulin6863b762016-04-12 14:37:29 +01001070
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001071/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1072static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1073 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
1074 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1075 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1076 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
1077 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1078 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
1079 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1080 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
1081 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1082 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
1083 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1084 GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
1085 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1086 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
1087 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
1088 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
1089 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
Mika Kuoppalac9f8d1872019-09-13 17:16:50 +03001090 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
1091 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
1092 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
1093 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
1094 GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER),
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001095 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1096 GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
1097 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1098 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1099 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
1100 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
1101 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
1102 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1103 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
1104 GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
1105};
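
/*
 * Worked example of consuming the table above via find_fw_domain()
 * (defined earlier in this file); values read straight off the ranges:
 *
 *	find_fw_domain(uncore, 0xb400)   == FORCEWAKE_RENDER
 *	find_fw_domain(uncore, 0x50000)  == 0 (no forcewake needed)
 *	find_fw_domain(uncore, 0x1d0000) == FORCEWAKE_MEDIA_VDBOX2
 *
 * so a single sorted table covers the render/blitter split, the large
 * forcewake-free window at 0x40000-0x1bffff and the per-engine media
 * wells.
 */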
1106
Michel Thierrycf82d9d2019-09-13 17:16:51 +03001107/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1108static const struct intel_forcewake_range __gen12_fw_ranges[] = {
1109 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
1110 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1111 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1112 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
1113 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1114 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
1115 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1116 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
1117 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1118 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
1119 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1120 GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
1121 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1122 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
1123 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
1124 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
1125 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1126 GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
1127 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1128 GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER),
1129 GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER),
1130 GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER),
1131 GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER),
1132 GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER),
1133 GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER),
1134 GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER),
1135 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1136 GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
1137 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1138 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1139 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
1140 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
1141 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
1142 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1143 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
1144 GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
1145};
1146
Chris Wilson907b28c2013-07-19 20:36:52 +01001147static void
Daniele Ceraolo Spurio6ebc9692019-03-19 11:35:41 -07001148ilk_dummy_write(struct intel_uncore *uncore)
Chris Wilson907b28c2013-07-19 20:36:52 +01001149{
 1150	/* WaIssueDummyWriteToWakeupFromRC6:ilk
 1151	 * Issue a dummy write to wake up the chip from rc6 before touching it
 1152	 * for real. MI_MODE is masked, hence harmless to write 0 into. */
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001153 __raw_uncore_write32(uncore, MI_MODE, 0);
Chris Wilson907b28c2013-07-19 20:36:52 +01001154}
1155
1156static void
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07001157__unclaimed_reg_debug(struct intel_uncore *uncore,
Mika Kuoppala9c053502016-01-08 15:51:19 +02001158 const i915_reg_t reg,
1159 const bool read,
1160 const bool before)
Chris Wilson907b28c2013-07-19 20:36:52 +01001161{
Pankaj Bharadiyaa9f236d2020-01-15 09:14:54 +05301162 if (drm_WARN(&uncore->i915->drm,
1163 check_for_unclaimed_mmio(uncore) && !before,
1164 "Unclaimed %s register 0x%x\n",
1165 read ? "read from" : "write to",
1166 i915_mmio_reg_offset(reg)))
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001167 /* Only report the first N failures */
1168 i915_modparams.mmio_debug--;
Chris Wilson907b28c2013-07-19 20:36:52 +01001169}
1170
Mika Kuoppala9c053502016-01-08 15:51:19 +02001171static inline void
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07001172unclaimed_reg_debug(struct intel_uncore *uncore,
Mika Kuoppala9c053502016-01-08 15:51:19 +02001173 const i915_reg_t reg,
1174 const bool read,
1175 const bool before)
1176{
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001177 if (likely(!i915_modparams.mmio_debug))
Mika Kuoppala9c053502016-01-08 15:51:19 +02001178 return;
1179
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01001180 /* interrupts are disabled and re-enabled around uncore->lock usage */
1181 lockdep_assert_held(&uncore->lock);
1182
1183 if (before)
1184 spin_lock(&uncore->debug->lock);
1185
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07001186 __unclaimed_reg_debug(uncore, reg, read, before);
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01001187
1188 if (!before)
1189 spin_unlock(&uncore->debug->lock);
Mika Kuoppala9c053502016-01-08 15:51:19 +02001190}
1191
Chris Wilson51f67882015-01-16 11:34:36 +02001192#define GEN2_READ_HEADER(x) \
Ben Widawsky5d738792013-10-04 21:24:53 -07001193 u##x val = 0; \
Daniele Ceraolo Spurio87b391b92019-06-13 16:21:50 -07001194 assert_rpm_wakelock_held(uncore->rpm);
Ben Widawsky5d738792013-10-04 21:24:53 -07001195
Chris Wilson51f67882015-01-16 11:34:36 +02001196#define GEN2_READ_FOOTER \
Ben Widawsky5d738792013-10-04 21:24:53 -07001197 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1198 return val
1199
Chris Wilson51f67882015-01-16 11:34:36 +02001200#define __gen2_read(x) \
Ben Widawsky0b274482013-10-04 21:22:51 -07001201static u##x \
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001202gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
Chris Wilson51f67882015-01-16 11:34:36 +02001203 GEN2_READ_HEADER(x); \
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001204 val = __raw_uncore_read##x(uncore, reg); \
Chris Wilson51f67882015-01-16 11:34:36 +02001205 GEN2_READ_FOOTER; \
Ben Widawsky39670182013-10-04 21:22:53 -07001206}
1207
1208#define __gen5_read(x) \
1209static u##x \
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001210gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
Chris Wilson51f67882015-01-16 11:34:36 +02001211 GEN2_READ_HEADER(x); \
Daniele Ceraolo Spurio6ebc9692019-03-19 11:35:41 -07001212 ilk_dummy_write(uncore); \
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001213 val = __raw_uncore_read##x(uncore, reg); \
Chris Wilson51f67882015-01-16 11:34:36 +02001214 GEN2_READ_FOOTER; \
Ben Widawsky39670182013-10-04 21:22:53 -07001215}
1216
Chris Wilson51f67882015-01-16 11:34:36 +02001217__gen5_read(8)
1218__gen5_read(16)
1219__gen5_read(32)
1220__gen5_read(64)
1221__gen2_read(8)
1222__gen2_read(16)
1223__gen2_read(32)
1224__gen2_read(64)
1225
1226#undef __gen5_read
1227#undef __gen2_read
1228
1229#undef GEN2_READ_FOOTER
1230#undef GEN2_READ_HEADER
1231
1232#define GEN6_READ_HEADER(x) \
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001233 u32 offset = i915_mmio_reg_offset(reg); \
Chris Wilson51f67882015-01-16 11:34:36 +02001234 unsigned long irqflags; \
1235 u##x val = 0; \
Daniele Ceraolo Spurio87b391b92019-06-13 16:21:50 -07001236 assert_rpm_wakelock_held(uncore->rpm); \
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07001237 spin_lock_irqsave(&uncore->lock, irqflags); \
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07001238 unclaimed_reg_debug(uncore, reg, true, true)
Chris Wilson51f67882015-01-16 11:34:36 +02001239
1240#define GEN6_READ_FOOTER \
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07001241 unclaimed_reg_debug(uncore, reg, true, false); \
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07001242 spin_unlock_irqrestore(&uncore->lock, irqflags); \
Chris Wilson51f67882015-01-16 11:34:36 +02001243 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1244 return val
1245
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001246static noinline void ___force_wake_auto(struct intel_uncore *uncore,
Tvrtko Ursulinc521b0c2016-10-04 09:29:18 +01001247 enum forcewake_domains fw_domains)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001248{
1249 struct intel_uncore_forcewake_domain *domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001250 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001251
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001252 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001253
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001254 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
Tvrtko Ursulinc521b0c2016-10-04 09:29:18 +01001255 fw_domain_arm_timer(domain);
1256
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001257 uncore->funcs.force_wake_get(uncore, fw_domains);
Tvrtko Ursulinc521b0c2016-10-04 09:29:18 +01001258}
1259
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001260static inline void __force_wake_auto(struct intel_uncore *uncore,
Tvrtko Ursulinc521b0c2016-10-04 09:29:18 +01001261 enum forcewake_domains fw_domains)
1262{
Chris Wilson77adbd82019-07-08 16:49:14 +01001263 GEM_BUG_ON(!fw_domains);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001264
Tvrtko Ursulin003342a2016-10-04 09:29:17 +01001265 /* Turn on all requested but inactive supported forcewake domains. */
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001266 fw_domains &= uncore->fw_domains;
1267 fw_domains &= ~uncore->fw_domains_active;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001268
Tvrtko Ursulinc521b0c2016-10-04 09:29:18 +01001269 if (fw_domains)
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001270 ___force_wake_auto(uncore, fw_domains);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001271}
1272
Daniele Ceraolo Spurioccfceda2017-02-03 17:23:29 -08001273#define __gen_read(func, x) \
Ben Widawsky39670182013-10-04 21:22:53 -07001274static u##x \
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001275func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
Tvrtko Ursulin6863b762016-04-12 14:37:29 +01001276 enum forcewake_domains fw_engine; \
Chris Wilson51f67882015-01-16 11:34:36 +02001277 GEN6_READ_HEADER(x); \
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07001278 fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
Tvrtko Ursulin6863b762016-04-12 14:37:29 +01001279 if (fw_engine) \
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07001280 __force_wake_auto(uncore, fw_engine); \
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001281 val = __raw_uncore_read##x(uncore, reg); \
Chris Wilson51f67882015-01-16 11:34:36 +02001282 GEN6_READ_FOOTER; \
Chris Wilson907b28c2013-07-19 20:36:52 +01001283}
Deepak S940aece2013-11-23 14:55:43 +05301284
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07001285#define __gen_reg_read_funcs(func) \
1286static enum forcewake_domains \
1287func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
1288 return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
1289} \
1290\
1291__gen_read(func, 8) \
1292__gen_read(func, 16) \
1293__gen_read(func, 32) \
1294__gen_read(func, 64)
Ben Widawsky39670182013-10-04 21:22:53 -07001295
Michel Thierrycf82d9d2019-09-13 17:16:51 +03001296__gen_reg_read_funcs(gen12_fwtable);
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07001297__gen_reg_read_funcs(gen11_fwtable);
1298__gen_reg_read_funcs(fwtable);
1299__gen_reg_read_funcs(gen6);
1300
1301#undef __gen_reg_read_funcs
Chris Wilson51f67882015-01-16 11:34:36 +02001302#undef GEN6_READ_FOOTER
1303#undef GEN6_READ_HEADER
Ben Widawsky5d738792013-10-04 21:24:53 -07001304
Chris Wilson51f67882015-01-16 11:34:36 +02001305#define GEN2_WRITE_HEADER \
Ben Widawsky5d738792013-10-04 21:24:53 -07001306 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
Daniele Ceraolo Spurio87b391b92019-06-13 16:21:50 -07001307 assert_rpm_wakelock_held(uncore->rpm); \
Chris Wilson907b28c2013-07-19 20:36:52 +01001308
Chris Wilson51f67882015-01-16 11:34:36 +02001309#define GEN2_WRITE_FOOTER
Ville Syrjälä0d965302013-12-02 14:23:02 +02001310
Chris Wilson51f67882015-01-16 11:34:36 +02001311#define __gen2_write(x) \
Ben Widawsky0b274482013-10-04 21:22:51 -07001312static void \
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001313gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
Chris Wilson51f67882015-01-16 11:34:36 +02001314 GEN2_WRITE_HEADER; \
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001315 __raw_uncore_write##x(uncore, reg, val); \
Chris Wilson51f67882015-01-16 11:34:36 +02001316 GEN2_WRITE_FOOTER; \
Ben Widawsky4032ef42013-10-04 21:22:54 -07001317}
1318
1319#define __gen5_write(x) \
1320static void \
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001321gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
Chris Wilson51f67882015-01-16 11:34:36 +02001322 GEN2_WRITE_HEADER; \
Daniele Ceraolo Spurio6ebc9692019-03-19 11:35:41 -07001323 ilk_dummy_write(uncore); \
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001324 __raw_uncore_write##x(uncore, reg, val); \
Chris Wilson51f67882015-01-16 11:34:36 +02001325 GEN2_WRITE_FOOTER; \
Ben Widawsky4032ef42013-10-04 21:22:54 -07001326}
1327
Chris Wilson51f67882015-01-16 11:34:36 +02001328__gen5_write(8)
1329__gen5_write(16)
1330__gen5_write(32)
Chris Wilson51f67882015-01-16 11:34:36 +02001331__gen2_write(8)
1332__gen2_write(16)
1333__gen2_write(32)
Chris Wilson51f67882015-01-16 11:34:36 +02001334
1335#undef __gen5_write
1336#undef __gen2_write
1337
1338#undef GEN2_WRITE_FOOTER
1339#undef GEN2_WRITE_HEADER
1340
1341#define GEN6_WRITE_HEADER \
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001342 u32 offset = i915_mmio_reg_offset(reg); \
Chris Wilson51f67882015-01-16 11:34:36 +02001343 unsigned long irqflags; \
1344 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
Daniele Ceraolo Spurio87b391b92019-06-13 16:21:50 -07001345 assert_rpm_wakelock_held(uncore->rpm); \
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07001346 spin_lock_irqsave(&uncore->lock, irqflags); \
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07001347 unclaimed_reg_debug(uncore, reg, false, true)
Chris Wilson51f67882015-01-16 11:34:36 +02001348
1349#define GEN6_WRITE_FOOTER \
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07001350 unclaimed_reg_debug(uncore, reg, false, false); \
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07001351 spin_unlock_irqrestore(&uncore->lock, irqflags)
Chris Wilson51f67882015-01-16 11:34:36 +02001352
Ben Widawsky4032ef42013-10-04 21:22:54 -07001353#define __gen6_write(x) \
1354static void \
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001355gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
Chris Wilson51f67882015-01-16 11:34:36 +02001356 GEN6_WRITE_HEADER; \
Mika Kuoppalaa3389082017-04-06 18:39:42 +03001357 if (NEEDS_FORCE_WAKE(offset)) \
Daniele Ceraolo Spurio6ebc9692019-03-19 11:35:41 -07001358 __gen6_gt_wait_for_fifo(uncore); \
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001359 __raw_uncore_write##x(uncore, reg, val); \
Chris Wilson51f67882015-01-16 11:34:36 +02001360 GEN6_WRITE_FOOTER; \
Ben Widawsky4032ef42013-10-04 21:22:54 -07001361}
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07001362__gen6_write(8)
1363__gen6_write(16)
1364__gen6_write(32)
Ben Widawsky4032ef42013-10-04 21:22:54 -07001365
Daniele Ceraolo Spurioccfceda2017-02-03 17:23:29 -08001366#define __gen_write(func, x) \
Ben Widawskyab2aa472013-11-02 21:07:00 -07001367static void \
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001368func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
Tvrtko Ursulin6863b762016-04-12 14:37:29 +01001369 enum forcewake_domains fw_engine; \
Chris Wilson51f67882015-01-16 11:34:36 +02001370 GEN6_WRITE_HEADER; \
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07001371 fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
Tvrtko Ursulin6863b762016-04-12 14:37:29 +01001372 if (fw_engine) \
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07001373 __force_wake_auto(uncore, fw_engine); \
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001374 __raw_uncore_write##x(uncore, reg, val); \
Chris Wilson51f67882015-01-16 11:34:36 +02001375 GEN6_WRITE_FOOTER; \
Ben Widawskyab2aa472013-11-02 21:07:00 -07001376}
Deepak S1938e592014-05-23 21:00:16 +05301377
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07001378#define __gen_reg_write_funcs(func) \
1379static enum forcewake_domains \
1380func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
1381 return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
1382} \
1383\
1384__gen_write(func, 8) \
1385__gen_write(func, 16) \
1386__gen_write(func, 32)
Ben Widawsky4032ef42013-10-04 21:22:54 -07001387
Michel Thierrycf82d9d2019-09-13 17:16:51 +03001388__gen_reg_write_funcs(gen12_fwtable);
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07001389__gen_reg_write_funcs(gen11_fwtable);
1390__gen_reg_write_funcs(fwtable);
1391__gen_reg_write_funcs(gen8);
1392
1393#undef __gen_reg_write_funcs
Chris Wilson51f67882015-01-16 11:34:36 +02001394#undef GEN6_WRITE_FOOTER
1395#undef GEN6_WRITE_HEADER
Chris Wilson907b28c2013-07-19 20:36:52 +01001396
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07001397#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
Yu Zhang43d942a2014-10-23 15:28:24 +08001398do { \
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001399 (uncore)->funcs.mmio_writeb = x##_write8; \
1400 (uncore)->funcs.mmio_writew = x##_write16; \
1401 (uncore)->funcs.mmio_writel = x##_write32; \
Yu Zhang43d942a2014-10-23 15:28:24 +08001402} while (0)
1403
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07001404#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
Yu Zhang43d942a2014-10-23 15:28:24 +08001405do { \
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001406 (uncore)->funcs.mmio_readb = x##_read8; \
1407 (uncore)->funcs.mmio_readw = x##_read16; \
1408 (uncore)->funcs.mmio_readl = x##_read32; \
1409 (uncore)->funcs.mmio_readq = x##_read64; \
Yu Zhang43d942a2014-10-23 15:28:24 +08001410} while (0)
1411
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07001412#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
1413do { \
1414 ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
1415 (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
1416} while (0)
1417
1418#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
1419do { \
1420 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
1421 (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
1422} while (0)
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001423
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001424static int __fw_domain_init(struct intel_uncore *uncore,
1425 enum forcewake_domain_id domain_id,
1426 i915_reg_t reg_set,
1427 i915_reg_t reg_ack)
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001428{
1429 struct intel_uncore_forcewake_domain *d;
1430
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001431 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1432 GEM_BUG_ON(uncore->fw_domain[domain_id]);
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001433
Michal Wajdeczko50d84412019-08-02 18:40:50 +00001434 if (i915_inject_probe_failure(uncore->i915))
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001435 return -ENOMEM;
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001436
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001437 d = kzalloc(sizeof(*d), GFP_KERNEL);
1438 if (!d)
1439 return -ENOMEM;
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001440
Pankaj Bharadiyaa9f236d2020-01-15 09:14:54 +05301441 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
1442 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
Chris Wilson6e3955a2017-03-23 10:19:43 +00001443
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001444 d->uncore = uncore;
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001445 d->wake_count = 0;
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001446 d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
1447 d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001448
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001449 d->id = domain_id;
1450
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001451 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1452 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1453 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001454 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
1455 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
1456 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
1457 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
1458 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
1459 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
1460
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001461 d->mask = BIT(domain_id);
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001462
Tvrtko Ursulina57a4a62016-04-07 17:04:32 +01001463 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1464 d->timer.function = intel_uncore_fw_release_timer;
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001465
Daniele Ceraolo Spurio535d8d272019-03-16 10:00:45 +00001466 uncore->fw_domains |= BIT(domain_id);
Mika Kuoppalaf9b39272015-01-28 14:43:24 +02001467
Daniele Ceraolo Spurio159367b2019-03-20 12:27:32 +00001468 fw_domain_reset(d);
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001469
1470 uncore->fw_domain[domain_id] = d;
1471
1472 return 0;
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001473}
1474
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001475static void fw_domain_fini(struct intel_uncore *uncore,
Oscar Mateo26376a72018-03-16 14:14:49 +02001476 enum forcewake_domain_id domain_id)
1477{
1478 struct intel_uncore_forcewake_domain *d;
1479
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001480 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1481
1482 d = fetch_and_zero(&uncore->fw_domain[domain_id]);
1483 if (!d)
Oscar Mateo26376a72018-03-16 14:14:49 +02001484 return;
1485
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001486 uncore->fw_domains &= ~BIT(domain_id);
Pankaj Bharadiyaa9f236d2020-01-15 09:14:54 +05301487 drm_WARN_ON(&uncore->i915->drm, d->wake_count);
1488 drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001489 kfree(d);
Oscar Mateo26376a72018-03-16 14:14:49 +02001490}
1491
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001492static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
1493{
1494 struct intel_uncore_forcewake_domain *d;
1495 int tmp;
1496
1497 for_each_fw_domain(d, uncore, tmp)
1498 fw_domain_fini(uncore, d->id);
1499}
1500
1501static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
Ben Widawsky0b274482013-10-04 21:22:51 -07001502{
Daniele Ceraolo Spurio01385752019-06-19 18:00:18 -07001503 struct drm_i915_private *i915 = uncore->i915;
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001504 int ret = 0;
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001505
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001506 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
Mika Kuoppala3225b2f2015-02-05 17:45:42 +02001507
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001508#define fw_domain_init(uncore__, id__, set__, ack__) \
1509 (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
1510
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001511 if (INTEL_GEN(i915) >= 11) {
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001512 int i;
1513
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001514 uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001515 uncore->funcs.force_wake_put = fw_domains_put;
1516 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001517 FORCEWAKE_RENDER_GEN9,
1518 FORCEWAKE_ACK_RENDER_GEN9);
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001519 fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001520 FORCEWAKE_BLITTER_GEN9,
1521 FORCEWAKE_ACK_BLITTER_GEN9);
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001522
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001523 for (i = 0; i < I915_MAX_VCS; i++) {
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001524 if (!HAS_ENGINE(i915, _VCS(i)))
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001525 continue;
1526
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001527 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001528 FORCEWAKE_MEDIA_VDBOX_GEN11(i),
1529 FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
1530 }
1531 for (i = 0; i < I915_MAX_VECS; i++) {
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001532 if (!HAS_ENGINE(i915, _VECS(i)))
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001533 continue;
1534
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001535 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
Daniele Ceraolo Spurioa89a70a2018-03-02 18:15:01 +02001536 FORCEWAKE_MEDIA_VEBOX_GEN11(i),
1537 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
1538 }
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001539 } else if (IS_GEN_RANGE(i915, 9, 10)) {
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001540 uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001541 uncore->funcs.force_wake_put = fw_domains_put;
1542 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001543 FORCEWAKE_RENDER_GEN9,
1544 FORCEWAKE_ACK_RENDER_GEN9);
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001545 fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001546 FORCEWAKE_BLITTER_GEN9,
1547 FORCEWAKE_ACK_BLITTER_GEN9);
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001548 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001549 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001550 } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
1551 uncore->funcs.force_wake_get = fw_domains_get;
1552 uncore->funcs.force_wake_put = fw_domains_put;
1553 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001554 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001555 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001556 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001557 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
1558 uncore->funcs.force_wake_get =
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001559 fw_domains_get_with_thread_status;
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001560 uncore->funcs.force_wake_put = fw_domains_put;
1561 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001562 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001563 } else if (IS_IVYBRIDGE(i915)) {
Ben Widawsky0b274482013-10-04 21:22:51 -07001564 u32 ecobus;
1565
1566 /* IVB configs may use multi-threaded forcewake */
1567
 1568	/* A small trick here: if the BIOS hasn't configured
1569 * MT forcewake, and if the device is in RC6, then
1570 * force_wake_mt_get will not wake the device and the
1571 * ECOBUS read will return zero. Which will be
1572 * (correctly) interpreted by the test below as MT
1573 * forcewake being disabled.
1574 */
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001575 uncore->funcs.force_wake_get =
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001576 fw_domains_get_with_thread_status;
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001577 uncore->funcs.force_wake_put = fw_domains_put;
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001578
Mika Kuoppalaf9b39272015-01-28 14:43:24 +02001579		/* We need to init first for ECOBUS access and then
1580		 * determine later if we want to reinit, in case MT access is
Mika Kuoppala6ea25562015-02-27 18:11:09 +02001581		 * not working. At this stage we don't know which flavour this
1582		 * ivb is, so it is better to also reset the gen6 fw registers
1583		 * before the ecobus check.
Mika Kuoppalaf9b39272015-01-28 14:43:24 +02001584 */
Mika Kuoppala6ea25562015-02-27 18:11:09 +02001585
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001586 __raw_uncore_write32(uncore, FORCEWAKE, 0);
Daniele Ceraolo Spurio6ebc9692019-03-19 11:35:41 -07001587 __raw_posting_read(uncore, ECOBUS);
Mika Kuoppala6ea25562015-02-27 18:11:09 +02001588
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001589 ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1590 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1591 if (ret)
1592 goto out;
Mika Kuoppalaf9b39272015-01-28 14:43:24 +02001593
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001594 spin_lock_irq(&uncore->lock);
1595 fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
Daniele Ceraolo Spurio6cc5ca72019-03-25 14:49:32 -07001596 ecobus = __raw_uncore_read32(uncore, ECOBUS);
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001597 fw_domains_put(uncore, FORCEWAKE_RENDER);
1598 spin_unlock_irq(&uncore->lock);
Ben Widawsky0b274482013-10-04 21:22:51 -07001599
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001600 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
Wambui Karugad0208cf2020-01-07 18:13:33 +03001601 drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
1602 drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001603 fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001604 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001605 FORCEWAKE, FORCEWAKE_ACK);
Ben Widawsky0b274482013-10-04 21:22:51 -07001606 }
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001607 } else if (IS_GEN(i915, 6)) {
1608 uncore->funcs.force_wake_get =
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001609 fw_domains_get_with_thread_status;
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001610 uncore->funcs.force_wake_put = fw_domains_put;
1611 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
Mika Kuoppala05a2fb12015-01-19 16:20:43 +02001612 FORCEWAKE, FORCEWAKE_ACK);
Ben Widawsky0b274482013-10-04 21:22:51 -07001613 }
Mika Kuoppala3225b2f2015-02-05 17:45:42 +02001614
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001615#undef fw_domain_init
1616
Mika Kuoppala3225b2f2015-02-05 17:45:42 +02001617 /* All future platforms are expected to require complex power gating */
Pankaj Bharadiya48a1b8d2020-01-15 09:14:53 +05301618 drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001619
1620out:
1621 if (ret)
1622 intel_uncore_fw_domains_fini(uncore);
1623
1624 return ret;
Mika Kuoppalaf9b39272015-01-28 14:43:24 +02001625}
1626
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001627#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
Tvrtko Ursulin15157972016-10-04 09:29:23 +01001628{ \
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001629 (uncore)->fw_domains_table = \
Tvrtko Ursulin15157972016-10-04 09:29:23 +01001630 (struct intel_forcewake_range *)(d); \
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001631 (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
Tvrtko Ursulin15157972016-10-04 09:29:23 +01001632}
1633
Hans de Goede264ec1a2017-02-10 11:28:02 +01001634static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1635 unsigned long action, void *data)
1636{
Daniele Ceraolo Spurio91026502019-06-13 16:21:51 -07001637 struct intel_uncore *uncore = container_of(nb,
1638 struct intel_uncore, pmic_bus_access_nb);
Hans de Goede264ec1a2017-02-10 11:28:02 +01001639
1640 switch (action) {
1641 case MBI_PMIC_BUS_ACCESS_BEGIN:
1642 /*
1643	 * Forcewake all now to make sure that we don't need to do a
1644	 * forcewake later, which on systems where this notifier gets
1645	 * called requires the punit to access the shared pmic i2c
1646	 * bus, which will be busy after this notification, leading to:
1647 * "render: timed out waiting for forcewake ack request."
1648 * errors.
Hans de Goedece305602017-11-10 16:03:01 +01001649 *
1650 * The notifier is unregistered during intel_runtime_suspend(),
1651 * so it's ok to access the HW here without holding a RPM
1652 * wake reference -> disable wakeref asserts for the time of
1653 * the access.
Hans de Goede264ec1a2017-02-10 11:28:02 +01001654 */
Daniele Ceraolo Spurio91026502019-06-13 16:21:51 -07001655 disable_rpm_wakeref_asserts(uncore->rpm);
1656 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1657 enable_rpm_wakeref_asserts(uncore->rpm);
Hans de Goede264ec1a2017-02-10 11:28:02 +01001658 break;
1659 case MBI_PMIC_BUS_ACCESS_END:
Daniele Ceraolo Spurio91026502019-06-13 16:21:51 -07001660 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
Hans de Goede264ec1a2017-02-10 11:28:02 +01001661 break;
1662 }
1663
1664 return NOTIFY_OK;
1665}
1666
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001667static int uncore_mmio_setup(struct intel_uncore *uncore)
Mika Kuoppalaf9b39272015-01-28 14:43:24 +02001668{
Daniele Ceraolo Spurio01385752019-06-19 18:00:18 -07001669 struct drm_i915_private *i915 = uncore->i915;
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001670 struct pci_dev *pdev = i915->drm.pdev;
1671 int mmio_bar;
1672 int mmio_size;
1673
1674 mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
1675 /*
1676 * Before gen4, the registers and the GTT are behind different BARs.
1677 * However, from gen4 onwards, the registers and the GTT are shared
1678 * in the same BAR, so we want to restrict this ioremap from
1679 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
1680 * the register BAR remains the same size for all the earlier
1681 * generations up to Ironlake.
1682 */
1683 if (INTEL_GEN(i915) < 5)
1684 mmio_size = 512 * 1024;
1685 else
1686 mmio_size = 2 * 1024 * 1024;
1687 uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
1688 if (uncore->regs == NULL) {
Wambui Karugad0208cf2020-01-07 18:13:33 +03001689 drm_err(&i915->drm, "failed to map registers\n");
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001690 return -EIO;
1691 }
1692
1693 return 0;
1694}
1695
1696static void uncore_mmio_cleanup(struct intel_uncore *uncore)
1697{
Daniele Ceraolo Spurio01385752019-06-19 18:00:18 -07001698 struct pci_dev *pdev = uncore->i915->drm.pdev;
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001699
1700 pci_iounmap(pdev, uncore->regs);
1701}
1702
Daniele Ceraolo Spurio01385752019-06-19 18:00:18 -07001703void intel_uncore_init_early(struct intel_uncore *uncore,
1704 struct drm_i915_private *i915)
Daniele Ceraolo Spurio6cbe88302019-04-02 13:10:31 -07001705{
1706 spin_lock_init(&uncore->lock);
Daniele Ceraolo Spurio01385752019-06-19 18:00:18 -07001707 uncore->i915 = i915;
1708 uncore->rpm = &i915->runtime_pm;
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01001709 uncore->debug = &i915->mmio_debug;
Daniele Ceraolo Spurio6cbe88302019-04-02 13:10:31 -07001710}
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001711
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001712static void uncore_raw_init(struct intel_uncore *uncore)
1713{
1714 GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
1715
1716 if (IS_GEN(uncore->i915, 5)) {
1717 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
1718 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
1719 } else {
1720 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
1721 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
1722 }
1723}
1724
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001725static int uncore_forcewake_init(struct intel_uncore *uncore)
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001726{
1727 struct drm_i915_private *i915 = uncore->i915;
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001728 int ret;
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001729
1730 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
1731
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001732 ret = intel_uncore_fw_domains_init(uncore);
1733 if (ret)
1734 return ret;
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001735 forcewake_early_sanitize(uncore, 0);
1736
1737 if (IS_GEN_RANGE(i915, 6, 7)) {
1738 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
1739
1740 if (IS_VALLEYVIEW(i915)) {
1741 ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
1742 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
1743 } else {
1744 ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
1745 }
1746 } else if (IS_GEN(i915, 8)) {
1747 if (IS_CHERRYVIEW(i915)) {
1748 ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
1749 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
1750 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
1751 } else {
1752 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
1753 ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
1754 }
1755 } else if (IS_GEN_RANGE(i915, 9, 10)) {
1756 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
1757 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
1758 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
Michel Thierrycf82d9d2019-09-13 17:16:51 +03001759 } else if (IS_GEN(i915, 11)) {
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001760 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
1761 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
1762 ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
Michel Thierrycf82d9d2019-09-13 17:16:51 +03001763 } else {
1764 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
1765 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
1766 ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001767 }
1768
1769 uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
1770 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001771
1772 return 0;
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001773}
1774
Daniele Ceraolo Spurio3de6f852019-04-02 13:10:32 -07001775int intel_uncore_init_mmio(struct intel_uncore *uncore)
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001776{
Daniele Ceraolo Spurio01385752019-06-19 18:00:18 -07001777 struct drm_i915_private *i915 = uncore->i915;
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001778 int ret;
1779
1780 ret = uncore_mmio_setup(uncore);
1781 if (ret)
1782 return ret;
Yu Zhangcf9d2892015-02-10 19:05:47 +08001783
Daniele Ceraolo Spurio5a0ba772019-03-25 14:49:33 -07001784 if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
1785 uncore->flags |= UNCORE_HAS_FORCEWAKE;
1786
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001787 if (!intel_uncore_has_forcewake(uncore)) {
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001788 uncore_raw_init(uncore);
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001789 } else {
1790 ret = uncore_forcewake_init(uncore);
1791 if (ret)
1792 goto out_mmio_cleanup;
1793 }
Imre Deaked493882014-10-23 19:23:21 +03001794
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07001795	/* make sure fw funcs are set if and only if we have fw */
1796 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
1797 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
1798 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
1799 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
1800
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07001801 if (HAS_FPGA_DBG_UNCLAIMED(i915))
1802 uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
1803
1804 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1805 uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
1806
1807 if (IS_GEN_RANGE(i915, 6, 7))
1808 uncore->flags |= UNCORE_HAS_FIFO;
1809
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001810 /* clear out unclaimed reg detection bit */
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01001811 if (intel_uncore_unclaimed_mmio(uncore))
Wambui Karugad0208cf2020-01-07 18:13:33 +03001812 drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001813
1814 return 0;
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001815
1816out_mmio_cleanup:
1817 uncore_mmio_cleanup(uncore);
1818
1819 return ret;
Ben Widawsky0b274482013-10-04 21:22:51 -07001820}
1821
Oscar Mateo26376a72018-03-16 14:14:49 +02001822/*
1823 * We might have detected that some engines are fused off after we initialized
1824 * the forcewake domains. Prune them, to make sure they only reference existing
1825 * engines.
1826 */
Daniele Ceraolo Spurio3de6f852019-04-02 13:10:32 -07001827void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
Oscar Mateo26376a72018-03-16 14:14:49 +02001828{
Daniele Ceraolo Spurio01385752019-06-19 18:00:18 -07001829 struct drm_i915_private *i915 = uncore->i915;
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001830 enum forcewake_domains fw_domains = uncore->fw_domains;
1831 enum forcewake_domain_id domain_id;
1832 int i;
Daniele Ceraolo Spuriof7de5022019-03-19 11:35:37 -07001833
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001834 if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
1835 return;
Oscar Mateo26376a72018-03-16 14:14:49 +02001836
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001837 for (i = 0; i < I915_MAX_VCS; i++) {
1838 domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
Oscar Mateo26376a72018-03-16 14:14:49 +02001839
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001840 if (HAS_ENGINE(i915, _VCS(i)))
1841 continue;
Oscar Mateo26376a72018-03-16 14:14:49 +02001842
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001843 if (fw_domains & BIT(domain_id))
1844 fw_domain_fini(uncore, domain_id);
1845 }
Oscar Mateo26376a72018-03-16 14:14:49 +02001846
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001847 for (i = 0; i < I915_MAX_VECS; i++) {
1848 domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
Oscar Mateo26376a72018-03-16 14:14:49 +02001849
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001850 if (HAS_ENGINE(i915, _VECS(i)))
1851 continue;
Oscar Mateo26376a72018-03-16 14:14:49 +02001852
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001853 if (fw_domains & BIT(domain_id))
1854 fw_domain_fini(uncore, domain_id);
Oscar Mateo26376a72018-03-16 14:14:49 +02001855 }
1856}
1857
Daniele Ceraolo Spurio3de6f852019-04-02 13:10:32 -07001858void intel_uncore_fini_mmio(struct intel_uncore *uncore)
Ben Widawsky0b274482013-10-04 21:22:51 -07001859{
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001860 if (intel_uncore_has_forcewake(uncore)) {
1861 iosf_mbi_punit_acquire();
1862 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
1863 &uncore->pmic_bus_access_nb);
1864 intel_uncore_forcewake_reset(uncore);
Daniele Ceraolo Spuriof833cdb2019-06-19 18:00:20 -07001865 intel_uncore_fw_domains_fini(uncore);
Daniele Ceraolo Spurio2e81bc62019-06-19 18:00:19 -07001866 iosf_mbi_punit_release();
1867 }
1868
Daniele Ceraolo Spurio25286aa2019-03-19 11:35:40 -07001869 uncore_mmio_cleanup(uncore);
Ben Widawsky0b274482013-10-04 21:22:51 -07001870}
1871
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001872static const struct reg_whitelist {
1873 i915_reg_t offset_ldw;
1874 i915_reg_t offset_udw;
1875 u16 gen_mask;
1876 u8 size;
1877} reg_read_whitelist[] = { {
1878 .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1879 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
Jordan Justen2b92a822019-07-25 17:24:11 -07001880 .gen_mask = INTEL_GEN_MASK(4, 12),
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001881 .size = 8
1882} };
Chris Wilson907b28c2013-07-19 20:36:52 +01001883
1884int i915_reg_read_ioctl(struct drm_device *dev,
1885 void *data, struct drm_file *file)
1886{
Tvrtko Ursulin8ed3a622019-06-10 13:06:04 +01001887 struct drm_i915_private *i915 = to_i915(dev);
1888 struct intel_uncore *uncore = &i915->uncore;
Chris Wilson907b28c2013-07-19 20:36:52 +01001889 struct drm_i915_reg_read *reg = data;
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001890 struct reg_whitelist const *entry;
Chris Wilson538ef962019-01-14 14:21:18 +00001891 intel_wakeref_t wakeref;
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001892 unsigned int flags;
1893 int remain;
1894 int ret = 0;
Chris Wilson907b28c2013-07-19 20:36:52 +01001895
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001896 entry = reg_read_whitelist;
1897 remain = ARRAY_SIZE(reg_read_whitelist);
1898 while (remain) {
1899 u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);
1900
1901 GEM_BUG_ON(!is_power_of_2(entry->size));
1902 GEM_BUG_ON(entry->size > 8);
1903 GEM_BUG_ON(entry_offset & (entry->size - 1));
1904
Tvrtko Ursulin8ed3a622019-06-10 13:06:04 +01001905 if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001906 entry_offset == (reg->offset & -entry->size))
Chris Wilson907b28c2013-07-19 20:36:52 +01001907 break;
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001908 entry++;
1909 remain--;
Chris Wilson907b28c2013-07-19 20:36:52 +01001910 }
1911
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001912 if (!remain)
Chris Wilson907b28c2013-07-19 20:36:52 +01001913 return -EINVAL;
1914
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001915 flags = reg->offset & (entry->size - 1);
Chris Wilson648a9bc2015-07-16 12:37:56 +01001916
Daniele Ceraolo Spurioc447ff72019-06-13 16:21:55 -07001917 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
Chris Wilsond4225a52019-01-14 14:21:23 +00001918 if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
Tvrtko Ursulin8ed3a622019-06-10 13:06:04 +01001919 reg->val = intel_uncore_read64_2x32(uncore,
1920 entry->offset_ldw,
1921 entry->offset_udw);
Chris Wilsond4225a52019-01-14 14:21:23 +00001922 else if (entry->size == 8 && flags == 0)
Tvrtko Ursulin8ed3a622019-06-10 13:06:04 +01001923 reg->val = intel_uncore_read64(uncore,
1924 entry->offset_ldw);
Chris Wilsond4225a52019-01-14 14:21:23 +00001925 else if (entry->size == 4 && flags == 0)
Tvrtko Ursulin8ed3a622019-06-10 13:06:04 +01001926 reg->val = intel_uncore_read(uncore, entry->offset_ldw);
Chris Wilsond4225a52019-01-14 14:21:23 +00001927 else if (entry->size == 2 && flags == 0)
Tvrtko Ursulin8ed3a622019-06-10 13:06:04 +01001928 reg->val = intel_uncore_read16(uncore,
1929 entry->offset_ldw);
Chris Wilsond4225a52019-01-14 14:21:23 +00001930 else if (entry->size == 1 && flags == 0)
Tvrtko Ursulin8ed3a622019-06-10 13:06:04 +01001931 reg->val = intel_uncore_read8(uncore,
1932 entry->offset_ldw);
Chris Wilsond4225a52019-01-14 14:21:23 +00001933 else
1934 ret = -EINVAL;
1935 }
Joonas Lahtinen3fd3a6f2017-09-13 14:52:55 +03001936
Paulo Zanonicf67c702014-04-01 14:55:08 -03001937 return ret;
Chris Wilson907b28c2013-07-19 20:36:52 +01001938}
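
/*
 * A sketch of how userspace might exercise this ioctl (illustrative,
 * not lifted from a real client); 0x2358 is RING_TIMESTAMP of the
 * render ring, the sole whitelisted register above, and
 * I915_REG_READ_8B_WA asks for the 2x32 read workaround:
 *
 *	struct drm_i915_reg_read rd = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rd) == 0)
 *		printf("render timestamp: %llu\n",
 *		       (unsigned long long)rd.val);
 */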
1939
Michel Thierrye34b0342018-04-05 17:00:48 +03001940/**
Michal Wajdeczko1d1a9772017-04-07 16:01:44 +00001941 * __intel_wait_for_register_fw - wait until register matches expected state
Daniele Ceraolo Spuriod2d551c2019-03-25 14:49:38 -07001942 * @uncore: the struct intel_uncore
Chris Wilson1758b902016-06-30 15:32:44 +01001943 * @reg: the register to read
1944 * @mask: mask to apply to register value
1945 * @value: expected value
Michal Wajdeczko1d1a9772017-04-07 16:01:44 +00001946 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
1947 * @slow_timeout_ms: slow timeout in millisecond
1948 * @out_value: optional placeholder to hold register value
Chris Wilson1758b902016-06-30 15:32:44 +01001949 *
1950 * This routine waits until the target register @reg contains the expected
Daniel Vetter3d466cd2016-07-15 21:48:05 +02001951 * @value after applying the @mask, i.e. it waits until ::
1952 *
1953 * (I915_READ_FW(reg) & mask) == value
1954 *
Michal Wajdeczko1d1a9772017-04-07 16:01:44 +00001955 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
Michal Wajdeczko6976e742017-04-10 12:17:47 +00001956 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
Chris Wilson84d84cb2017-04-11 12:27:05 +01001957 * must not be larger than 20,000 microseconds.
Chris Wilson1758b902016-06-30 15:32:44 +01001958 *
1959 * Note that this routine assumes the caller holds forcewake asserted, it is
1960 * not suitable for very long waits. See intel_wait_for_register() if you
1961 * wish to wait without holding forcewake for the duration (i.e. you expect
1962 * the wait to be slow).
1963 *
Michal Wajdeczkoe4661f12019-08-02 12:47:39 +00001964 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
Chris Wilson1758b902016-06-30 15:32:44 +01001965 */
Daniele Ceraolo Spuriod2d551c2019-03-25 14:49:38 -07001966int __intel_wait_for_register_fw(struct intel_uncore *uncore,
Michal Wajdeczko1d1a9772017-04-07 16:01:44 +00001967 i915_reg_t reg,
Michal Wajdeczko3fc7d86b2017-04-10 09:38:17 +00001968 u32 mask,
1969 u32 value,
1970 unsigned int fast_timeout_us,
1971 unsigned int slow_timeout_ms,
Michal Wajdeczko1d1a9772017-04-07 16:01:44 +00001972 u32 *out_value)
Mika Kuoppala7fd2d262015-06-18 12:51:40 +03001973{
Daniel Vetterff26ffa2017-05-10 17:19:32 +02001974 u32 uninitialized_var(reg_value);
Daniele Ceraolo Spuriod2d551c2019-03-25 14:49:38 -07001975#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
Michal Wajdeczko1d1a9772017-04-07 16:01:44 +00001976 int ret;
1977
Michal Wajdeczko6976e742017-04-10 12:17:47 +00001978 /* Catch any overuse of this function */
Chris Wilson84d84cb2017-04-11 12:27:05 +01001979 might_sleep_if(slow_timeout_ms);
1980 GEM_BUG_ON(fast_timeout_us > 20000);
Michal Wajdeczko6976e742017-04-10 12:17:47 +00001981
Chris Wilson84d84cb2017-04-11 12:27:05 +01001982 ret = -ETIMEDOUT;
1983 if (fast_timeout_us && fast_timeout_us <= 20000)
Michal Wajdeczko1d1a9772017-04-07 16:01:44 +00001984 ret = _wait_for_atomic(done, fast_timeout_us, 0);
Daniel Vetterff26ffa2017-05-10 17:19:32 +02001985 if (ret && slow_timeout_ms)
Michal Wajdeczko1d1a9772017-04-07 16:01:44 +00001986 ret = wait_for(done, slow_timeout_ms);
Chris Wilson84d84cb2017-04-11 12:27:05 +01001987
Michal Wajdeczko1d1a9772017-04-07 16:01:44 +00001988 if (out_value)
1989 *out_value = reg_value;
Chris Wilson84d84cb2017-04-11 12:27:05 +01001990
Chris Wilson1758b902016-06-30 15:32:44 +01001991 return ret;
1992#undef done
1993}
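
/*
 * A sketch of a typical atomic-context caller (the reset path uses this
 * shape; register and mask shown for illustration). Forcewake must
 * already be held and the wait never sleeps:
 *
 *	err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
 *					   GEN6_GRDOM_RENDER, 0,
 *					   500, 0, NULL);
 *
 * i.e. spin for at most 500 microseconds until the render reset-domain
 * bit self-clears.
 */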
1994
1995/**
Sean Paul23fdbdd2018-01-08 14:55:36 -05001996 * __intel_wait_for_register - wait until register matches expected state
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001997 * @uncore: the struct intel_uncore
Chris Wilson1758b902016-06-30 15:32:44 +01001998 * @reg: the register to read
1999 * @mask: mask to apply to register value
2000 * @value: expected value
Sean Paul23fdbdd2018-01-08 14:55:36 -05002001 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2002 * @slow_timeout_ms: slow timeout in millisecond
2003 * @out_value: optional placeholder to hold register value
Chris Wilson1758b902016-06-30 15:32:44 +01002004 *
2005 * This routine waits until the target register @reg contains the expected
Daniel Vetter3d466cd2016-07-15 21:48:05 +02002006 * @value after applying the @mask, i.e. it waits until ::
2007 *
2008 * (I915_READ(reg) & mask) == value
2009 *
Chris Wilson1758b902016-06-30 15:32:44 +01002010 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2011 *
Michal Wajdeczkoe4661f12019-08-02 12:47:39 +00002012 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
Chris Wilson1758b902016-06-30 15:32:44 +01002013 */
Daniele Ceraolo Spurio97a04e02019-03-25 14:49:39 -07002014int __intel_wait_for_register(struct intel_uncore *uncore,
2015 i915_reg_t reg,
2016 u32 mask,
2017 u32 value,
2018 unsigned int fast_timeout_us,
2019 unsigned int slow_timeout_ms,
2020 u32 *out_value)
Chris Wilson1758b902016-06-30 15:32:44 +01002021{
Chris Wilson1758b902016-06-30 15:32:44 +01002022 unsigned fw =
Daniele Ceraolo Spurio43193822019-03-25 14:49:37 -07002023 intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
Sean Paul23fdbdd2018-01-08 14:55:36 -05002024 u32 reg_value;
Chris Wilson1758b902016-06-30 15:32:44 +01002025 int ret;
2026
Chris Wilson3df82dd42018-03-29 23:45:19 +01002027 might_sleep_if(slow_timeout_ms);
Chris Wilson05646542017-04-11 11:13:38 +01002028
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07002029 spin_lock_irq(&uncore->lock);
2030 intel_uncore_forcewake_get__locked(uncore, fw);
Chris Wilson05646542017-04-11 11:13:38 +01002031
Daniele Ceraolo Spuriod2d551c2019-03-25 14:49:38 -07002032 ret = __intel_wait_for_register_fw(uncore,
Chris Wilson05646542017-04-11 11:13:38 +01002033 reg, mask, value,
Sean Paul23fdbdd2018-01-08 14:55:36 -05002034 fast_timeout_us, 0, &reg_value);
Chris Wilson05646542017-04-11 11:13:38 +01002035
Daniele Ceraolo Spurio272c7e52019-03-19 11:35:39 -07002036 intel_uncore_forcewake_put__locked(uncore, fw);
2037 spin_unlock_irq(&uncore->lock);
Chris Wilson05646542017-04-11 11:13:38 +01002038
Chris Wilson3df82dd42018-03-29 23:45:19 +01002039 if (ret && slow_timeout_ms)
Daniele Ceraolo Spuriod2d551c2019-03-25 14:49:38 -07002040 ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2041 reg),
Sean Paul23fdbdd2018-01-08 14:55:36 -05002042 (reg_value & mask) == value,
2043 slow_timeout_ms * 1000, 10, 1000);
2044
Ville Syrjälä39806c3f2019-02-04 23:16:44 +02002045 /* just trace the final value */
2046 trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2047
Sean Paul23fdbdd2018-01-08 14:55:36 -05002048 if (out_value)
2049 *out_value = reg_value;
Chris Wilson1758b902016-06-30 15:32:44 +01002050
2051 return ret;
Tomas Elfd4314402016-03-02 16:46:24 +02002052}
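
/*
 * Illustrative sketch only: the sleeping variant acquires and releases
 * forcewake internally, so a process-context caller can wait for a bit to
 * assert without any setup. EXAMPLE_REG and EXAMPLE_READY_BIT are
 * hypothetical names.
 *
 *	if (__intel_wait_for_register(uncore, EXAMPLE_REG,
 *				      EXAMPLE_READY_BIT, EXAMPLE_READY_BIT,
 *				      2, 50, NULL))
 *		drm_err(&uncore->i915->drm,
 *			"EXAMPLE_REG never became ready\n");
 */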
2053
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07002054bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
Chris Wilson907b28c2013-07-19 20:36:52 +01002055{
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01002056 bool ret;
2057
2058 spin_lock_irq(&uncore->debug->lock);
2059 ret = check_for_unclaimed_mmio(uncore);
2060 spin_unlock_irq(&uncore->debug->lock);
2061
2062 return ret;
Chris Wilson907b28c2013-07-19 20:36:52 +01002063}
Mika Kuoppala75714942015-12-16 09:26:48 +02002064
Mika Kuoppalabc3b9342016-01-08 15:51:20 +02002065bool
Daniele Ceraolo Spurio2cf7bf62019-03-25 14:49:34 -07002066intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
Mika Kuoppala75714942015-12-16 09:26:48 +02002067{
Chris Wilsona167b1e2018-09-04 14:12:07 +01002068 bool ret = false;
2069
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01002070 spin_lock_irq(&uncore->debug->lock);
Chris Wilsona167b1e2018-09-04 14:12:07 +01002071
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01002072 if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
Chris Wilsona167b1e2018-09-04 14:12:07 +01002073 goto out;
Mika Kuoppala75714942015-12-16 09:26:48 +02002074
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01002075 if (unlikely(check_for_unclaimed_mmio(uncore))) {
Chris Wilson7ef4ac6e2018-09-04 12:17:32 +01002076 if (!i915_modparams.mmio_debug) {
Wambui Karugad0208cf2020-01-07 18:13:33 +03002077 drm_dbg(&uncore->i915->drm,
2078 "Unclaimed register detected, "
2079 "enabling oneshot unclaimed register reporting. "
2080 "Please use i915.mmio_debug=N for more information.\n");
Chris Wilson7ef4ac6e2018-09-04 12:17:32 +01002081 i915_modparams.mmio_debug++;
2082 }
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01002083 uncore->debug->unclaimed_mmio_check--;
Chris Wilsona167b1e2018-09-04 14:12:07 +01002084 ret = true;
Mika Kuoppala75714942015-12-16 09:26:48 +02002085 }
Mika Kuoppalabc3b9342016-01-08 15:51:20 +02002086
Chris Wilsona167b1e2018-09-04 14:12:07 +01002087out:
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01002088 spin_unlock_irq(&uncore->debug->lock);
Chris Wilsona167b1e2018-09-04 14:12:07 +01002089
2090 return ret;
Mika Kuoppala75714942015-12-16 09:26:48 +02002091}
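
/*
 * Sketch of one assumed usage pattern (no caller in this file does exactly
 * this): arm the oneshot detector, run a batch of mmio accesses, then ask
 * whether an unclaimed access fired in between.
 *
 *	intel_uncore_arm_unclaimed_mmio_detection(uncore);
 *	... mmio reads/writes under test ...
 *	if (intel_uncore_unclaimed_mmio(uncore))
 *		drm_dbg(&uncore->i915->drm,
 *			"unclaimed access during test sequence\n");
 */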
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002092
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002093/**
2094 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2095 * a register
Daniele Ceraolo Spurio43193822019-03-25 14:49:37 -07002096 * @uncore: pointer to struct intel_uncore
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002097 * @reg: register in question
2098 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2099 *
2100 * Returns the set of forcewake domains that must be taken (with, for example,
2101 * intel_uncore_forcewake_get()) for the specified register to be accessible in
2102 * the specified mode (read, write or read/write) with raw mmio accessors.
2103 *
2104 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
2105 * callers to do FIFO management on their own or risk losing writes.
2106 */
2107enum forcewake_domains
Daniele Ceraolo Spurio43193822019-03-25 14:49:37 -07002108intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002109 i915_reg_t reg, unsigned int op)
2110{
2111 enum forcewake_domains fw_domains = 0;
2112
Pankaj Bharadiyaa9f236d2020-01-15 09:14:54 +05302113 drm_WARN_ON(&uncore->i915->drm, !op);
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002114
Daniele Ceraolo Spurio43193822019-03-25 14:49:37 -07002115 if (!intel_uncore_has_forcewake(uncore))
Tvrtko Ursulin895833b2016-10-04 09:29:24 +01002116 return 0;
2117
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002118 if (op & FW_REG_READ)
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07002119 fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002120
2121 if (op & FW_REG_WRITE)
Daniele Ceraolo Spurioccb2ace2019-06-19 18:00:16 -07002122 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2123
Pankaj Bharadiyaa9f236d2020-01-15 09:14:54 +05302124 drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002125
2126 return fw_domains;
2127}
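
/*
 * Illustrative sketch, mirroring the locked sequence used by
 * __intel_wait_for_register() above: look up the required domains once,
 * then bracket the raw access with get/put under the uncore lock.
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
 *	u32 val;
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, fw);
 *	val = intel_uncore_read_fw(uncore, reg);
 *	intel_uncore_forcewake_put__locked(uncore, fw);
 *	spin_unlock_irq(&uncore->lock);
 */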
Chris Wilson26e7a2a2017-02-13 17:15:33 +00002128
2129#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
Chris Wilson0757ac82017-04-12 10:21:43 +01002130#include "selftests/mock_uncore.c"
Chris Wilson26e7a2a2017-02-13 17:15:33 +00002131#include "selftests/intel_uncore.c"
2132#endif