/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H

#define GUC_CLIENT_PRIORITY_KMD_HIGH	0
#define GUC_CLIENT_PRIORITY_HIGH	1
#define GUC_CLIENT_PRIORITY_KMD_NORMAL	2
#define GUC_CLIENT_PRIORITY_NORMAL	3
#define GUC_CLIENT_PRIORITY_NUM	4

#define GUC_MAX_STAGE_DESCRIPTORS	1024
#define GUC_INVALID_STAGE_ID		GUC_MAX_STAGE_DESCRIPTORS

#define GUC_RENDER_ENGINE		0
#define GUC_VIDEO_ENGINE		1
#define GUC_BLITTER_ENGINE		2
#define GUC_VIDEOENHANCE_ENGINE		3
#define GUC_VIDEO_ENGINE2		4
#define GUC_MAX_ENGINES_NUM		(GUC_VIDEO_ENGINE2 + 1)

/*
 * XXX: Beware that Gen9 firmware 32.x uses the wrong definition of
 * GUC_MAX_INSTANCES_PER_CLASS (1). This is harmless for now, as we are not
 * enabling GuC submission mode, where this value would be used.
 */
#define GUC_MAX_ENGINE_CLASSES		5
#define GUC_MAX_INSTANCES_PER_CLASS	4

#define GUC_DOORBELL_INVALID		256

#define GUC_DB_SIZE			(PAGE_SIZE)
#define GUC_WQ_SIZE			(PAGE_SIZE * 2)

/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE		1
#define WQ_STATUS_SUSPENDED		2
#define WQ_STATUS_CMD_ERROR		3
#define WQ_STATUS_ENGINE_ID_NOT_USED	4
#define WQ_STATUS_SUSPENDED_FROM_RESET	5
#define WQ_TYPE_SHIFT			0
#define WQ_TYPE_BATCH_BUF		(0x1 << WQ_TYPE_SHIFT)
#define WQ_TYPE_PSEUDO			(0x2 << WQ_TYPE_SHIFT)
#define WQ_TYPE_INORDER			(0x3 << WQ_TYPE_SHIFT)
#define WQ_TYPE_NOOP			(0x4 << WQ_TYPE_SHIFT)
#define WQ_TARGET_SHIFT			10
#define WQ_LEN_SHIFT			16
#define WQ_NO_WCFLUSH_WAIT		(1 << 27)
#define WQ_PRESENT_WORKLOAD		(1 << 28)

#define WQ_RING_TAIL_SHIFT		20
#define WQ_RING_TAIL_MAX		0x7FF	/* 2^11 QWords */
#define WQ_RING_TAIL_MASK		(WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)

#define GUC_STAGE_DESC_ATTR_ACTIVE	BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB	BIT(1)
#define GUC_STAGE_DESC_ATTR_KERNEL	BIT(2)
#define GUC_STAGE_DESC_ATTR_PREEMPT	BIT(3)
#define GUC_STAGE_DESC_ATTR_RESET	BIT(4)
#define GUC_STAGE_DESC_ATTR_WQLOCKED	BIT(5)
#define GUC_STAGE_DESC_ATTR_PCH		BIT(6)
#define GUC_STAGE_DESC_ATTR_TERMINATED	BIT(7)

/* New GuC control data */
#define GUC_CTL_CTXINFO			0
#define GUC_CTL_CTXNUM_IN16_SHIFT	0
#define GUC_CTL_BASE_ADDR_SHIFT		12

#define GUC_CTL_LOG_PARAMS		1
#define GUC_LOG_VALID			(1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL	(1 << 1)
#define GUC_LOG_ALLOC_IN_MEGABYTE	(1 << 3)
#define GUC_LOG_CRASH_SHIFT		4
#define GUC_LOG_CRASH_MASK		(0x3 << GUC_LOG_CRASH_SHIFT)
#define GUC_LOG_DPC_SHIFT		6
#define GUC_LOG_DPC_MASK		(0x7 << GUC_LOG_DPC_SHIFT)
#define GUC_LOG_ISR_SHIFT		9
#define GUC_LOG_ISR_MASK		(0x7 << GUC_LOG_ISR_SHIFT)
#define GUC_LOG_BUF_ADDR_SHIFT		12

#define GUC_CTL_WA			2
#define GUC_CTL_FEATURE			3
#define GUC_CTL_DISABLE_SCHEDULER	(1 << 14)

#define GUC_CTL_DEBUG			4
#define GUC_LOG_VERBOSITY_SHIFT		0
#define GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)
#define GUC_LOG_VERBOSITY_MED		(1 << GUC_LOG_VERBOSITY_SHIFT)
#define GUC_LOG_VERBOSITY_HIGH		(2 << GUC_LOG_VERBOSITY_SHIFT)
#define GUC_LOG_VERBOSITY_ULTRA		(3 << GUC_LOG_VERBOSITY_SHIFT)
/* Verbosity range-check limits, without the shift */
#define GUC_LOG_VERBOSITY_MIN		0
#define GUC_LOG_VERBOSITY_MAX		3
#define GUC_LOG_VERBOSITY_MASK		0x0000000f
#define GUC_LOG_DESTINATION_MASK	(3 << 4)
#define GUC_LOG_DISABLED		(1 << 6)
#define GUC_PROFILE_ENABLED		(1 << 7)

#define GUC_CTL_ADS			5
#define GUC_ADS_ADDR_SHIFT		1
#define GUC_ADS_ADDR_MASK		(0xFFFFF << GUC_ADS_ADDR_SHIFT)

#define GUC_CTL_MAX_DWORDS		(SOFT_SCRATCH_COUNT - 2) /* [1..14] */
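
/*
 * Illustrative sketch (not part of the firmware interface): one way the boot
 * parameter dwords indexed by the GUC_CTL_* values above might be assembled
 * before being written to the scratch registers. The "params" array,
 * "verbosity" and "log_buf_addr" (a page-aligned graphics address, so its low
 * 12 bits are free for the flag bits) are hypothetical inputs; the exact
 * encoding of every dword is owned by the firmware spec.
 */
#if 0
static void example_fill_guc_ctl_params(u32 *params, u32 verbosity,
					u32 log_buf_addr)
{
	/* params[] holds GUC_CTL_MAX_DWORDS entries, zeroed by the caller */
	params[GUC_CTL_LOG_PARAMS] = GUC_LOG_VALID |
				     GUC_LOG_NOTIFY_ON_HALF_FULL |
				     log_buf_addr;
	params[GUC_CTL_DEBUG] = verbosity << GUC_LOG_VERBOSITY_SHIFT;
	params[GUC_CTL_FEATURE] = GUC_CTL_DISABLE_SCHEDULER;
}
#endif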

/**
 * DOC: GuC Firmware Layout
 *
 * The GuC firmware layout looks like this:
 *
 *    +-------------------------------+
 *    |         uc_css_header         |
 *    |                               |
 *    | contains major/minor version  |
 *    +-------------------------------+
 *    |             uCode             |
 *    +-------------------------------+
 *    |         RSA signature         |
 *    +-------------------------------+
 *    |          modulus key          |
 *    +-------------------------------+
 *    |          exponent val         |
 *    +-------------------------------+
 *
 * The firmware may or may not contain the modulus key and exponent data. The
 * header, uCode and RSA signature are mandatory components and are used by
 * the driver. The length of each component, in dwords, can be found in the
 * header. When the modulus and exponent are not present in the firmware
 * (a truncated image), their length values still appear in the header.
 *
 * The driver performs basic firmware size validation based on the following
 * rules:
 *
 * 1. The header, uCode and RSA signature are mandatory components.
 * 2. All firmware components, if present, appear in the order shown in the
 *    layout above.
 * 3. The length of each component, in dwords, is given in the header.
 * 4. The modulus and exponent key are not required by the driver and may be
 *    absent from the firmware, in which case the driver loads a truncated
 *    image.
 *
 * These checks are illustrated in the sketch following struct uc_css_header
 * below.
 *
 * The HuC firmware layout is the same as the GuC firmware layout; only the
 * HuC version information is stored differently.
 */

struct uc_css_header {
	u32 module_type;
	/* header_size includes all non-uCode bits, including css_header, rsa
	 * key, modulus key and exponent data. */
	u32 header_size_dw;
	u32 header_version;
	u32 module_id;
	u32 module_vendor;
	u32 date;
#define CSS_DATE_DAY			(0xFF << 0)
#define CSS_DATE_MONTH			(0xFF << 8)
#define CSS_DATE_YEAR			(0xFFFF << 16)
	u32 size_dw; /* uCode plus header_size_dw */
	u32 key_size_dw;
	u32 modulus_size_dw;
	u32 exponent_size_dw;
	u32 time;
#define CSS_TIME_HOUR			(0xFF << 0)
#define CSS_TIME_MIN			(0xFF << 8)
#define CSS_TIME_SEC			(0xFFFF << 16)
	char username[8];
	char buildnumber[12];
	u32 sw_version;
#define CSS_SW_VERSION_GUC_MAJOR	(0xFF << 16)
#define CSS_SW_VERSION_GUC_MINOR	(0xFF << 8)
#define CSS_SW_VERSION_GUC_PATCH	(0xFF << 0)
#define CSS_SW_VERSION_HUC_MAJOR	(0xFFFF << 16)
#define CSS_SW_VERSION_HUC_MINOR	(0xFFFF << 0)
	u32 reserved[14];
	u32 header_info;
} __packed;
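
/*
 * Illustrative sketch (not part of the interface): the size checks described
 * in the "GuC Firmware Layout" section above, expressed against
 * struct uc_css_header. The helper and its "blob_size" argument (size of the
 * firmware file in bytes) are hypothetical; it only mirrors rules 1-4.
 */
#if 0
static bool example_css_size_ok(const struct uc_css_header *css,
				size_t blob_size)
{
	size_t css_size, ucode_size, rsa_size;

	/* Rule 3: all lengths in the header are expressed in dwords */
	css_size = (css->header_size_dw - css->key_size_dw -
		    css->modulus_size_dw - css->exponent_size_dw) *
		   sizeof(u32);
	ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
	rsa_size = css->key_size_dw * sizeof(u32);

	/*
	 * Rules 1 and 4: header, uCode and RSA signature must be present;
	 * modulus and exponent may be missing (truncated image), so the
	 * blob only has to be large enough for the mandatory parts.
	 */
	return blob_size >= css_size + ucode_size + rsa_size;
}
#endif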

/* Work item for submitting workloads into the GuC work queue. */
struct guc_wq_item {
	u32 header;
	u32 context_desc;
	u32 submit_element_info;
	u32 fence_id;
} __packed;
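
/*
 * Illustrative sketch (not part of the interface): how the fields defined by
 * the WQ_* values above could be packed into guc_wq_item.header for an
 * in-order submission. "wq_len" (number of dwords following the header) and
 * "target_engine" (one of the GUC_*_ENGINE ids) are hypothetical names.
 */
#if 0
static u32 example_wq_item_header(u32 wq_len, u32 target_engine)
{
	return WQ_TYPE_INORDER |
	       (target_engine << WQ_TARGET_SHIFT) |
	       (wq_len << WQ_LEN_SHIFT) |
	       WQ_NO_WCFLUSH_WAIT;
}
#endif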

struct guc_process_desc {
	u32 stage_id;
	u64 db_base_addr;
	u32 head;
	u32 tail;
	u32 error_offset;
	u64 wq_base_addr;
	u32 wq_size_bytes;
	u32 wq_status;
	u32 engine_presence;
	u32 priority;
	u32 reserved[30];
} __packed;

/* The engine id and context id are packed into guc_execlist_context.context_id */
#define GUC_ELC_CTXID_OFFSET		0
#define GUC_ELC_ENGINE_OFFSET		29

/* The execlist context including software and HW information */
struct guc_execlist_context {
	u32 context_desc;
	u32 context_id;
	u32 ring_status;
	u32 ring_lrca;
	u32 ring_begin;
	u32 ring_end;
	u32 ring_next_free_location;
	u32 ring_current_tail_pointer_value;
	u8 engine_state_submit_value;
	u8 engine_state_wait_value;
	u16 pagefault_count;
	u16 engine_submit_queue_count;
} __packed;

/*
 * This structure describes a stage set arranged for a particular communication
 * between uKernel (GuC) and Driver (KMD). Technically, this is known as a
 * "GuC Context descriptor" in the specs, but we use the term "stage descriptor"
 * to avoid confusion with all the other things already named "context" in the
 * driver. A static pool of these descriptors is stored inside a GEM object
 * (stage_desc_pool) which is held for the entire lifetime of our interaction
 * with the GuC, being allocated before the GuC is loaded with its firmware.
 */
struct guc_stage_desc {
	u32 sched_common_area;
	u32 stage_id;
	u32 pas_id;
	u8 engines_used;
	u64 db_trigger_cpu;
	u32 db_trigger_uk;
	u64 db_trigger_phy;
	u16 db_id;

	struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];

	u8 attribute;

	u32 priority;

	u32 wq_sampled_tail_offset;
	u32 wq_total_submit_enqueues;

	u32 process_desc;
	u32 wq_addr;
	u32 wq_size;

	u32 engine_presence;

	u8 engine_suspended;

	u8 reserved0[3];
	u64 reserved1[1];

	u64 desc_private;
} __packed;

/**
 * DOC: CTB based communication
 *
 * The CTB (command transport buffer) communication between Host and GuC
 * is based on a u32 data stream written to a shared buffer. One buffer can
 * be used to transmit data in only one direction (one-directional channel).
 *
 * The current status of each buffer is stored in the buffer descriptor.
 * The buffer descriptor holds tail and head fields that represent the active
 * data stream. The tail field is updated by the data producer (sender), and
 * the head field is updated by the data consumer (receiver)::
 *
 *      +------------+
 *      | DESCRIPTOR |          +=================+============+========+
 *      +============+          |                 | MESSAGE(s) |        |
 *      | address    |--------->+=================+============+========+
 *      +------------+
 *      | head       |          ^-----head--------^
 *      +------------+
 *      | tail       |          ^---------tail-----------------^
 *      +------------+
 *      | size       |          ^---------------size--------------------^
 *      +------------+
 *
 * Each message in the data stream starts with a single u32 treated as a
 * header, followed by an optional set of u32 data that makes up the
 * message-specific payload::
 *
 *      +------------+---------+---------+---------+
 *      |   MESSAGE  |                             |
 *      +------------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD       |
 *      +   HEADER   +---------+---------+---------+
 *      |            |    0    |   ...   |    n    |
 *      +======+=====+=========+=========+=========+
 *      | 31:16| code|         |         |         |
 *      +------+-----+         |         |         |
 *      |  15:5|flags|         |         |         |
 *      +------+-----+         |         |         |
 *      |   4:0|  len|         |         |         |
 *      +------+-----+---------+---------+---------+
 *
 *                   ^-------------len-------------^
 *
 * The message header consists of:
 *
 * - **len**, indicates length of the message payload (in u32)
 * - **code**, indicates message code
 * - **flags**, holds various bits to control message handling
 *
 * A minimal example of composing such a header is sketched after the
 * GUC_CT_MSG_* definitions below.
 */

/*
 * Describes single command transport buffer.
 * Used by both guc-master and clients.
 */
struct guc_ct_buffer_desc {
	u32 addr;		/* gfx address */
	u64 host_private;	/* host private data */
	u32 size;		/* size in bytes */
	u32 head;		/* offset updated by GuC */
	u32 tail;		/* offset updated by owner */
	u32 is_in_error;	/* error indicator */
	u32 fence;		/* fence updated by GuC */
	u32 status;		/* status updated by GuC */
	u32 owner;		/* id of the channel owner */
	u32 owner_sub_id;	/* owner-defined field for extra tracking */
	u32 reserved[5];
} __packed;

/* Type of command transport buffer */
#define INTEL_GUC_CT_BUFFER_TYPE_SEND	0x0u
#define INTEL_GUC_CT_BUFFER_TYPE_RECV	0x1u

/*
 * Definition of the command transport message header (DW0)
 *
 * bit[4..0]	message len (in dwords)
 * bit[7..5]	reserved
 * bit[8]	response (G2H only)
 * bit[8]	write fence to desc (H2G only)
 * bit[9]	write status to H2G buff (H2G only)
 * bit[10]	send status back via G2H (H2G only)
 * bit[15..11]	reserved
 * bit[31..16]	action code
 */
#define GUC_CT_MSG_LEN_SHIFT			0
#define GUC_CT_MSG_LEN_MASK			0x1F
#define GUC_CT_MSG_IS_RESPONSE			(1 << 8)
#define GUC_CT_MSG_WRITE_FENCE_TO_DESC		(1 << 8)
#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF		(1 << 9)
#define GUC_CT_MSG_SEND_STATUS			(1 << 10)
#define GUC_CT_MSG_ACTION_SHIFT			16
#define GUC_CT_MSG_ACTION_MASK			0xFFFF
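
/*
 * Illustrative sketch (not part of the interface): building the DW0 header
 * described above for a Host-to-GuC CT message. "len" counts the payload
 * dwords and "action" is one of the intel_guc_action codes defined later in
 * this file; requesting a fence write-back is optional. The helper name and
 * parameters are hypothetical.
 */
#if 0
static u32 example_ct_msg_header(u32 len, u32 action, bool want_fence)
{
	u32 header = (len << GUC_CT_MSG_LEN_SHIFT) |
		     (action << GUC_CT_MSG_ACTION_SHIFT);

	if (want_fence)
		header |= GUC_CT_MSG_WRITE_FENCE_TO_DESC;

	return header;
}
#endif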

#define GUC_FORCEWAKE_RENDER	(1 << 0)
#define GUC_FORCEWAKE_MEDIA	(1 << 1)

#define GUC_POWER_UNSPECIFIED	0
#define GUC_POWER_D0		1
#define GUC_POWER_D1		2
#define GUC_POWER_D2		3
#define GUC_POWER_D3		4

/* Scheduling policy settings */

/* Reset engine upon preempt failure */
#define POLICY_RESET_ENGINE		(1<<0)
/* Preempt to idle on quantum expiry */
#define POLICY_PREEMPT_TO_IDLE		(1<<1)

#define POLICY_MAX_NUM_WI			15
#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US	500000
#define POLICY_DEFAULT_EXECUTION_QUANTUM_US	1000000
#define POLICY_DEFAULT_PREEMPTION_TIME_US	500000
#define POLICY_DEFAULT_FAULT_TIME_US		250000

struct guc_policy {
	/* Time for one workload to execute. (in micro seconds) */
	u32 execution_quantum;
	/* Time to wait for a preemption request to complete before issuing a
	 * reset. (in micro seconds) */
	u32 preemption_time;
	/* How long a workload is allowed to run after the first fault is
	 * observed, before it is preempted. (in micro seconds) */
	u32 fault_time;
	u32 policy_flags;
	u32 reserved[8];
} __packed;

struct guc_policies {
	struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES];
	u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
	/* In micro seconds. How much time to allow before DPC processing is
	 * called back via interrupt (to prevent DPC queue drain starving).
	 * Typically 1000s of micro seconds (example only, not granularity). */
	u32 dpc_promote_time;

	/* Must be set for these new values to take effect. */
	u32 is_valid;

	/* Max number of work items to process per call. A large value may keep
	 * the CS idle. */
	u32 max_num_work_items;

	u32 reserved[4];
} __packed;
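
/*
 * Illustrative sketch (not part of the interface): populating a
 * struct guc_policies with the POLICY_DEFAULT_* values defined above for
 * every priority level and engine class. The helper name is hypothetical.
 */
#if 0
static void example_init_policies(struct guc_policies *policies)
{
	struct guc_policy *policy;
	u32 prio, klass;

	for (prio = 0; prio < GUC_CLIENT_PRIORITY_NUM; prio++) {
		for (klass = 0; klass < GUC_MAX_ENGINE_CLASSES; klass++) {
			policy = &policies->policy[prio][klass];
			policy->execution_quantum =
				POLICY_DEFAULT_EXECUTION_QUANTUM_US;
			policy->preemption_time =
				POLICY_DEFAULT_PREEMPTION_TIME_US;
			policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
			policy->policy_flags = 0;
		}
	}

	policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
	policies->max_num_work_items = POLICY_MAX_NUM_WI;
	policies->is_valid = 1;
}
#endif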

/* GuC MMIO reg state struct */

#define GUC_REGSET_MAX_REGISTERS	64
#define GUC_S3_SAVE_SPACE_PAGES		10

struct guc_mmio_reg {
	u32 offset;
	u32 value;
	u32 flags;
#define GUC_REGSET_MASKED		(1 << 0)
} __packed;

struct guc_mmio_regset {
	struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS];
	u32 values_valid;
	u32 number_of_registers;
} __packed;

/* GuC register sets */
struct guc_mmio_reg_state {
	struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
	u32 reserved[98];
} __packed;

/* HW info */
struct guc_gt_system_info {
	u32 slice_enabled;
	u32 rcs_enabled;
	u32 reserved0;
	u32 bcs_enabled;
	u32 vdbox_enable_mask;
	u32 vdbox_sfc_support_mask;
	u32 vebox_enable_mask;
	u32 reserved[9];
} __packed;

/* Clients info */
struct guc_ct_pool_entry {
	struct guc_ct_buffer_desc desc;
	u32 reserved[7];
} __packed;

#define GUC_CT_POOL_SIZE	2

struct guc_clients_info {
	u32 clients_num;
	u32 reserved0[13];
	u32 ct_pool_addr;
	u32 ct_pool_count;
	u32 reserved[4];
} __packed;

/* GuC Additional Data Struct */
struct guc_ads {
	u32 reg_state_addr;
	u32 reg_state_buffer;
	u32 scheduler_policies;
	u32 gt_system_info;
	u32 clients_info;
	u32 control_data;
	u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
	u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
	u32 reserved[16];
} __packed;

/* GuC logging structures */

enum guc_log_buffer_type {
	GUC_ISR_LOG_BUFFER,
	GUC_DPC_LOG_BUFFER,
	GUC_CRASH_DUMP_LOG_BUFFER,
	GUC_MAX_LOG_BUFFER
};

/**
 * struct guc_log_buffer_state - GuC log buffer state
 *
 * This state structure is used to coordinate the retrieval of GuC firmware
 * logs. A separate state is maintained for each log buffer type.
 * read_ptr points to the location in the log buffer that i915 last read from,
 * and is read-only for the GuC firmware. write_ptr is incremented by the GuC
 * by the number of bytes written for each log entry, and is read-only for
 * i915.
 * When any type of log buffer becomes half full, the GuC sends a flush
 * interrupt. The GuC firmware expects that, while it is writing to the second
 * half of the buffer, the first half is consumed by the Host, which then sends
 * a flush-completed acknowledgment, so that the GuC never overwrites and loses
 * logs. So when the buffer gets half filled and i915 has requested an
 * interrupt, the GuC sets the flush_to_file field, sets sampled_write_ptr to
 * the value of write_ptr and raises the interrupt.
 * On receiving the interrupt, i915 should read the buffer, clear the
 * flush_to_file field and update read_ptr with the value of sampled_write_ptr
 * before sending an acknowledgment to the GuC. The marker and version fields
 * are for internal use by the GuC and are opaque to i915. The buffer_full_cnt
 * field is incremented every time the GuC detects a log buffer overflow.
 */
struct guc_log_buffer_state {
	u32 marker[2];
	u32 read_ptr;
	u32 write_ptr;
	u32 size;
	u32 sampled_write_ptr;
	union {
		struct {
			u32 flush_to_file:1;
			u32 buffer_full_cnt:4;
			u32 reserved:27;
		};
		u32 flags;
	};
	u32 version;
} __packed;
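
/*
 * Illustrative sketch (not part of the interface): the host side of the flush
 * protocol described above. "copy_out" stands in for whatever the driver does
 * with the freshly written log data and is purely hypothetical, as is the
 * helper itself.
 */
#if 0
static void example_handle_log_flush(struct guc_log_buffer_state *state)
{
	u32 write_ptr = state->sampled_write_ptr;

	/* consume everything between read_ptr and the sampled write pointer */
	copy_out(state->read_ptr, write_ptr);	/* hypothetical */

	/* let the firmware reuse the already-consumed part of the buffer */
	state->read_ptr = write_ptr;
	state->flush_to_file = 0;

	/* then acknowledge with a LOG_BUFFER_FILE_FLUSH_COMPLETE action */
}
#endif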

struct guc_ctx_report {
	u32 report_return_status;
	u32 reserved1[64];
	u32 affected_count;
	u32 reserved2[2];
} __packed;

/* GuC Shared Context Data Struct */
struct guc_shared_ctx_data {
	u32 addr_of_last_preempted_data_low;
	u32 addr_of_last_preempted_data_high;
	u32 addr_of_last_preempted_data_high_tmp;
	u32 padding;
	u32 is_mapped_to_proxy;
	u32 proxy_ctx_id;
	u32 engine_reset_ctx_id;
	u32 media_reset_count;
	u32 reserved1[8];
	u32 uk_last_ctx_switch_reason;
	u32 was_reset;
	u32 lrca_gpu_addr;
	u64 execlist_ctx;
	u32 reserved2[66];
	struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
} __packed;

/**
 * DOC: MMIO based communication
 *
 * The MMIO based communication between Host and GuC uses software scratch
 * registers, where the first register holds data treated as the message
 * header, and the remaining registers hold the message payload.
 *
 * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
 * but no H2G command takes more than 8 parameters and the GuC firmware
 * itself uses an 8-element array to store the H2G message.
 *
 *      +-----------+---------+---------+---------+
 *      |  MMIO[0]  | MMIO[1] |   ...   | MMIO[n] |
 *      +-----------+---------+---------+---------+
 *      | header    |      optional payload       |
 *      +======+====+=========+=========+=========+
 *      | 31:28|type|         |         |         |
 *      +------+----+         |         |         |
 *      | 27:16|data|         |         |         |
 *      +------+----+         |         |         |
 *      |  15:0|code|         |         |         |
 *      +------+----+---------+---------+---------+
 *
 * The message header consists of:
 *
 * - **type**, indicates message type
 * - **code**, indicates message code, is specific for **type**
 * - **data**, indicates message data, optional, depends on **code**
 *
 * The following message **types** are supported:
 *
 * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code
 *   must be provided in **code** field. Optional action specific parameters
 *   can be provided in remaining payload registers or **data** field.
 *
 * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request,
 *   action response status will be provided in **code** field. Optional
 *   response data can be returned in remaining payload registers or **data**
 *   field.
 *
 * A minimal example of composing a request header is sketched after the
 * INTEL_GUC_MSG_* helpers below.
 */

#define GUC_MAX_MMIO_MSG_LEN		8

#define INTEL_GUC_MSG_TYPE_SHIFT	28
#define INTEL_GUC_MSG_TYPE_MASK		(0xF << INTEL_GUC_MSG_TYPE_SHIFT)
#define INTEL_GUC_MSG_DATA_SHIFT	16
#define INTEL_GUC_MSG_DATA_MASK		(0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
#define INTEL_GUC_MSG_CODE_SHIFT	0
#define INTEL_GUC_MSG_CODE_MASK		(0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)

#define __INTEL_GUC_MSG_GET(T, m) \
	(((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT)
#define INTEL_GUC_MSG_TO_TYPE(m)	__INTEL_GUC_MSG_GET(TYPE, m)
#define INTEL_GUC_MSG_TO_DATA(m)	__INTEL_GUC_MSG_GET(DATA, m)
#define INTEL_GUC_MSG_TO_CODE(m)	__INTEL_GUC_MSG_GET(CODE, m)

enum intel_guc_msg_type {
	INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
	INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
};

#define __INTEL_GUC_MSG_TYPE_IS(T, m) \
	(INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T)
#define INTEL_GUC_MSG_IS_REQUEST(m)	__INTEL_GUC_MSG_TYPE_IS(REQUEST, m)
#define INTEL_GUC_MSG_IS_RESPONSE(m)	__INTEL_GUC_MSG_TYPE_IS(RESPONSE, m)

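/*
 * Illustrative sketch (not part of the interface): composing the MMIO request
 * header described in the "MMIO based communication" section above. Checking
 * a reply for success is already covered by INTEL_GUC_MSG_IS_RESPONSE_SUCCESS()
 * defined further below. The helper name and parameters are hypothetical.
 */
#if 0
static u32 example_mmio_request_header(u32 action, u32 data)
{
	return (INTEL_GUC_MSG_TYPE_REQUEST << INTEL_GUC_MSG_TYPE_SHIFT) |
	       ((data << INTEL_GUC_MSG_DATA_SHIFT) & INTEL_GUC_MSG_DATA_MASK) |
	       ((action << INTEL_GUC_MSG_CODE_SHIFT) & INTEL_GUC_MSG_CODE_MASK);
}
#endif
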
enum intel_guc_action {
	INTEL_GUC_ACTION_DEFAULT = 0x0,
	INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
	INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
	INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
	INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
	INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
	INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
	INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
	INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
	INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
	INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005,
	INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
	INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
	INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
	INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
	INTEL_GUC_ACTION_LIMIT
};

enum intel_guc_preempt_options {
	INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
	INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
};

enum intel_guc_report_status {
	INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0,
	INTEL_GUC_REPORT_STATUS_ACKED = 0x1,
	INTEL_GUC_REPORT_STATUS_ERROR = 0x2,
	INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
};

enum intel_guc_sleep_state_status {
	INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1,
	INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
	INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
};

#define GUC_LOG_CONTROL_LOGGING_ENABLED	(1 << 0)
#define GUC_LOG_CONTROL_VERBOSITY_SHIFT	4
#define GUC_LOG_CONTROL_VERBOSITY_MASK	(0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
#define GUC_LOG_CONTROL_DEFAULT_LOGGING	(1 << 8)

enum intel_guc_response_status {
	INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
	INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
};

#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \
	 (typecheck(u32, (m)) && \
	  ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \
	  ((INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) | \
	   (INTEL_GUC_RESPONSE_STATUS_SUCCESS << INTEL_GUC_MSG_CODE_SHIFT)))

/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum intel_guc_recv_message {
	INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
	INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3)
};

#endif