Christian König | 770d13b1 | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2018 Advanced Micro Devices, Inc. |
| 3 | * All Rights Reserved. |
| 4 | * |
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 6 | * copy of this software and associated documentation files (the |
| 7 | * "Software"), to deal in the Software without restriction, including |
| 8 | * without limitation the rights to use, copy, modify, merge, publish, |
| 9 | * distribute, sub license, and/or sell copies of the Software, and to |
| 10 | * permit persons to whom the Software is furnished to do so, subject to |
| 11 | * the following conditions: |
| 12 | * |
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 20 | * |
| 21 | * The above copyright notice and this permission notice (including the |
| 22 | * next paragraph) shall be included in all copies or substantial portions |
| 23 | * of the Software. |
| 24 | * |
| 25 | */ |
| 26 | #ifndef __AMDGPU_GMC_H__ |
| 27 | #define __AMDGPU_GMC_H__ |
| 28 | |
| 29 | #include <linux/types.h> |
| 30 | |
| 31 | #include "amdgpu_irq.h" |
| 32 | |
/* VA hole for 48bit addresses on Vega10 */
#define AMDGPU_GMC_HOLE_START	0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END	0xffff800000000000ULL

/*
 * Hardware is programmed as if the hole doesn't exist with start and end
 * address values.
 *
 * This mask is used to remove the upper 16bits of the VA and so come up with
 * the linear addr value.
 */
#define AMDGPU_GMC_HOLE_MASK	0x0000ffffffffffffULL

/*
 * Ring size as power of two for the log of recent faults.
 */
#define AMDGPU_GMC_FAULT_RING_ORDER	8
#define AMDGPU_GMC_FAULT_RING_SIZE	(1 << AMDGPU_GMC_FAULT_RING_ORDER)

/*
 * Hash size as power of two for the log of recent faults
 */
#define AMDGPU_GMC_FAULT_HASH_ORDER	8
#define AMDGPU_GMC_FAULT_HASH_SIZE	(1 << AMDGPU_GMC_FAULT_HASH_ORDER)

/*
 * Number of IH timestamp ticks until a fault is considered handled
 */
#define AMDGPU_GMC_FAULT_TIMEOUT	5000ULL

struct firmware;
| 64 | |
/*
 * GMC page fault information
 */
struct amdgpu_gmc_fault {
	/* fault timestamp, in IH timestamp ticks (see AMDGPU_GMC_FAULT_TIMEOUT) */
	uint64_t timestamp;
	/* ring index of the next entry; presumably chains faults that share a
	 * fault_hash bucket — see amdgpu_gmc_filter_faults()
	 */
	uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER;
	/* 52bit key identifying the fault; presumably derived from the faulting
	 * addr and pasid — confirm against amdgpu_gmc_filter_faults()
	 */
	uint64_t key:52;
};
| 73 | |
/*
 * VMHUB structures, functions & helpers
 */

/* hub specific callbacks, set by the per-generation VMHUB code */
struct amdgpu_vmhub_funcs {
	/* print/decode an L2 protection fault status register value */
	void (*print_l2_protection_fault_status)(struct amdgpu_device *adev,
						 uint32_t status);
	/* build the invalidation request value for a vmid and flush type */
	uint32_t (*get_invalidate_req)(unsigned int vmid, uint32_t flush_type);
};
| 82 | |
/* register offsets and layout parameters describing one VM hub instance */
struct amdgpu_vmhub {
	uint32_t	ctx0_ptb_addr_lo32;
	uint32_t	ctx0_ptb_addr_hi32;
	uint32_t	vm_inv_eng0_sem;
	uint32_t	vm_inv_eng0_req;
	uint32_t	vm_inv_eng0_ack;
	uint32_t	vm_context0_cntl;
	uint32_t	vm_l2_pro_fault_status;
	uint32_t	vm_l2_pro_fault_cntl;

	/*
	 * store the register distances between two continuous context domain
	 * and invalidation engine.
	 */
	uint32_t	ctx_distance;
	uint32_t	ctx_addr_distance; /* include LO32/HI32 */
	uint32_t	eng_distance;
	uint32_t	eng_addr_distance; /* include LO32/HI32 */

	/* VM fault enable mask written to the context control registers;
	 * see amdgpu_gmc_set_vm_fault_masks()
	 */
	uint32_t	vm_cntx_cntl_vm_fault;

	const struct amdgpu_vmhub_funcs *vmhub_funcs;
};
| 106 | |
/*
 * GPU MC structures, functions & helpers
 */

/* GMC callbacks, presumably filled in by the chip specific GMC code */
struct amdgpu_gmc_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
			      uint32_t vmhub, uint32_t flush_type);
	/* flush the vm tlb via pasid */
	int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
				   uint32_t flush_type, bool all_hub);
	/* flush the vm tlb via ring */
	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
				       uint64_t pd_addr);
	/* Change the VMID -> PASID mapping */
	void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
				   unsigned pasid);
	/* enable/disable PRT support */
	void (*set_prt)(struct amdgpu_device *adev, bool enable);
	/* map mtype to hardware flags */
	uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
	/* get the pde for a given mc addr */
	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
			   u64 *dst, u64 *flags);
	/* get the pte flags to use for a BO VA mapping */
	void (*get_vm_pte)(struct amdgpu_device *adev,
			   struct amdgpu_bo_va_mapping *mapping,
			   uint64_t *flags);
	/* get the amount of memory used by the vbios for pre-OS console */
	unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
};
| 137 | |
/* XGMI hive topology information for this device */
struct amdgpu_xgmi {
	/* from psp */
	u64 node_id;
	u64 hive_id;
	/* fixed per family */
	u64 node_segment_size;
	/* physical node (0-3) */
	unsigned physical_node_id;
	/* number of nodes (0-4) */
	unsigned num_physical_nodes;
	/* gpu list in the same hive */
	struct list_head head;
	/* whether XGMI is supported/enabled on this device */
	bool supported;
	/* RAS interface handle for XGMI errors */
	struct ras_common_if *ras_if;
};
| 153 | |
struct amdgpu_gmc {
	/* FB's physical address in MMIO space (for CPU to
	 * map FB). This is different compared to the agp/
	 * gart/vram_start/end field as the latter is from
	 * GPU's view and aper_base is from CPU's view.
	 */
	resource_size_t aper_size;
	resource_size_t aper_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	/* AGP aperture start and end in MC address space
	 * Driver finds a hole in the MC address space
	 * to place AGP by setting MC_VM_AGP_BOT/TOP registers
	 * Under VMID0, logical address == MC address. AGP
	 * aperture maps to physical bus or IOVA addresses.
	 * AGP aperture is used to simulate FB in ZFB case.
	 * AGP aperture is also used for page table in system
	 * memory (mainly for APU).
	 */
	u64 agp_size;
	u64 agp_start;
	u64 agp_end;
	/* GART aperture start and end in MC address space
	 * Driver finds a hole in the MC address space
	 * to place GART by setting VM_CONTEXT0_PAGE_TABLE_START/END_ADDR
	 * registers
	 * Under VMID0, logical address inside GART aperture will
	 * be translated through gpuvm gart page table to access
	 * paged system memory
	 */
	u64 gart_size;
	u64 gart_start;
	u64 gart_end;
	/* Frame buffer aperture of this GPU device. Different from
	 * fb_start (see below), this only covers the local GPU device.
	 * Driver gets fb_start from MC_VM_FB_LOCATION_BASE (set by vbios)
	 * and calculates vram_start of this local device by adding an
	 * offset inside the XGMI hive.
	 * Under VMID0, logical address == MC address
	 */
	u64 vram_start;
	u64 vram_end;
	/* FB region, it's the same as the local vram region in single GPU.
	 * In an XGMI configuration, this region covers all GPUs in the same
	 * hive, and each GPU in the hive has the same view of this FB region.
	 * GPU0's vram starts at offset (0 * segment size),
	 * GPU1 starts at offset (1 * segment size), etc.
	 */
	u64 fb_start;
	u64 fb_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	/* mask of addressable bits in an MC address */
	u64 mc_mask;
	const struct firmware *fw;	/* MC firmware */
	uint32_t fw_version;
	struct amdgpu_irq_src vm_fault;
	uint32_t vram_type;
	uint8_t vram_vendor;
	uint32_t srbm_soft_reset;
	bool prt_warning;
	uint32_t sdpif_register;
	/* apertures */
	u64 shared_aperture_start;
	u64 shared_aperture_end;
	u64 private_aperture_start;
	u64 private_aperture_end;
	/* protects concurrent invalidation */
	spinlock_t invalidate_lock;
	bool translate_further;
	struct kfd_vm_fault_info *vm_fault_info;
	atomic_t vm_fault_info_updated;

	/* log of recently handled faults; see amdgpu_gmc_filter_faults() */
	struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE];
	struct {
		uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER;
	} fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
	uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;

	/* presumably configured by amdgpu_gmc_tmz_set() — confirm there */
	bool tmz_enabled;

	const struct amdgpu_gmc_funcs *gmc_funcs;

	struct amdgpu_xgmi xgmi;
	struct amdgpu_irq_src ecc_irq;
	/* presumably configured by amdgpu_gmc_noretry_set() — confirm there */
	int noretry;
};
| 244 | |
/* convenience wrappers dispatching through the gmc_funcs callback table */
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
	((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
	((adev), (pasid), (type), (allhub)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
Huang Rui | c082b99 | 2018-08-03 18:59:25 +0800 | [diff] [blame] | 255 | |
Andrey Grodzovsky | c8c5e56 | 2018-06-12 14:28:20 -0400 | [diff] [blame] | 256 | /** |
| 257 | * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR |
| 258 | * |
| 259 | * @adev: amdgpu_device pointer |
| 260 | * |
| 261 | * Returns: |
| 262 | * True if full VRAM is visible through the BAR |
| 263 | */ |
| 264 | static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc) |
| 265 | { |
| 266 | WARN_ON(gmc->real_vram_size < gmc->visible_vram_size); |
| 267 | |
| 268 | return (gmc->real_vram_size == gmc->visible_vram_size); |
| 269 | } |
| 270 | |
Christian König | ad9a5b7 | 2018-08-27 18:22:31 +0200 | [diff] [blame] | 271 | /** |
| 272 | * amdgpu_gmc_sign_extend - sign extend the given gmc address |
| 273 | * |
| 274 | * @addr: address to extend |
| 275 | */ |
| 276 | static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr) |
| 277 | { |
| 278 | if (addr >= AMDGPU_GMC_HOLE_START) |
| 279 | addr |= AMDGPU_GMC_HOLE_END; |
| 280 | |
| 281 | return addr; |
| 282 | } |
| 283 | |
/* Out-of-line GMC helpers; implementations live elsewhere (presumably
 * amdgpu_gmc.c — not visible in this header).
 */

/* PDE/PTE construction helpers */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags);
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);

/* MC address space layout (VRAM/GART/AGP aperture placement) */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base);
void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
			      struct amdgpu_gmc *mc);
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
			     struct amdgpu_gmc *mc);

/* page fault filtering against the fault_ring/fault_hash log */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp);

/* RAS setup/teardown and invalidation engine allocation */
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);

extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev);

extern void
amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
			      bool enable);

void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);

#endif