Merge branch 'drm-next-4.21' of git://people.freedesktop.org/~agd5f/linux into drm-next

New features for 4.21:
amdgpu:
- Support for the SDMA paging queue on Vega
- Put compute EOP buffers into VRAM for better performance
- Share more code with amdkfd
- Support for scanout with DCC on gfx9
- Initial kerneldoc for DC
- Updated SMU firmware support for gfx8 chips
- Rework CSA handling for eventual support for preemption
- XGMI PSP support
- Clean up RLC handling
- Enable GPU reset by default on VI, SOC15 dGPUs
- Ring and IB test cleanups

amdkfd:
- Share more code with amdgpu

ttm:
- Move global init out of the drivers

scheduler:
- Track if schedulers are ready for work
- Timeout/fault handling changes to facilitate GPU recovery

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181114165113.3751-1-alexander.deucher@amd.com
diff --git a/Documentation/gpu/amdgpu-dc.rst b/Documentation/gpu/amdgpu-dc.rst
new file mode 100644
index 0000000..cc89b0f
--- /dev/null
+++ b/Documentation/gpu/amdgpu-dc.rst
@@ -0,0 +1,68 @@
+===================================
+drm/amd/display - Display Core (DC)
+===================================
+
+*placeholder - general description of supported platforms, what dc is, etc.*
+
+Because it is partially shared with other operating systems, the Display Core
+Driver is divided into two pieces:
+
+1. **Display Core (DC)** contains the OS-agnostic components. Things like
+   hardware programming and resource management are handled here.
+2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the
+   amdgpu base driver and DRM are implemented here.
+
+It doesn't help that the entire package is frequently referred to as DC. But
+with this split in mind, it should be clear from context which piece is meant.
+
+When CONFIG_DRM_AMD_DC is enabled, DC will be initialized by default for
+supported ASICs. To force disable it, set `amdgpu.dc=0` on the kernel command
+line. Likewise, to force enable it on unsupported ASICs, set `amdgpu.dc=1`.
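+
+For example, forcing DC off from the bootloader (a hypothetical GRUB entry is
+shown; any mechanism that appends kernel parameters works the same way)::
+
+  GRUB_CMDLINE_LINUX_DEFAULT="quiet splash amdgpu.dc=0"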
+
+To determine if DC is loaded, search dmesg for the following entry:
+
+``Display Core initialized with <version number here>``
+
+AMDgpu Display Manager
+======================
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+   :internal:
+
+Lifecycle
+---------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: DM Lifecycle
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: dm_hw_init dm_hw_fini
+
+Interrupts
+----------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: register_hpd_handlers dm_crtc_high_irq dm_pflip_high_irq
+
+Atomic Implementation
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: atomic
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: amdgpu_dm_atomic_check amdgpu_dm_atomic_commit_tail
+
+Display Core
+============
+
+**WIP**
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index 7d2d387..7c16721 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -5,6 +5,7 @@
 .. toctree::
 
    amdgpu
+   amdgpu-dc
    i915
    meson
    pl111
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index e725e84..d0f3c6b 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -72,8 +72,8 @@
 the kernel itself. The type of this object should be
 TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct
 ttm_bo_global). Again, driver-specific init and release functions may
-be provided, likely eventually calling ttm_bo_global_init() and
-ttm_bo_global_release(), respectively. Also, like the previous
+be provided, likely eventually calling ttm_bo_global_ref_init() and
+ttm_bo_global_ref_release(), respectively. Also, like the previous
 object, ttm_global_item_ref() is used to create an initial reference
 count for the TTM, which will call your initialization function.
 
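As a reading aid for the renamed hooks, here is a minimal sketch of the
driver-side pattern this paragraph describes, assuming the pre-4.21 drm_global
API (the text's TTM_GLOBAL_TTM_BO is DRM_GLOBAL_TTM_BO in code; example_device
and its embedded ttm_bo_global_ref are illustrative):

	static int example_ttm_global_init(struct example_device *edev)
	{
		struct drm_global_reference *global_ref = &edev->bo_global_ref.ref;

		global_ref->global_type = DRM_GLOBAL_TTM_BO;
		global_ref->size = sizeof(struct ttm_bo_global);
		global_ref->init = &ttm_bo_global_ref_init;
		global_ref->release = &ttm_bo_global_ref_release;

		/* takes the initial reference; calls ->init on first use */
		return drm_global_item_ref(global_ref);
	}
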
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 576ba98..7f3be35 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -11,7 +11,7 @@
 		drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o drm_prime.o \
+		drm_trace_points.o drm_prime.o \
 		drm_rect.o drm_vma_manager.o drm_flip_work.o \
 		drm_modeset_lock.o drm_atomic.o drm_bridge.o \
 		drm_framebuffer.o drm_connector.o drm_blend.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 138cb78..f76bcb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,7 +53,7 @@
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
 	amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
-	amdgpu_gmc.o amdgpu_xgmi.o
+	amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -105,6 +105,7 @@
 # add GFX block
 amdgpu-y += \
 	amdgpu_gfx.o \
+	amdgpu_rlc.o \
 	gfx_v8_0.o \
 	gfx_v9_0.o
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 104b2e0..42f882c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -75,6 +75,7 @@
 #include "amdgpu_sdma.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
+#include "amdgpu_csa.h"
 #include "amdgpu_gart.h"
 #include "amdgpu_debugfs.h"
 #include "amdgpu_job.h"
@@ -433,7 +434,7 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
 	 * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
 	 */
 
-	/* sDMA engines  reserved from 0xe0 -oxef  */
+	/* sDMA engines reserved from 0xe0 - 0xef */
 	AMDGPU_DOORBELL64_sDMA_ENGINE0            = 0xE0,
 	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xE1,
 	AMDGPU_DOORBELL64_sDMA_ENGINE1            = 0xE8,
@@ -831,7 +832,6 @@ struct amdgpu_device {
 	bool				need_dma32;
 	bool				need_swiotlb;
 	bool				accel_working;
-	struct work_struct		reset_work;
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
 	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c31a884..60f9a87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -144,7 +144,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				  KGD_MAX_QUEUES);
 
 		/* remove the KIQ bit as well */
-		if (adev->gfx.kiq.ring.ready)
+		if (adev->gfx.kiq.ring.sched.ready)
 			clear_bit(amdgpu_gfx_queue_to_bit(adev,
 							  adev->gfx.kiq.ring.me - 1,
 							  adev->gfx.kiq.ring.pipe,
@@ -268,9 +268,9 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 		amdgpu_device_gpu_recover(adev, NULL);
 }
 
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
-			void **mem_obj, uint64_t *gpu_addr,
-			void **cpu_ptr, bool mqd_gfx9)
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+				void **mem_obj, uint64_t *gpu_addr,
+				void **cpu_ptr, bool mqd_gfx9)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
@@ -340,7 +340,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	return r;
 }
 
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 {
 	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
 
@@ -351,8 +351,8 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 	amdgpu_bo_unref(&(bo));
 }
 
-void get_local_mem_info(struct kgd_dev *kgd,
-			struct kfd_local_mem_info *mem_info)
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+				      struct kfd_local_mem_info *mem_info)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
@@ -383,7 +383,7 @@ void get_local_mem_info(struct kgd_dev *kgd,
 		mem_info->mem_clk_max = 100;
 }
 
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
@@ -392,7 +392,7 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
 	return 0;
 }
 
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
@@ -405,7 +405,7 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 		return 100;
 }
 
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8e0d4f7..bcf587b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -134,16 +134,16 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
 void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 
 /* Shared API */
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
-			void **mem_obj, uint64_t *gpu_addr,
-			void **cpu_ptr, bool mqd_gfx9);
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-void get_local_mem_info(struct kgd_dev *kgd,
-			struct kfd_local_mem_info *mem_info);
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+				void **mem_obj, uint64_t *gpu_addr,
+				void **cpu_ptr, bool mqd_gfx9);
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+				      struct kfd_local_mem_info *mem_info);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
 
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 244d983..72a357d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -173,13 +173,6 @@ static int get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-	.init_gtt_mem_allocation = alloc_gtt_mem,
-	.free_gtt_mem = free_gtt_mem,
-	.get_local_mem_info = get_local_mem_info,
-	.get_gpu_clock_counter = get_gpu_clock_counter,
-	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_pasid_alloc,
-	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
@@ -200,28 +193,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_fw_version = get_fw_version,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
-	.get_cu_info = get_cu_info,
-	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
 	.invalidate_tlbs = invalidate_tlbs,
 	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.submit_ib = amdgpu_amdkfd_submit_ib,
-	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
 	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-	.gpu_recover = amdgpu_amdkfd_gpu_reset,
-	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 9f14991..0e2a56b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -128,13 +128,6 @@ static int get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-	.init_gtt_mem_allocation = alloc_gtt_mem,
-	.free_gtt_mem = free_gtt_mem,
-	.get_local_mem_info = get_local_mem_info,
-	.get_gpu_clock_counter = get_gpu_clock_counter,
-	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_pasid_alloc,
-	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
@@ -157,27 +150,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_fw_version = get_fw_version,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
-	.get_cu_info = get_cu_info,
-	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
 	.invalidate_tlbs = invalidate_tlbs,
 	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.submit_ib = amdgpu_amdkfd_submit_ib,
-	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
-	.gpu_recover = amdgpu_amdkfd_gpu_reset,
-	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 42cb4c4..03b604c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -46,38 +46,9 @@
 #include "v9_structs.h"
 #include "soc15.h"
 #include "soc15d.h"
+#include "mmhub_v1_0.h"
+#include "gfxhub_v1_0.h"
 
-/* HACK: MMHUB and GC both have VM-related register with the same
- * names but different offsets. Define the MMHUB register we need here
- * with a prefix. A proper solution would be to move the functions
- * programming these registers into gfx_v9_0.c and mmhub_v1_0.c
- * respectively.
- */
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ				0x06f3
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX		0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK				0x0705
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX		0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32		0x072b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX	0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32		0x072c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX	0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32		0x074b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX	0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32		0x074c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX	0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32		0x076b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX	0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32		0x076c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX	0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32		0x0727
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX	0
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32		0x0728
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX	0
 
 #define V9_PIPE_PER_MEC		(4)
 #define V9_QUEUES_PER_PIPE_MEC	(8)
@@ -167,13 +138,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-	.init_gtt_mem_allocation = alloc_gtt_mem,
-	.free_gtt_mem = free_gtt_mem,
-	.get_local_mem_info = get_local_mem_info,
-	.get_gpu_clock_counter = get_gpu_clock_counter,
-	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_pasid_alloc,
-	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
@@ -196,26 +160,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_fw_version = get_fw_version,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = amdgpu_amdkfd_get_tile_config,
-	.get_cu_info = get_cu_info,
-	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
 	.invalidate_tlbs = invalidate_tlbs,
 	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.submit_ib = amdgpu_amdkfd_submit_ib,
-	.gpu_recover = amdgpu_amdkfd_gpu_reset,
-	.set_compute_idle = amdgpu_amdkfd_set_compute_idle,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
 
@@ -785,15 +732,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
 static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	uint32_t req = (1 << vmid) |
-		(0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK;
-
-	mutex_lock(&adev->srbm_mutex);
 
 	/* Use legacy mode tlb invalidation.
 	 *
@@ -810,34 +748,7 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
 	 * TODO 2: support range-based invalidation, requires kfg2kgd
 	 * interface change
 	 */
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
-				0xffffffff);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
-				0x0000001f);
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-				mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
-				0xffffffff);
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-				mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
-				0x0000001f);
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req);
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ),
-				req);
-
-	while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) &
-					(1 << vmid)))
-		cpu_relax();
-
-	while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0,
-					mmMMHUB_VM_INVALIDATE_ENG16_ACK)) &
-					(1 << vmid)))
-		cpu_relax();
-
-	mutex_unlock(&adev->srbm_mutex);
-
+	amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
 }
 
 static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -876,7 +787,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 	if (adev->in_gpu_reset)
 		return -EIO;
 
-	if (ring->ready)
+	if (ring->sched.ready)
 		return invalidate_tlbs_with_kiq(adev, pasid);
 
 	for (vmid = 0; vmid < 16; vmid++) {
@@ -1016,7 +927,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 		uint64_t page_table_base)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	uint64_t base = page_table_base | AMDGPU_PTE_VALID;
 
 	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
 		pr_err("trying to set page table base for wrong VMID %u\n",
@@ -1028,25 +938,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 	 * now, all processes share the same address space size, like
 	 * on GFX8 and older.
 	 */
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
+	mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
-			lower_32_bits(adev->vm_manager.max_pfn - 1));
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
-			upper_32_bits(adev->vm_manager.max_pfn - 1));
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
-			lower_32_bits(adev->vm_manager.max_pfn - 1));
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
-			upper_32_bits(adev->vm_manager.max_pfn - 1));
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 }
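For reference, a condensed sketch of what such a hub helper boils down to,
reconstructed from the GC-hub register writes removed above (illustrative; not
the exact gfxhub_v1_0/mmhub_v1_0 bodies):

	static void example_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
					     uint64_t page_table_base)
	{
		/* the removed code ORed in the valid bit before programming */
		uint64_t base = page_table_base | AMDGPU_PTE_VALID;

		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
		       lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
		       upper_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2),
		       lower_32_bits(base));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2),
		       upper_32_bits(base));
	}
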
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 8816c69..ceadeea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			case CHIP_TOPAZ:
 				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
 				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
-				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
 					info->is_kicker = true;
 					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
 				} else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 				if (type == CGS_UCODE_ID_SMU) {
 					if (((adev->pdev->device == 0x67ef) &&
 					     ((adev->pdev->revision == 0xe0) ||
-					      (adev->pdev->revision == 0xe2) ||
 					      (adev->pdev->revision == 0xe5))) ||
 					    ((adev->pdev->device == 0x67ff) &&
 					     ((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					      (adev->pdev->revision == 0xff)))) {
 						info->is_kicker = true;
 						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
-					} else
+					} else if ((adev->pdev->device == 0x67ef) &&
+						   (adev->pdev->revision == 0xe2)) {
+						info->is_kicker = true;
+						strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+					} else {
 						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+					}
 				} else if (type == CGS_UCODE_ID_SMU_SK) {
 					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
 				}
@@ -378,14 +384,31 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					     (adev->pdev->revision == 0xef))) {
 						info->is_kicker = true;
 						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
-					} else
+					} else if ((adev->pdev->device == 0x67df) &&
+						   ((adev->pdev->revision == 0xe1) ||
+						    (adev->pdev->revision == 0xf7))) {
+						info->is_kicker = true;
+						strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+					} else {
 						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+					}
 				} else if (type == CGS_UCODE_ID_SMU_SK) {
 					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
 				}
 				break;
 			case CHIP_POLARIS12:
-				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+				if (((adev->pdev->device == 0x6987) &&
+				     ((adev->pdev->revision == 0xc0) ||
+				      (adev->pdev->revision == 0xc3))) ||
+				    ((adev->pdev->device == 0x6981) &&
+				     ((adev->pdev->revision == 0x00) ||
+				      (adev->pdev->revision == 0x01) ||
+				      (adev->pdev->revision == 0x10)))) {
+					info->is_kicker = true;
+					strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+				} else {
+					strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+				}
 				break;
 			case CHIP_VEGAM:
 				strcpy(fw_name, "amdgpu/vegam_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 35bc8fc3b..024dfbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1260,8 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	return 0;
 
 error_abort:
-	dma_fence_put(&job->base.s_fence->finished);
-	job->base.s_fence = NULL;
+	drm_sched_job_cleanup(&job->base);
 	amdgpu_mn_unlock(p->mn);
 
 error_unlock:
@@ -1285,7 +1284,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
-		DRM_ERROR("Failed to initialize parser !\n");
+		DRM_ERROR("Failed to initialize parser %d!\n", r);
 		goto out;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
new file mode 100644
index 0000000..0c590dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#include "amdgpu.h"
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+	addr -= AMDGPU_VA_RESERVED_SIZE;
+	addr = amdgpu_gmc_sign_extend(addr);
+
+	return addr;
+}
+
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size)
+{
+	int r;
+	void *ptr;
+
+	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+				domain, bo,
+				NULL, &ptr);
+	if (r)
+		return r;
+
+	memset(ptr, 0, size);
+	return 0;
+}
+
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+	amdgpu_bo_free_kernel(bo, NULL, NULL);
+}
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init. It maps the
+ * virtual address amdgpu_csa_vaddr() into this VM, and each GFX command
+ * submission should use this virtual address within the META_DATA init
+ * package to support SRIOV gfx preemption.
+ */
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+			  uint64_t csa_addr, uint32_t size)
+{
+	struct ww_acquire_ctx ticket;
+	struct list_head list;
+	struct amdgpu_bo_list_entry pd;
+	struct ttm_validate_buffer csa_tv;
+	int r;
+
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&csa_tv.head);
+	csa_tv.bo = &bo->tbo;
+	csa_tv.shared = true;
+
+	list_add(&csa_tv.head, &list);
+	amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	if (r) {
+		DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r);
+		return r;
+	}
+
+	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+	if (!*bo_va) {
+		ttm_eu_backoff_reservation(&ticket, &list);
+		DRM_ERROR("failed to create bo_va for static CSA\n");
+		return -ENOMEM;
+	}
+
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
+				size);
+	if (r) {
+		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		return r;
+	}
+
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+			     AMDGPU_PTE_EXECUTABLE);
+
+	if (r) {
+		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		return r;
+	}
+
+	ttm_eu_backoff_reservation(&ticket, &list);
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
new file mode 100644
index 0000000..524b443
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#ifndef AMDGPU_CSA_MANAGER_H
+#define AMDGPU_CSA_MANAGER_H
+
+#define AMDGPU_CSA_SIZE		(128 * 1024)
+
+uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev);
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+			  uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
+
+#endif
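Taken together with the amdgpu_device.c and amdgpu_kms.c hunks below, the
reworked API is used roughly as follows (condensed sketch stitched from those
hunks; example_csa_setup is illustrative and error handling is trimmed):

	static int example_csa_setup(struct amdgpu_device *adev,
				     struct amdgpu_fpriv *fpriv)
	{
		uint64_t csa_addr;
		int r;

		/* device init (SRIOV): back the CSA with a VRAM BO */
		r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
					       AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_CSA_SIZE);
		if (r)
			return r;

		/* file open: map it at the per-VM CSA virtual address */
		csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
		return amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					     &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
	}

At teardown, amdgpu_free_static_csa(&adev->virt.csa_obj) releases the BO, as
the amdgpu_device_ip_fini hunk below shows.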
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30bc345..590588a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1656,7 +1656,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
 			/* right after GMC hw init, we create CSA */
 			if (amdgpu_sriov_vf(adev)) {
-				r = amdgpu_allocate_static_csa(adev);
+				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+								AMDGPU_GEM_DOMAIN_VRAM,
+								AMDGPU_CSA_SIZE);
 				if (r) {
 					DRM_ERROR("allocate CSA failed %d\n", r);
 					return r;
@@ -1681,7 +1683,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	amdgpu_xgmi_add_device(adev);
+	if (adev->gmc.xgmi.num_physical_nodes > 1)
+		amdgpu_xgmi_add_device(adev);
 	amdgpu_amdkfd_device_init(adev);
 
 	if (amdgpu_sriov_vf(adev))
@@ -1890,7 +1893,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			amdgpu_ucode_free_bo(adev);
-			amdgpu_free_static_csa(adev);
+			amdgpu_free_static_csa(&adev->virt.csa_obj);
 			amdgpu_device_wb_fini(adev);
 			amdgpu_device_vram_scratch_fini(adev);
 		}
@@ -3295,13 +3298,35 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		return false;
 	}
 
-	if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1  &&
-					 !amdgpu_sriov_vf(adev))) {
-		DRM_INFO("GPU recovery disabled.\n");
-		return false;
+	if (amdgpu_gpu_recovery == 0)
+		goto disabled;
+
+	if (amdgpu_sriov_vf(adev))
+		return true;
+
+	if (amdgpu_gpu_recovery == -1) {
+		switch (adev->asic_type) {
+		case CHIP_TOPAZ:
+		case CHIP_TONGA:
+		case CHIP_FIJI:
+		case CHIP_POLARIS10:
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS12:
+		case CHIP_VEGAM:
+		case CHIP_VEGA20:
+		case CHIP_VEGA10:
+		case CHIP_VEGA12:
+			break;
+		default:
+			goto disabled;
+		}
 	}
 
 	return true;
+
+disabled:
+	DRM_INFO("GPU recovery disabled.\n");
+	return false;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5448cf2..ee47c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -398,9 +398,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	ring->fence_drv.irq_type = irq_type;
 	ring->fence_drv.initialized = true;
 
-	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
-		"cpu addr 0x%p\n", ring->idx,
-		ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
+		      "0x%016llx, cpu addr 0x%p\n", ring->name,
+		      ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 11fea28..6d11e17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -248,7 +248,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	}
 	mb();
 	amdgpu_asic_flush_hdp(adev, NULL);
-	amdgpu_gmc_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
 	return 0;
 }
 
@@ -259,6 +259,8 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
  * @offset: offset into the GPU's gart aperture
  * @pages: number of pages to bind
  * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ * @dst: CPU address of the gart table
  *
  * Map the dma_addresses into GART entries (all asics).
  * Returns 0 for success, -EINVAL for failure.
@@ -331,7 +333,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
 	mb();
 	amdgpu_asic_flush_hdp(adev, NULL);
-	amdgpu_gmc_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 9ff6288..afa2e28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -41,6 +41,7 @@ struct amdgpu_bo;
 
 struct amdgpu_gart {
 	struct amdgpu_bo		*bo;
+	/* CPU kmapped address of gart table */
 	void				*ptr;
 	unsigned			num_gpu_pages;
 	unsigned			num_cpu_pages;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1a656b86..6a70c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
 
 /* delay 0.1 second to enable gfx off feature */
 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b61b5c1..f790e15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -29,6 +29,7 @@
  */
 #include "clearstate_defs.h"
 #include "amdgpu_ring.h"
+#include "amdgpu_rlc.h"
 
 /* GFX current status */
 #define AMDGPU_GFX_NORMAL_MODE			0x00000000L
@@ -37,59 +38,6 @@
 #define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
 #define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L
 
-
-struct amdgpu_rlc_funcs {
-	void (*enter_safe_mode)(struct amdgpu_device *adev);
-	void (*exit_safe_mode)(struct amdgpu_device *adev);
-};
-
-struct amdgpu_rlc {
-	/* for power gating */
-	struct amdgpu_bo	*save_restore_obj;
-	uint64_t		save_restore_gpu_addr;
-	volatile uint32_t	*sr_ptr;
-	const u32               *reg_list;
-	u32                     reg_list_size;
-	/* for clear state */
-	struct amdgpu_bo	*clear_state_obj;
-	uint64_t		clear_state_gpu_addr;
-	volatile uint32_t	*cs_ptr;
-	const struct cs_section_def   *cs_data;
-	u32                     clear_state_size;
-	/* for cp tables */
-	struct amdgpu_bo	*cp_table_obj;
-	uint64_t		cp_table_gpu_addr;
-	volatile uint32_t	*cp_table_ptr;
-	u32                     cp_table_size;
-
-	/* safe mode for updating CG/PG state */
-	bool in_safe_mode;
-	const struct amdgpu_rlc_funcs *funcs;
-
-	/* for firmware data */
-	u32 save_and_restore_offset;
-	u32 clear_state_descriptor_offset;
-	u32 avail_scratch_ram_locations;
-	u32 reg_restore_list_size;
-	u32 reg_list_format_start;
-	u32 reg_list_format_separate_start;
-	u32 starting_offsets_start;
-	u32 reg_list_format_size_bytes;
-	u32 reg_list_size_bytes;
-	u32 reg_list_format_direct_reg_list_length;
-	u32 save_restore_list_cntl_size_bytes;
-	u32 save_restore_list_gpm_size_bytes;
-	u32 save_restore_list_srm_size_bytes;
-
-	u32 *register_list_format;
-	u32 *register_restore;
-	u8 *save_restore_list_cntl;
-	u8 *save_restore_list_gpm;
-	u8 *save_restore_list_srm;
-
-	bool is_rlc_v2_1;
-};
-
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
 
 struct amdgpu_mec {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6fa7ef4..8c57924 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -64,7 +64,7 @@ struct amdgpu_vmhub {
 struct amdgpu_gmc_funcs {
 	/* flush the vm tlb via mmio */
 	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-			      uint32_t vmid);
+			      uint32_t vmid, uint32_t flush_type);
 	/* flush the vm tlb via ring */
 	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
 				       uint64_t pd_addr);
@@ -89,7 +89,7 @@ struct amdgpu_gmc_funcs {
 
 struct amdgpu_xgmi {
 	/* from psp */
-	u64 device_id;
+	u64 node_id;
 	u64 hive_id;
 	/* fixed per family */
 	u64 node_segment_size;
@@ -151,7 +151,7 @@ struct amdgpu_gmc {
 	struct amdgpu_xgmi xgmi;
 };
 
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 #define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
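A minimal sketch of what the widened hook means for a GMC backend (example_
names are illustrative; flush_type 0 is the legacy flush that the updated
amdgpu_gart.c callers above pass):

	static void example_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					  uint32_t flush_type)
	{
		/* program the per-hub invalidation request for vmid/flush_type */
	}

	static const struct amdgpu_gmc_funcs example_gmc_funcs = {
		.flush_gpu_tlb = example_flush_gpu_tlb,
	};

	/* callers go through the macro and now pass the type explicitly:
	 * amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
	 */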
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b8963b7..c48207b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -146,7 +146,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		fence_ctx = 0;
 	}
 
-	if (!ring->ready) {
+	if (!ring->sched.ready) {
 		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
 		return -EINVAL;
 	}
@@ -221,8 +221,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 			!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 			continue;
 
-		amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
-				    need_ctx_switch);
+		amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
 		need_ctx_switch = false;
 	}
 
@@ -347,19 +346,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
 	}
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+	for (i = 0; i < adev->num_rings; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		long tmo;
 
-		if (!ring || !ring->ready)
-			continue;
-
-		/* skip IB tests for KIQ in general for the below reasons:
-		 * 1. We never submit IBs to the KIQ
-		 * 2. KIQ doesn't use the EOP interrupts,
-		 *    we use some other CP interrupt.
+		/* KIQ rings don't have an IB test because we never submit IBs
+		 * to them and they have no interrupt support.
 		 */
-		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+		if (!ring->sched.ready || !ring->funcs->test_ib)
 			continue;
 
 		/* MM engine need more time */
@@ -374,20 +368,23 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 			tmo = tmo_gfx;
 
 		r = amdgpu_ring_test_ib(ring, tmo);
-		if (r) {
-			ring->ready = false;
+		if (!r) {
+			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
+				      ring->name);
+			continue;
+		}
 
-			if (ring == &adev->gfx.gfx_ring[0]) {
-				/* oh, oh, that's really bad */
-				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
-				adev->accel_working = false;
-				return r;
+		ring->sched.ready = false;
+		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
+			  ring->name, r);
 
-			} else {
-				/* still not good, but we can live with it */
-				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
-				ret = r;
-			}
+		if (ring == &adev->gfx.gfx_ring[0]) {
+			/* oh, oh, that's really bad */
+			adev->accel_working = false;
+			return r;
+
+		} else {
+			ret = r;
 		}
 	}
 	return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 52c17f6..6b6524f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -94,23 +94,6 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
 }
 
 /**
- * amdgpu_irq_reset_work_func - execute GPU reset
- *
- * @work: work struct pointer
- *
- * Execute scheduled GPU reset (Cayman+).
- * This function is called when the IRQ handler thinks we need a GPU reset.
- */
-static void amdgpu_irq_reset_work_func(struct work_struct *work)
-{
-	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
-						  reset_work);
-
-	if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev))
-		amdgpu_device_gpu_recover(adev, NULL);
-}
-
-/**
  * amdgpu_irq_disable_all - disable *all* interrupts
  *
  * @adev: amdgpu device pointer
@@ -262,15 +245,12 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 				amdgpu_hotplug_work_func);
 	}
 
-	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
-
 	adev->irq.installed = true;
 	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
 	if (r) {
 		adev->irq.installed = false;
 		if (!amdgpu_device_has_dc_support(adev))
 			flush_work(&adev->hotplug_work);
-		cancel_work_sync(&adev->reset_work);
 		return r;
 	}
 	adev->ddev->max_vblank_count = 0x00ffffff;
@@ -299,7 +279,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 			pci_disable_msi(adev->pdev);
 		if (!amdgpu_device_has_dc_support(adev))
 			flush_work(&adev->hotplug_work);
-		cancel_work_sync(&adev->reset_work);
 	}
 
 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 755f733..e0af44f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
 
+	drm_sched_job_cleanup(s_job);
+
 	amdgpu_ring_priority_put(ring, s_job->s_priority);
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
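Together with the amdgpu_cs.c error path above, drm_sched_job_cleanup() now
brackets the job lifecycle; a sketch of where it sits (example_free_cb stands
in for amdgpu_job_free_cb):

	static void example_free_cb(struct drm_sched_job *s_job)
	{
		/* release scheduler-side state (the finished fence) first ... */
		drm_sched_job_cleanup(s_job);
		/* ... then the driver's own resources, as amdgpu_job_free_cb does */
	}
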
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 57cfe78..e1b46a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -33,6 +33,8 @@
 #define to_amdgpu_job(sched_job)		\
 		container_of((sched_job), struct amdgpu_job, base)
 
+#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
+
 struct amdgpu_fence;
 
 struct amdgpu_job {
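A sketch of how a ring backend consumes the new helper once
amdgpu_ring_emit_ib() carries the job pointer (see the amdgpu_ib.c hunk above;
example_ring_emit_ib is illustrative):

	static void example_ring_emit_ib(struct amdgpu_ring *ring,
					 struct amdgpu_job *job,
					 struct amdgpu_ib *ib, bool ctx_switch)
	{
		/* a NULL job (e.g. IB tests) safely maps to vmid 0 */
		unsigned int vmid = AMDGPU_JOB_GET_VMID(job);

		/* ... emit the INDIRECT_BUFFER packet using vmid ... */
	}
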
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 81732a8..9b3164c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_GFX:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			if (adev->gfx.gfx_ring[i].ready)
+			if (adev->gfx.gfx_ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
@@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_COMPUTE:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			if (adev->gfx.compute_ring[i].ready)
+			if (adev->gfx.compute_ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
@@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_DMA:
 		type = AMD_IP_BLOCK_TYPE_SDMA;
 		for (i = 0; i < adev->sdma.num_instances; i++)
-			if (adev->sdma.instance[i].ring.ready)
+			if (adev->sdma.instance[i].ring.sched.ready)
 				++num_rings;
 		ib_start_alignment = 256;
 		ib_size_alignment = 4;
@@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			if (adev->uvd.harvest_config & (1 << i))
 				continue;
 
-			if (adev->uvd.inst[i].ring.ready)
+			if (adev->uvd.inst[i].ring.sched.ready)
 				++num_rings;
 		}
 		ib_start_alignment = 64;
@@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_VCE:
 		type = AMD_IP_BLOCK_TYPE_VCE;
 		for (i = 0; i < adev->vce.num_rings; i++)
-			if (adev->vce.ring[i].ready)
+			if (adev->vce.ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 4;
 		ib_size_alignment = 1;
@@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 				continue;
 
 			for (j = 0; j < adev->uvd.num_enc_rings; j++)
-				if (adev->uvd.inst[i].ring_enc[j].ready)
+				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
 					++num_rings;
 		}
 		ib_start_alignment = 64;
@@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_dec.ready)
+		if (adev->vcn.ring_dec.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
@@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_VCN_ENC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
 		for (i = 0; i < adev->vcn.num_enc_rings; i++)
-			if (adev->vcn.ring_enc[i].ready)
+			if (adev->vcn.ring_enc[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 64;
 		ib_size_alignment = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_jpeg.ready)
+		if (adev->vcn.ring_jpeg.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
@@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
-		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+						&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
 		if (r)
 			goto error_vm;
 	}
@@ -1048,8 +1051,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	pasid = fpriv->vm.pasid;
 	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
 
-	amdgpu_vm_fini(adev, &fpriv->vm);
 	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	if (pasid)
 		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index b9e9e8b..11723d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -57,7 +57,6 @@ struct amdgpu_hpd;
 #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
 #define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
 #define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
-#define to_amdgpu_plane(x)	container_of(x, struct amdgpu_plane, base)
 
 #define to_dm_plane_state(x)	container_of(x, struct dm_plane_state, base);
 
@@ -325,7 +324,7 @@ struct amdgpu_mode_info {
 	struct card_info *atom_card_info;
 	bool mode_config_initialized;
 	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
-	struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
+	struct drm_plane *planes[AMDGPU_MAX_PLANES];
 	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
 	/* DVI-I properties */
 	struct drm_property *coherent_mode_property;
@@ -434,11 +433,6 @@ struct amdgpu_crtc {
 	struct drm_pending_vblank_event *event;
 };
 
-struct amdgpu_plane {
-	struct drm_plane base;
-	enum drm_plane_type plane_type;
-};
-
 struct amdgpu_encoder_atom_dig {
 	bool linkb;
 	/* atom dig */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 59cc678..7235cd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2129,7 +2129,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
-		if (ring && ring->ready)
+		if (ring && ring->sched.ready)
 			amdgpu_fence_wait_empty(ring);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 25d2f3e..e05dc66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -90,6 +90,8 @@ static int psp_sw_fini(void *handle)
 	adev->psp.sos_fw = NULL;
 	release_firmware(adev->psp.asd_fw);
 	adev->psp.asd_fw = NULL;
+	release_firmware(adev->psp.ta_fw);
+	adev->psp.ta_fw = NULL;
 	return 0;
 }
 
@@ -118,22 +120,26 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 static int
 psp_cmd_submit_buf(struct psp_context *psp,
 		   struct amdgpu_firmware_info *ucode,
-		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr,
-		   int index)
+		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
 {
 	int ret;
+	int index;
 
 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
 
 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
 
+	index = atomic_inc_return(&psp->fence_value);
 	ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
 			     fence_mc_addr, index);
-
-	while (*((unsigned int *)psp->fence_buf) != index) {
-		msleep(1);
+	if (ret) {
+		atomic_dec(&psp->fence_value);
+		return ret;
 	}
 
+	while (*((unsigned int *)psp->fence_buf) != index)
+		msleep(1);
+
 	/* the status field must be 0 after FW is loaded */
 	if (ucode && psp->cmd_buf_mem->resp.status) {
 		DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n",
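The essence of the change, distilled from the hunk above: the fence index is
generated internally from an atomic counter instead of being threaded through
every caller (sketch; example_submit is illustrative):

	static int example_submit(struct psp_context *psp)
	{
		/* fresh, monotonically increasing index per submission */
		int index = atomic_inc_return(&psp->fence_value);

		/* roll the counter back on failure, as the hunk above does */
		if (psp_cmd_submit(psp, NULL, psp->cmd_buf_mc_addr,
				   psp->fence_buf_mc_addr, index)) {
			atomic_dec(&psp->fence_value);
			return -EINVAL;
		}

		/* the PSP writes the index back to fence_buf on completion */
		while (*(unsigned int *)psp->fence_buf != index)
			msleep(1);

		return 0;
	}
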
@@ -191,7 +197,7 @@ static int psp_tmr_load(struct psp_context *psp)
 			PSP_TMR_SIZE, psp->tmr_mc_addr);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr, 1);
+				 psp->fence_buf_mc_addr);
 	if (ret)
 		goto failed;
 
@@ -258,13 +264,194 @@ static int psp_asd_load(struct psp_context *psp)
 			     psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr, 2);
+				 psp->fence_buf_mc_addr);
 
 	kfree(cmd);
 
 	return ret;
 }
 
+static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+					  uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
+					  uint32_t xgmi_ta_size, uint32_t shared_size)
+{
+	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
+	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
+	cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
+	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+{
+	int ret;
+
+	/*
+	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
+	 * physical) for the XGMI TA <-> driver interface
+	 */
+	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
+				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+				      &psp->xgmi_context.xgmi_shared_bo,
+				      &psp->xgmi_context.xgmi_shared_mc_addr,
+				      &psp->xgmi_context.xgmi_shared_buf);
+
+	return ret;
+}
+
+static int psp_xgmi_load(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	/*
+	 * TODO: bypass the loading in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+
+	psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+				      psp->xgmi_context.xgmi_shared_mc_addr,
+				      psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd,
+				 psp->fence_buf_mc_addr);
+
+	if (!ret) {
+		psp->xgmi_context.initialized = 1;
+		psp->xgmi_context.session_id = cmd->resp.session_id;
+	}
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+					    uint32_t xgmi_session_id)
+{
+	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+	cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
+}
+
+static int psp_xgmi_unload(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	/*
+	 * TODO: bypass the unloading in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd,
+				 psp->fence_buf_mc_addr);
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+					    uint32_t ta_cmd_id,
+					    uint32_t xgmi_session_id)
+{
+	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+	cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
+	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+	/* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	/*
+	 * TODO: bypass the invocation in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+					psp->xgmi_context.session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd,
+				 psp->fence_buf_mc_addr);
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static int psp_xgmi_terminate(struct psp_context *psp)
+{
+	int ret;
+
+	if (!psp->xgmi_context.initialized)
+		return 0;
+
+	ret = psp_xgmi_unload(psp);
+	if (ret)
+		return ret;
+
+	psp->xgmi_context.initialized = 0;
+
+	/* free xgmi shared memory */
+	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
+			&psp->xgmi_context.xgmi_shared_mc_addr,
+			&psp->xgmi_context.xgmi_shared_buf);
+
+	return 0;
+}
+
+static int psp_xgmi_initialize(struct psp_context *psp)
+{
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	int ret;
+
+	if (!psp->xgmi_context.initialized) {
+		ret = psp_xgmi_init_shared_buf(psp);
+		if (ret)
+			return ret;
+	}
+
+	/* Load XGMI TA */
+	ret = psp_xgmi_load(psp);
+	if (ret)
+		return ret;
+
+	/* Initialize XGMI session */
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
+
+	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+
+	return ret;
+}
+
 static int psp_hw_start(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -292,6 +479,15 @@ static int psp_hw_start(struct psp_context *psp)
 	if (ret)
 		return ret;
 
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		ret = psp_xgmi_initialize(psp);
+		/* Warn on XGMI session initialization failure
+		 * instead of stopping driver initialization
+		 */
+		if (ret)
+			dev_err(psp->adev->dev,
+				"XGMI: Failed to initialize XGMI session\n");
+	}
 	return 0;
 }
 
@@ -321,7 +517,7 @@ static int psp_np_fw_load(struct psp_context *psp)
 			return ret;
 
 		ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
-					 psp->fence_buf_mc_addr, i + 3);
+					 psp->fence_buf_mc_addr);
 		if (ret)
 			return ret;
 
@@ -452,6 +648,10 @@ static int psp_hw_fini(void *handle)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		return 0;
 
+	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+	    psp->xgmi_context.initialized == 1)
+		psp_xgmi_terminate(psp);
+
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -479,6 +679,15 @@ static int psp_suspend(void *handle)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		return 0;
 
+	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+	    psp->xgmi_context.initialized == 1) {
+		ret = psp_xgmi_terminate(psp);
+		if (ret) {
+			DRM_ERROR("Failed to terminate xgmi ta\n");
+			return ret;
+		}
+	}
+
 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
 	if (ret) {
 		DRM_ERROR("PSP ring stop failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8b8720e..9ec5d1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -27,14 +27,17 @@
 
 #include "amdgpu.h"
 #include "psp_gfx_if.h"
+#include "ta_xgmi_if.h"
 
 #define PSP_FENCE_BUFFER_SIZE	0x1000
 #define PSP_CMD_BUFFER_SIZE	0x1000
-#define PSP_ASD_SHARED_MEM_SIZE	0x4000
+#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
 #define PSP_1_MEG		0x100000
 #define PSP_TMR_SIZE	0x400000
 
 struct psp_context;
+struct psp_xgmi_node_info;
 struct psp_xgmi_topology_info;
 
 enum psp_ring_type
@@ -80,12 +83,20 @@ struct psp_funcs
 				  enum AMDGPU_UCODE_ID ucode_type);
 	bool (*smu_reload_quirk)(struct psp_context *psp);
 	int (*mode1_reset)(struct psp_context *psp);
-	uint64_t (*xgmi_get_device_id)(struct psp_context *psp);
+	uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
 	uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
 	int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
-			struct psp_xgmi_topology_info *topology);
+				      struct psp_xgmi_topology_info *topology);
 	int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
-			struct psp_xgmi_topology_info *topology);
+				      struct psp_xgmi_topology_info *topology);
+};
+
+struct psp_xgmi_context {
+	uint8_t				initialized;
+	uint32_t			session_id;
+	struct amdgpu_bo                *xgmi_shared_bo;
+	uint64_t                        xgmi_shared_mc_addr;
+	void                            *xgmi_shared_buf;
 };
 
 struct psp_context
@@ -96,7 +107,7 @@ struct psp_context
 
 	const struct psp_funcs		*funcs;
 
-	/* fence buffer */
+	/* firmware buffer */
 	struct amdgpu_bo		*fw_pri_bo;
 	uint64_t			fw_pri_mc_addr;
 	void				*fw_pri_buf;
@@ -134,6 +145,16 @@ struct psp_context
 	struct amdgpu_bo		*cmd_buf_bo;
 	uint64_t			cmd_buf_mc_addr;
 	struct psp_gfx_cmd_resp		*cmd_buf_mem;
+
+	/* fence value associated with cmd buffer */
+	atomic_t			fence_value;
+
+	/* xgmi ta firmware and buffer */
+	const struct firmware		*ta_fw;
+	uint32_t			ta_xgmi_ucode_version;
+	uint32_t			ta_xgmi_ucode_size;
+	uint8_t				*ta_xgmi_start_addr;
+	struct psp_xgmi_context		xgmi_context;
 };
 
 struct amdgpu_psp_funcs {
@@ -141,21 +162,17 @@ struct amdgpu_psp_funcs {
 					enum AMDGPU_UCODE_ID);
 };
 
+#define AMDGPU_XGMI_MAX_CONNECTED_NODES		64
+struct psp_xgmi_node_info {
+	uint64_t				node_id;
+	uint8_t					num_hops;
+	uint8_t					is_sharing_enabled;
+	enum ta_xgmi_assigned_sdma_engine	sdma_engine;
+};
+
 struct psp_xgmi_topology_info {
-	/* Generated by PSP to identify the GPU instance within xgmi connection */
-	uint64_t			device_id;
-	/*
-	 * If all bits set to 0 , driver indicates it wants to retrieve the xgmi
-	 * connection vector topology, but not access enable the connections
-	 * if some or all bits are set to 1, driver indicates it want to retrieve the
-	 * current xgmi topology and  access enable the link to GPU[i] associated
-	 * with the bit position in the  vector.
-	 * On return,: bits indicated which xgmi links are present/active depending
-	 * on the  value passed in. The relative bit offset for the  relative GPU index
-	 * within the  hive is always marked active.
-	 */
-	uint32_t			connection_mask;
-	uint32_t			reserved; /* must be  0 */
+	uint32_t			num_nodes;
+	struct psp_xgmi_node_info	nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
 };
 
 #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
@@ -177,8 +194,8 @@ struct psp_xgmi_topology_info {
 		((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
 #define psp_mode1_reset(psp) \
 		((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_device_id(psp) \
-		((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp) \
+		((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
 #define psp_xgmi_get_hive_id(psp) \
 		((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
 #define psp_xgmi_get_topology_info(psp, num_device, topology) \
@@ -199,6 +216,8 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
 
 int psp_gpu_reset(struct amdgpu_device *adev);
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+
 extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
 
 #endif
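The fence indices that call sites used to hardcode (1, 2, i + 3) are replaced
by the atomic fence_value counter added above. The increment itself is not in
the visible hunks, but presumably psp_cmd_submit_buf() now draws a fresh index
per submission, which is why its error path does atomic_dec(). A sketch under
that assumption:

	/* assumed: a unique, monotonically increasing fence index is
	 * allocated inside psp_cmd_submit_buf() (not shown in the hunks) */
	index = atomic_inc_return(&psp->fence_value);

	ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
			     fence_mc_addr, index);
	if (ret) {
		/* give the index back so the counter stays in sync */
		atomic_dec(&psp->fence_value);
		return ret;
	}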
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b70e85e..5b75bdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	/* Not to finish a ring which is not initialized */
 	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
@@ -500,3 +500,29 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
 	debugfs_remove(ring->ent);
 #endif
 }
+
+/**
+ * amdgpu_ring_test_helper - test a ring and set the sched readiness status
+ *
+ * @ring: ring to test
+ *
+ * Test the ring and set the scheduler readiness status accordingly.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int r;
+
+	r = amdgpu_ring_test_ring(ring);
+	if (r)
+		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
+			      ring->name, r);
+	else
+		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
+			      ring->name);
+
+	ring->sched.ready = !r;
+	return r;
+}
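amdgpu_ring_test_helper() folds the test/report/mark-ready sequence that every
IP block used to open-code. The cik_sdma conversion further below shows it in
context; for any ring the call site reduces to:

	r = amdgpu_ring_test_helper(ring);	/* sets ring->sched.ready = !r */
	if (r)
		return r;

which replaces the old form:

	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}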
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4caa301..0beb01f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -129,8 +129,9 @@ struct amdgpu_ring_funcs {
 	unsigned emit_ib_size;
 	/* command emit functions */
 	void (*emit_ib)(struct amdgpu_ring *ring,
+			struct amdgpu_job *job,
 			struct amdgpu_ib *ib,
-			unsigned vmid, bool ctx_switch);
+			bool ctx_switch);
 	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
 			   uint64_t seq, unsigned flags);
 	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -189,7 +190,6 @@ struct amdgpu_ring {
 	uint64_t		gpu_addr;
 	uint64_t		ptr_mask;
 	uint32_t		buf_mask;
-	bool			ready;
 	u32			idx;
 	u32			me;
 	u32			pipe;
@@ -229,7 +229,7 @@ struct amdgpu_ring {
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c)))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -313,4 +313,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
 	ring->count_dw -= count_dw;
 }
 
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+
 #endif
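With the reworked emit_ib signature the vmid is no longer threaded through as
a separate argument; implementations derive it from the job, which also gives
them access to the rest of the job state for the CSA/preemption rework noted
in the merge summary. A sketch of a converted callback for a hypothetical
backend (the cik_sdma hunk below shows a real one):

static void foo_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     bool ctx_switch)
{
	/* job may be NULL for direct submissions; the macro handles that */
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* ... emit the IB packet using ib->gpu_addr and vmid ... */
}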
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
new file mode 100644
index 0000000..c8793e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
+
+/**
+ * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Put the RLC into safe mode if it is enabled and not already in safe mode.
+ */
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+{
+	if (adev->gfx.rlc.in_safe_mode)
+		return;
+
+	/* if RLC is not enabled, do nothing */
+	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+		return;
+
+	if (adev->cg_flags &
+	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+		adev->gfx.rlc.funcs->set_safe_mode(adev);
+		adev->gfx.rlc.in_safe_mode = true;
+	}
+}
+
+/**
+ * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Take the RLC out of safe mode if it is enabled and currently in safe mode.
+ */
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+{
+	if (!(adev->gfx.rlc.in_safe_mode))
+		return;
+
+	/* if RLC is not enabled, do nothing */
+	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+		return;
+
+	if (adev->cg_flags &
+	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+		adev->gfx.rlc.funcs->unset_safe_mode(adev);
+		adev->gfx.rlc.in_safe_mode = false;
+	}
+}
+
+/**
+ * amdgpu_gfx_rlc_init_sr - Init save restore block
+ *
+ * @adev: amdgpu_device pointer
+ * @dws: size of the save restore block, in dwords
+ *
+ * Allocate and initialize the RLC save restore block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+{
+	const u32 *src_ptr;
+	volatile u32 *dst_ptr;
+	u32 i;
+	int r;
+
+	/* allocate save restore block */
+	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &adev->gfx.rlc.save_restore_obj,
+				      &adev->gfx.rlc.save_restore_gpu_addr,
+				      (void **)&adev->gfx.rlc.sr_ptr);
+	if (r) {
+		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+		amdgpu_gfx_rlc_fini(adev);
+		return r;
+	}
+
+	/* write the sr buffer */
+	src_ptr = adev->gfx.rlc.reg_list;
+	dst_ptr = adev->gfx.rlc.sr_ptr;
+	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+	return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_csb - Init clear state block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and initialize the RLC clear state block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+{
+	volatile u32 *dst_ptr;
+	u32 dws;
+	int r;
+
+	/* allocate clear state block */
+	adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &adev->gfx.rlc.clear_state_obj,
+				      &adev->gfx.rlc.clear_state_gpu_addr,
+				      (void **)&adev->gfx.rlc.cs_ptr);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
+		amdgpu_gfx_rlc_fini(adev);
+		return r;
+	}
+
+	/* set up the cs buffer */
+	dst_ptr = adev->gfx.rlc.cs_ptr;
+	adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
+	amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+	amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+	return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_cpt - Init cp table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and initialize the RLC CP table.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+				      &adev->gfx.rlc.cp_table_obj,
+				      &adev->gfx.rlc.cp_table_gpu_addr,
+				      (void **)&adev->gfx.rlc.cp_table_ptr);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
+		amdgpu_gfx_rlc_fini(adev);
+		return r;
+	}
+
+	/* set up the cp table */
+	amdgpu_gfx_rlc_setup_cp_table(adev);
+	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+	return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_setup_cp_table - set up the CP table buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Write the CP firmware data into the CP table.
+ */
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
+{
+	const __le32 *fw_data;
+	volatile u32 *dst_ptr;
+	int me, i, max_me;
+	u32 bo_offset = 0;
+	u32 table_offset, table_size;
+
+	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
+
+	/* write the cp table buffer */
+	dst_ptr = adev->gfx.rlc.cp_table_ptr;
+	for (me = 0; me < max_me; me++) {
+		if (me == 0) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.ce_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 1) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.pfp_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 2) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.me_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 3) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.mec_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 4) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.mec2_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		}
+
+		for (i = 0; i < table_size; i++) {
+			dst_ptr[bo_offset + i] =
+				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+		}
+
+		bo_offset += table_size;
+	}
+}
+
+/**
+ * amdgpu_gfx_rlc_fini - free the BOs used by the RLC
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the three BOs used for the RLC save restore block, clear state block
+ * and jump table block.
+ */
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
+{
+	/* save restore block */
+	if (adev->gfx.rlc.save_restore_obj) {
+		amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
+				      &adev->gfx.rlc.save_restore_gpu_addr,
+				      (void **)&adev->gfx.rlc.sr_ptr);
+	}
+
+	/* clear state block */
+	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+			      &adev->gfx.rlc.clear_state_gpu_addr,
+			      (void **)&adev->gfx.rlc.cs_ptr);
+
+	/* jump table block */
+	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+			      &adev->gfx.rlc.cp_table_gpu_addr,
+			      (void **)&adev->gfx.rlc.cp_table_ptr);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
new file mode 100644
index 0000000..49a8ab5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RLC_H__
+#define __AMDGPU_RLC_H__
+
+#include "clearstate_defs.h"
+
+struct amdgpu_rlc_funcs {
+	bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+	void (*set_safe_mode)(struct amdgpu_device *adev);
+	void (*unset_safe_mode)(struct amdgpu_device *adev);
+	int  (*init)(struct amdgpu_device *adev);
+	u32  (*get_csb_size)(struct amdgpu_device *adev);
+	void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+	int  (*get_cp_table_num)(struct amdgpu_device *adev);
+	int  (*resume)(struct amdgpu_device *adev);
+	void (*stop)(struct amdgpu_device *adev);
+	void (*reset)(struct amdgpu_device *adev);
+	void (*start)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_rlc {
+	/* for power gating */
+	struct amdgpu_bo        *save_restore_obj;
+	uint64_t                save_restore_gpu_addr;
+	volatile uint32_t       *sr_ptr;
+	const u32               *reg_list;
+	u32                     reg_list_size;
+	/* for clear state */
+	struct amdgpu_bo        *clear_state_obj;
+	uint64_t                clear_state_gpu_addr;
+	volatile uint32_t       *cs_ptr;
+	const struct cs_section_def   *cs_data;
+	u32                     clear_state_size;
+	/* for cp tables */
+	struct amdgpu_bo        *cp_table_obj;
+	uint64_t                cp_table_gpu_addr;
+	volatile uint32_t       *cp_table_ptr;
+	u32                     cp_table_size;
+
+	/* safe mode for updating CG/PG state */
+	bool in_safe_mode;
+	const struct amdgpu_rlc_funcs *funcs;
+
+	/* for firmware data */
+	u32 save_and_restore_offset;
+	u32 clear_state_descriptor_offset;
+	u32 avail_scratch_ram_locations;
+	u32 reg_restore_list_size;
+	u32 reg_list_format_start;
+	u32 reg_list_format_separate_start;
+	u32 starting_offsets_start;
+	u32 reg_list_format_size_bytes;
+	u32 reg_list_size_bytes;
+	u32 reg_list_format_direct_reg_list_length;
+	u32 save_restore_list_cntl_size_bytes;
+	u32 save_restore_list_gpm_size_bytes;
+	u32 save_restore_list_srm_size_bytes;
+
+	u32 *register_list_format;
+	u32 *register_restore;
+	u8 *save_restore_list_cntl;
+	u8 *save_restore_list_gpm;
+	u8 *save_restore_list_srm;
+
+	bool is_rlc_v2_1;
+};
+
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+
+#endif
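These wrappers centralize the checks (RLC actually enabled, clockgating flags
set, not already in safe mode) that each chip's callbacks previously had to
duplicate, so a caller just brackets its CG/PG-sensitive register updates, as
the ci_dpm conversion below illustrates:

	amdgpu_gfx_rlc_enter_safe_mode(adev);

	/* ... program registers that must not race the RLC ... */

	amdgpu_gfx_rlc_exit_safe_mode(adev);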
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index bc9244b..115bb0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -28,17 +28,31 @@
  * GPU SDMA IP block helper functions.
  */
 
-struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	int i;
 
 	for (i = 0; i < adev->sdma.num_instances; i++)
-		if (&adev->sdma.instance[i].ring == ring)
-			break;
+		if (ring == &adev->sdma.instance[i].ring ||
+		    ring == &adev->sdma.instance[i].page)
+			return &adev->sdma.instance[i];
 
-	if (i < AMDGPU_MAX_SDMA_INSTANCES)
-		return &adev->sdma.instance[i];
-	else
-		return NULL;
+	return NULL;
+}
+
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (ring == &adev->sdma.instance[i].ring ||
+			ring == &adev->sdma.instance[i].page) {
+			*index = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
 }
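Because an SDMA instance can now own two rings (the regular queue and the new
paging queue in instance[i].page), looking an instance up by ring pointer has
to check both. A caller that needs the numeric instance index would use the
new helper roughly like this (variable names illustrative):

	uint32_t instance;

	if (amdgpu_sdma_get_index_from_ring(ring, &instance)) {
		DRM_ERROR("ring %s is not an SDMA ring\n", ring->name);
		return -EINVAL;
	}
	/* instance now indexes adev->sdma.instance[] for either queue */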
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 500113e..16b1a6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -41,6 +41,7 @@ struct amdgpu_sdma_instance {
 	uint32_t		feature_version;
 
 	struct amdgpu_ring	ring;
+	struct amdgpu_ring	page;
 	bool			burst_nop;
 };
 
@@ -50,6 +51,7 @@ struct amdgpu_sdma {
 	struct amdgpu_irq_src	illegal_inst_irq;
 	int			num_instances;
 	uint32_t                    srbm_soft_reset;
+	bool			has_page_queue;
 };
 
 /*
@@ -92,6 +94,7 @@ struct amdgpu_buffer_funcs {
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 
 struct amdgpu_sdma_instance *
-amdgpu_get_sdma_instance(struct amdgpu_ring *ring);
+amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index e9bf70e..626abca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -218,6 +218,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 	    TP_ARGS(vm, ring, job),
 	    TP_STRUCT__entry(
 			     __field(u32, pasid)
+			     __string(ring, ring->name)
 			     __field(u32, ring)
 			     __field(u32, vmid)
 			     __field(u32, vm_hub)
@@ -227,14 +228,14 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 
 	    TP_fast_assign(
 			   __entry->pasid = vm->pasid;
-			   __entry->ring = ring->idx;
+			   __assign_str(ring, ring->name)
 			   __entry->vmid = job->vmid;
 			   __entry->vm_hub = ring->funcs->vmhub,
 			   __entry->pd_addr = job->vm_pd_addr;
 			   __entry->needs_flush = job->vm_needs_flush;
 			   ),
-	    TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
-		      __entry->pasid, __entry->ring, __entry->vmid,
+	    TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+		      __entry->pasid, __get_str(ring), __entry->vmid,
 		      __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
 );
 
@@ -366,20 +367,20 @@ TRACE_EVENT(amdgpu_vm_flush,
 		     uint64_t pd_addr),
 	    TP_ARGS(ring, vmid, pd_addr),
 	    TP_STRUCT__entry(
-			     __field(u32, ring)
+			     __string(ring, ring->name)
 			     __field(u32, vmid)
 			     __field(u32, vm_hub)
 			     __field(u64, pd_addr)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->ring = ring->idx;
+			   __assign_str(ring, ring->name)
 			   __entry->vmid = vmid;
 			   __entry->vm_hub = ring->funcs->vmhub;
 			   __entry->pd_addr = pd_addr;
 			   ),
-	    TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
-		      __entry->ring, __entry->vmid,
+	    TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
+		      __get_str(ring), __entry->vmid,
 		      __entry->vm_hub,__entry->pd_addr)
 );
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a44fc12..c91ec31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -61,100 +61,6 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 
-/*
- * Global memory.
- */
-
-/**
- * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
- * memory object
- *
- * @ref: Object for initialization.
- *
- * This is called by drm_global_item_ref() when an object is being
- * initialized.
- */
-static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-/**
- * amdgpu_ttm_mem_global_release - Drop reference to a memory object
- *
- * @ref: Object being removed
- *
- * This is called by drm_global_item_unref() when an object is being
- * released.
- */
-static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
- *
- * @adev: AMDGPU device for which the global structures need to be registered.
- *
- * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
- * during bring up.
- */
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	/* ensure reference is false in case init fails */
-	adev->mman.mem_global_referenced = false;
-
-	global_ref = &adev->mman.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &amdgpu_ttm_mem_global_init;
-	global_ref->release = &amdgpu_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		goto error_mem;
-	}
-
-	adev->mman.bo_global_ref.mem_glob =
-		adev->mman.mem_global_ref.object;
-	global_ref = &adev->mman.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		goto error_bo;
-	}
-
-	mutex_init(&adev->mman.gtt_window_lock);
-
-	adev->mman.mem_global_referenced = true;
-
-	return 0;
-
-error_bo:
-	drm_global_item_unref(&adev->mman.mem_global_ref);
-error_mem:
-	return r;
-}
-
-static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
-{
-	if (adev->mman.mem_global_referenced) {
-		mutex_destroy(&adev->mman.gtt_window_lock);
-		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
-		drm_global_item_unref(&adev->mman.mem_global_ref);
-		adev->mman.mem_global_referenced = false;
-	}
-}
-
 static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
 	return 0;
@@ -1758,14 +1664,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	int r;
 	u64 vis_vram_limit;
 
-	/* initialize global references for vram/gtt */
-	r = amdgpu_ttm_global_init(adev);
-	if (r) {
-		return r;
-	}
+	mutex_init(&adev->mman.gtt_window_lock);
+
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&adev->mman.bdev,
-			       adev->mman.bo_global_ref.ref.object,
 			       &amdgpu_bo_driver,
 			       adev->ddev->anon_inode->i_mapping,
 			       DRM_FILE_PAGE_OFFSET,
@@ -1922,7 +1824,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
 	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
 	ttm_bo_device_release(&adev->mman.bdev);
-	amdgpu_ttm_global_fini(adev);
 	adev->mman.initialized = false;
 	DRM_INFO("amdgpu: ttm finalized\n");
 }
@@ -2069,7 +1970,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	unsigned i;
 	int r;
 
-	if (direct_submit && !ring->ready) {
+	if (direct_submit && !ring->sched.ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index fe8f276..b5b2d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -39,8 +39,6 @@
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
 
 struct amdgpu_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
 	struct ttm_bo_device		bdev;
 	bool				mem_global_referenced;
 	bool				initialized;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index aa6641b..7ac25a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -58,6 +58,17 @@ struct psp_firmware_header_v1_0 {
 };
 
 /* version_major=1, version_minor=0 */
+struct ta_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ta_xgmi_ucode_version;
+	uint32_t ta_xgmi_offset_bytes;
+	uint32_t ta_xgmi_size_bytes;
+	uint32_t ta_ras_ucode_version;
+	uint32_t ta_ras_offset_bytes;
+	uint32_t ta_ras_size_bytes;
+};
+
+/* version_major=1, version_minor=0 */
 struct gfx_firmware_header_v1_0 {
 	struct common_firmware_header header;
 	uint32_t ucode_feature_version;
@@ -170,6 +181,7 @@ union amdgpu_firmware_header {
 	struct mc_firmware_header_v1_0 mc;
 	struct smc_firmware_header_v1_0 smc;
 	struct psp_firmware_header_v1_0 psp;
+	struct ta_firmware_header_v1_0 ta;
 	struct gfx_firmware_header_v1_0 gfx;
 	struct rlc_firmware_header_v1_0 rlc;
 	struct rlc_firmware_header_v2_0 rlc_v2_0;
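How these TA header fields feed the ta_* members added to psp_context earlier
in this patch is not visible in these hunks (it presumably lives in the
psp_v11_0 firmware init code); a sketch under that assumption:

	const struct ta_firmware_header_v1_0 *ta_hdr =
		(const struct ta_firmware_header_v1_0 *)psp->ta_fw->data;

	psp->ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
	psp->ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
	/* the XGMI TA image sits at the common ucode array offset */
	psp->ta_xgmi_start_addr = (uint8_t *)ta_hdr +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);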
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e5a6db6..69896f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1243,30 +1243,20 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence;
 	long r;
-	uint32_t ip_instance = ring->me;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
+	if (r)
 		goto error;
-	}
 
 	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
-	} else {
-		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
 
 	dma_fence_put(fence);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 5f3f540..98a1b2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1032,8 +1032,10 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
  * @ib: the IB to execute
  *
  */
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-			     unsigned vmid, bool ctx_switch)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
+				struct amdgpu_job *job,
+				struct amdgpu_ib *ib,
+				bool ctx_switch)
 {
 	amdgpu_ring_write(ring, VCE_CMD_IB);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1079,11 +1081,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 		return 0;
 
 	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring, VCE_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -1093,14 +1093,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed\n",
-			  ring->idx);
+	if (i >= timeout)
 		r = -ETIMEDOUT;
-	}
 
 	return r;
 }
@@ -1121,27 +1115,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		return 0;
 
 	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
+
 error:
 	dma_fence_put(fence);
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index a1f209e..5029365 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-			     unsigned vmid, bool ctx_switch);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+				struct amdgpu_ib *ib, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 				unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 27da13d..e2e42e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -425,11 +425,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -441,14 +439,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -570,30 +563,20 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n",  ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
 
 	dma_fence_put(fence);
-
 error:
 	return r;
 }
@@ -606,11 +589,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -620,14 +601,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed\n",
-			  ring->idx);
+	if (i >= adev->usec_timeout)
 		r = -ETIMEDOUT;
-	}
 
 	return r;
 }
@@ -742,27 +717,19 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
+
 error:
 	dma_fence_put(fence);
 	return r;
@@ -778,11 +745,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
 
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-				  ring->idx, r);
+	if (r)
 		return r;
-	}
 
 	amdgpu_ring_write(ring,
 		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
@@ -796,14 +760,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-				  ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-				  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
 	return r;
 }
@@ -856,21 +814,18 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r = 0;
 
 	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
 		goto error;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto error;
-	} else
+	} else {
 		r = 0;
+	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
@@ -879,15 +834,10 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout)
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
-	else {
-		DRM_ERROR("ib test failed (0x%08X)\n", tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
 	dma_fence_put(fence);
-
 error:
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f2f358a..cfee747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -23,16 +23,6 @@
 
 #include "amdgpu.h"
 
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
-{
-	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
-
-	addr -= AMDGPU_VA_RESERVED_SIZE;
-	addr = amdgpu_gmc_sign_extend(addr);
-
-	return addr;
-}
-
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
 	/* By now all MMIO pages except mailbox are blocked */
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 }
 
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
-{
-	int r;
-	void *ptr;
-
-	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
-				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
-				&adev->virt.csa_vmid0_addr, &ptr);
-	if (r)
-		return r;
-
-	memset(ptr, 0, AMDGPU_CSA_SIZE);
-	return 0;
-}
-
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
-	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
-						&adev->virt.csa_vmid0_addr,
-						NULL);
-}
-
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
- * submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
- */
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va)
-{
-	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
-	int r;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &adev->virt.csa_obj->tbo;
-	csa_tv.shared = true;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-		return r;
-	}
-
-	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
-	if (!*bo_va) {
-		ttm_eu_backoff_reservation(&ticket, &list);
-		DRM_ERROR("failed to create bo_va for static CSA\n");
-		return -ENOMEM;
-	}
-
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
-				AMDGPU_CSA_SIZE);
-	if (r) {
-		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
-			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-			     AMDGPU_PTE_EXECUTABLE);
-
-	if (r) {
-		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	ttm_eu_backoff_reservation(&ticket, &list);
-	return 0;
-}
-
 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 {
 	/* enable virtual display */
@@ -162,9 +70,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
 		goto failed_kiq_read;
 
-	if (in_interrupt())
-		might_sleep();
-
+	might_sleep();
 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -210,9 +116,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
 		goto failed_kiq_write;
 
-	if (in_interrupt())
-		might_sleep();
-
+	might_sleep();
 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 
 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
@@ -228,6 +132,46 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 	pr_err("failed to write reg:%x\n", reg);
 }
 
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+					uint32_t reg0, uint32_t reg1,
+					uint32_t ref, uint32_t mask)
+{
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+					    ref, mask);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for IRQ context */
+	if (r < 1 && in_interrupt())
+		goto failed_kiq;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq;
+
+	return;
+
+failed_kiq:
+	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
 /**
  * amdgpu_virt_request_full_gpu() - request full gpu access
  * @adev:	amdgpu device.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 880ac11..0728fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -238,7 +238,6 @@ typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
 struct amdgpu_virt {
 	uint32_t			caps;
 	struct amdgpu_bo		*csa_obj;
-	uint64_t			csa_vmid0_addr;
 	bool chained_ib_support;
 	uint32_t			reg_val_offs;
 	struct amdgpu_irq_src		ack_irq;
@@ -251,8 +250,6 @@ struct amdgpu_virt {
 	uint32_t gim_feature;
 };
 
-#define AMDGPU_CSA_SIZE		(8 * 1024)
-
 #define amdgpu_sriov_enabled(adev) \
 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
 
@@ -277,17 +274,13 @@ static inline bool is_virtual_machine(void)
 #endif
 }
 
-struct amdgpu_vm;
-
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+					uint32_t reg0, uint32_t reg1,
+					uint32_t ref, uint32_t mask);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
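amdgpu_virt_kiq_reg_write_reg_wait() packages a common SR-IOV sequence: write
one register, then poll another until (value & mask) == ref, with both
operations emitted on the KIQ ring so they still work while direct MMIO
access is blocked. Usage shape (register names illustrative):

	/* write REQ, then spin on ACK until the requested bits latch */
	amdgpu_virt_kiq_reg_write_reg_wait(adev, mmFOO_REQ, mmFOO_ACK,
					   req_value, ack_mask);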
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 897afbb..909216a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -63,7 +63,7 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 {
-	struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE];
+	struct psp_xgmi_topology_info *tmp_topology;
 	struct amdgpu_hive_info *hive;
 	struct amdgpu_xgmi	*entry;
 	struct amdgpu_device 	*tmp_adev;
@@ -73,10 +73,12 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 	if ((adev->asic_type < CHIP_VEGA20) ||
 		(adev->flags & AMD_IS_APU) )
 		return 0;
-	adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp);
+	adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
 	adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
 
-	memset(&tmp_topology[0], 0, sizeof(tmp_topology));
+	tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL);
+	if (!tmp_topology)
+		return -ENOMEM;
 	mutex_lock(&xgmi_mutex);
 	hive = amdgpu_get_xgmi_hive(adev);
 	if (!hive)
@@ -84,23 +86,28 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 
 	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
 	list_for_each_entry(entry, &hive->device_list, head)
-		tmp_topology[count++].device_id = entry->device_id;
+		tmp_topology->nodes[count++].node_id = entry->node_id;
 
-	ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology);
-	if (ret) {
-		dev_err(adev->dev,
-			"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
-			adev->gmc.xgmi.device_id,
-			adev->gmc.xgmi.hive_id, ret);
-		goto exit;
+	/* Each PSP needs to get the latest topology */
+	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+		ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, tmp_topology);
+		if (ret) {
+			dev_err(tmp_adev->dev,
+				"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+				tmp_adev->gmc.xgmi.node_id,
+				tmp_adev->gmc.xgmi.hive_id, ret);
+			/* TODO: continue despite a node failure, or disable the whole hive */
+			break;
+		}
 	}
+
 	/* Each PSP needs to set the latest topology */
 	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 		ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
 		if (ret) {
 			dev_err(tmp_adev->dev,
 				"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
-				tmp_adev->gmc.xgmi.device_id,
+				tmp_adev->gmc.xgmi.node_id,
 				tmp_adev->gmc.xgmi.hive_id, ret);
 			/* TODO: continue despite a node failure, or disable the whole hive */
 			break;
@@ -113,7 +120,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 
 exit:
 	mutex_unlock(&xgmi_mutex);
+	kfree(tmp_topology);
 	return ret;
 }
-
-
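Moving tmp_topology from the stack to kzalloc() matters because the reworked
psp_xgmi_topology_info is now a fixed 64-node array rather than a pair of
u32s. A rough size estimate (padding assumed, so approximate):

	/* per node: 8 B node_id + 1 B num_hops + 1 B is_sharing_enabled
	 * + 4 B sdma_engine enum + padding ~= 16 B; so one snapshot is
	 * ~4 + 64 * 16 ~= 1 KiB -- too large to keep on the kernel stack,
	 * hence the kzalloc()/kfree() pair. */
	tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL);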
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 79220a9..86e14c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
 
 	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
 	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 		if (enable) {
 			ret = ci_program_pt_config_registers(adev, didt_config_ci);
 			if (ret) {
-				adev->gfx.rlc.funcs->exit_safe_mode(adev);
+				amdgpu_gfx_rlc_exit_safe_mode(adev);
 				return ret;
 			}
 		}
 
 		ci_do_enable_didt(adev, enable);
 
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index b918c88..4579519 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -198,7 +198,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (CIK).
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 extra_bits = vmid & 0xf;
 
 	/* IB packet must end on a 8 DW boundary */
@@ -316,8 +318,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
 	}
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -494,18 +496,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 	}
 
 	cik_sdma_enable(adev, true);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -618,21 +618,17 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 5);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
+
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
@@ -647,15 +643,11 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
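
Note that the error codes become meaningful in the process: a ring that never echoes the scratch value now returns -ETIMEDOUT instead of a blanket -EINVAL, and the per-ASIC DRM_ERROR chatter disappears because failures are reported once by the callers. A hypothetical helper shaped like the loop body of amdgpu_ib_ring_tests() after this change (name and exact message are assumptions):

static int amdgpu_ib_test_one(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring, long tmo)
{
	int r = amdgpu_ring_test_ib(ring, tmo);

	if (r) {
		/* a failing ring is taken out of scheduling... */
		ring->sched.ready = false;
		/* ...and reported once, by name instead of index */
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d)\n",
			      ring->name, r);
	}
	return r;
}
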
 
@@ -678,20 +670,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
 				SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -706,21 +694,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -822,7 +805,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	u32 pad_count;
 	int i;
 
@@ -1214,8 +1197,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
 					     struct amdgpu_irq_src *source,
 					     struct amdgpu_iv_entry *entry)
 {
+	u8 instance_id;
+
 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
+	instance_id = (entry->ring_id & 0x3) >> 0;
+	drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
 	return 0;
 }
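
Rather than kicking the IP-specific reset_work, illegal-instruction and privileged-access interrupts now poke the scheduler of the exact ring that faulted. drm_sched_fault() is new on the scheduler side in this series; presumably it just fast-forwards the ring's timeout work so faults funnel into the same recovery path as hung jobs:

/* sketch, assuming the scheduler reuses its timeout machinery for faults */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
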
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d76eb27..1dc3013 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1775,18 +1775,15 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
-	}
+	if (r)
+		goto error_free_scratch;
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1798,13 +1795,11 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 			break;
 		DRM_UDELAY(1);
 	}
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+error_free_scratch:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
@@ -1845,9 +1840,11 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 }
 
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -1892,17 +1889,15 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err1;
-	}
+
 	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
@@ -1914,22 +1909,16 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err2;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err2;
 	}
 	tmp = RREG32(scratch);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
+	else
 		r = -EINVAL;
-	}
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -1950,9 +1939,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 				      CP_ME_CNTL__CE_HALT_MASK));
 		WREG32(mmSCRATCH_UMSK, 0);
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2124,12 +2113,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	/* start the rings */
 	gfx_v6_0_cp_gfx_start(adev);
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
@@ -2227,14 +2213,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
 	WREG32(mmCP_RB2_CNTL, tmp);
 	WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
 
-	adev->gfx.compute_ring[0].ready = false;
-	adev->gfx.compute_ring[1].ready = false;
 
 	for (i = 0; i < 2; i++) {
-		r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+		r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
 		if (r)
 			return r;
-		adev->gfx.compute_ring[i].ready = true;
 	}
 
 	return 0;
@@ -2368,18 +2351,11 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, val);
 }
 
-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
 static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 {
 	const u32 *src_ptr;
 	volatile u32 *dst_ptr;
-	u32 dws, i;
+	u32 dws;
 	u64 reg_list_mc_addr;
 	const struct cs_section_def *cs_data;
 	int r;
@@ -2394,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 	cs_data = adev->gfx.rlc.cs_data;
 
 	if (src_ptr) {
-		/* save restore block */
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.save_restore_obj,
-					      &adev->gfx.rlc.save_restore_gpu_addr,
-					      (void **)&adev->gfx.rlc.sr_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
-				 r);
-			gfx_v6_0_rlc_fini(adev);
+		/* init save restore block */
+		r = amdgpu_gfx_rlc_init_sr(adev, dws);
+		if (r)
 			return r;
-		}
-
-		/* write the sr buffer */
-		dst_ptr = adev->gfx.rlc.sr_ptr;
-		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-
-		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
 	}
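
The save/restore buffer setup deleted above (BO creation plus the cpu_to_le32 copy of the register list) moves into shared RLC code. A sketch of amdgpu_gfx_rlc_init_sr(), reconstructed from what it replaces:

int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
	const u32 *src_ptr = adev->gfx.rlc.reg_list;
	volatile u32 *dst_ptr;
	u32 i;
	int r;

	/* allocate the save restore block */
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* write the sr buffer */
	dst_ptr = adev->gfx.rlc.sr_ptr;
	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

	return 0;
}
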
 
 	if (cs_data) {
@@ -2428,7 +2388,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 					      (void **)&adev->gfx.rlc.cs_ptr);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-			gfx_v6_0_rlc_fini(adev);
+			amdgpu_gfx_rlc_fini(adev);
 			return r;
 		}
 
@@ -2549,8 +2509,8 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
 	if (!adev->gfx.rlc_fw)
 		return -EINVAL;
 
-	gfx_v6_0_rlc_stop(adev);
-	gfx_v6_0_rlc_reset(adev);
+	adev->gfx.rlc.funcs->stop(adev);
+	adev->gfx.rlc.funcs->reset(adev);
 	gfx_v6_0_init_pg(adev);
 	gfx_v6_0_init_cg(adev);
 
@@ -2578,7 +2538,7 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
 	WREG32(mmRLC_UCODE_ADDR, 0);
 
 	gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
-	gfx_v6_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
@@ -3075,6 +3035,14 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
 	.select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
 };
 
+static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
+	.init = gfx_v6_0_rlc_init,
+	.resume = gfx_v6_0_rlc_resume,
+	.stop = gfx_v6_0_rlc_stop,
+	.reset = gfx_v6_0_rlc_reset,
+	.start = gfx_v6_0_rlc_start
+};
+
 static int gfx_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3082,6 +3050,7 @@ static int gfx_v6_0_early_init(void *handle)
 	adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
 	adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
 	adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+	adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs;
 	gfx_v6_0_set_ring_funcs(adev);
 	gfx_v6_0_set_irq_funcs(adev);
 
@@ -3114,7 +3083,7 @@ static int gfx_v6_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = gfx_v6_0_rlc_init(adev);
+	r = adev->gfx.rlc.funcs->init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init rlc BOs!\n");
 		return r;
@@ -3165,7 +3134,7 @@ static int gfx_v6_0_sw_fini(void *handle)
 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-	gfx_v6_0_rlc_fini(adev);
+	amdgpu_gfx_rlc_fini(adev);
 
 	return 0;
 }
@@ -3177,7 +3146,7 @@ static int gfx_v6_0_hw_init(void *handle)
 
 	gfx_v6_0_constants_init(adev);
 
-	r = gfx_v6_0_rlc_resume(adev);
+	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;
 
@@ -3195,7 +3164,7 @@ static int gfx_v6_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	gfx_v6_0_cp_enable(adev, false);
-	gfx_v6_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 	gfx_v6_0_fini_pg(adev);
 
 	return 0;
@@ -3393,12 +3362,31 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v6_0_fault(struct amdgpu_device *adev,
+			   struct amdgpu_iv_entry *entry)
+{
+	struct amdgpu_ring *ring;
+
+	switch (entry->ring_id) {
+	case 0:
+		ring = &adev->gfx.gfx_ring[0];
+		break;
+	case 1:
+	case 2:
+		ring = &adev->gfx.compute_ring[entry->ring_id - 1];
+		break;
+	default:
+		return;
+	}
+	drm_sched_fault(&ring->sched);
+}
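
gfx_v6 has one gfx ring and two compute rings, so ring_id 0..2 maps directly as above. Once drm_sched_fault() triggers, recovery presumably proceeds through the scheduler's ordinary timeout callback, which on the amdgpu side looks something like this sketch:

/* sketch of the job timeout callback the fault path relies on */
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  ring->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	amdgpu_device_gpu_recover(ring->adev, job);
}
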
+
 static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
 				 struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal register access in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v6_0_fault(adev, entry);
 	return 0;
 }
 
@@ -3407,7 +3395,7 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
 				  struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal instruction in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v6_0_fault(adev, entry);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0e72bc0..f467b9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
 
 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 
@@ -2064,17 +2063,14 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
-	}
+	if (r)
+		goto error_free_scratch;
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -2086,13 +2082,10 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 			break;
 		DRM_UDELAY(1);
 	}
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+error_free_scratch:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
@@ -2233,9 +2226,11 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-				      struct amdgpu_ib *ib,
-				      unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -2262,9 +2257,11 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+					  struct amdgpu_job *job,
 					  struct amdgpu_ib *ib,
-					  unsigned vmid, bool ctx_switch)
+					  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -2316,17 +2313,15 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err1;
-	}
+
 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
@@ -2338,22 +2333,16 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err2;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err2;
 	}
 	tmp = RREG32(scratch);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
+	else
 		r = -EINVAL;
-	}
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -2403,7 +2392,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 	} else {
 		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2613,12 +2602,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	/* start the ring */
 	gfx_v7_0_cp_gfx_start(adev);
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
@@ -2675,7 +2661,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 	} else {
 		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2781,7 +2767,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 		* GFX7_MEC_HPD_SIZE * 2;
 
 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
+				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &adev->gfx.mec.hpd_eop_obj,
 				      &adev->gfx.mec.hpd_eop_gpu_addr,
 				      (void **)&hpd);
@@ -3106,10 +3092,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r)
-			ring->ready = false;
+		amdgpu_ring_test_helper(ring);
 	}
 
 	return 0;
@@ -3268,18 +3251,10 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
  * The RLC is a multi-purpose microengine that handles a
  * variety of functions.
  */
-static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 {
 	const u32 *src_ptr;
-	volatile u32 *dst_ptr;
-	u32 dws, i;
+	u32 dws;
 	const struct cs_section_def *cs_data;
 	int r;
 
@@ -3306,66 +3281,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	cs_data = adev->gfx.rlc.cs_data;
 
 	if (src_ptr) {
-		/* save restore block */
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.save_restore_obj,
-					      &adev->gfx.rlc.save_restore_gpu_addr,
-					      (void **)&adev->gfx.rlc.sr_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
+		/* init save restore block */
+		r = amdgpu_gfx_rlc_init_sr(adev, dws);
+		if (r)
 			return r;
-		}
-
-		/* write the sr buffer */
-		dst_ptr = adev->gfx.rlc.sr_ptr;
-		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
 	}
 
 	if (cs_data) {
-		/* clear state block */
-		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
-
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.clear_state_obj,
-					      &adev->gfx.rlc.clear_state_gpu_addr,
-					      (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
+		/* init clear state block */
+		r = amdgpu_gfx_rlc_init_csb(adev);
+		if (r)
 			return r;
-		}
-
-		/* set up the cs buffer */
-		dst_ptr = adev->gfx.rlc.cs_ptr;
-		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
-		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
 	}
 
 	if (adev->gfx.rlc.cp_table_size) {
-
-		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.cp_table_obj,
-					      &adev->gfx.rlc.cp_table_gpu_addr,
-					      (void **)&adev->gfx.rlc.cp_table_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
+		r = amdgpu_gfx_rlc_init_cpt(adev);
+		if (r)
 			return r;
-		}
-
-		gfx_v7_0_init_cp_pg_table(adev);
-
-		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
 	}
 
 	return 0;
@@ -3446,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
 	return orig;
 }
 
-static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
+{
+	return true;
+}
+
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
 {
 	u32 tmp, i, mask;
 
@@ -3468,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
 	}
 }
 
-static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
@@ -3545,13 +3482,13 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
 	adev->gfx.rlc_feature_version = le32_to_cpu(
 					hdr->ucode_feature_version);
 
-	gfx_v7_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 
 	/* disable CG */
 	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
 	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
 
-	gfx_v7_0_rlc_reset(adev);
+	adev->gfx.rlc.funcs->reset(adev);
 
 	gfx_v7_0_init_pg(adev);
 
@@ -3582,7 +3519,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
 	if (adev->asic_type == CHIP_BONAIRE)
 		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
 
-	gfx_v7_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
@@ -3784,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
 		WREG32(mmRLC_PG_CNTL, data);
 }
 
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
 {
-	const __le32 *fw_data;
-	volatile u32 *dst_ptr;
-	int me, i, max_me = 4;
-	u32 bo_offset = 0;
-	u32 table_offset, table_size;
-
 	if (adev->asic_type == CHIP_KAVERI)
-		max_me = 5;
-
-	if (adev->gfx.rlc.cp_table_ptr == NULL)
-		return;
-
-	/* write the cp table buffer */
-	dst_ptr = adev->gfx.rlc.cp_table_ptr;
-	for (me = 0; me < max_me; me++) {
-		if (me == 0) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.ce_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 1) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.pfp_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 2) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.me_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 3) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec2_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		}
-
-		for (i = 0; i < table_size; i ++) {
-			dst_ptr[bo_offset + i] =
-				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-		}
-
-		bo_offset += table_size;
-	}
+		return 5;
+	else
+		return 4;
 }
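
The ~60-line jump-table writer shrinks to a callback that only reports how many microengines carry a jump table (KAVERI has an extra MEC2, hence 5). The copy loop itself presumably now lives once in the shared RLC code, consulting get_cp_table_num() for the per-ASIC count, roughly:

static void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *hdr;
	volatile u32 *dst_ptr = adev->gfx.rlc.cp_table_ptr;
	const __le32 *fw_data;
	u32 table_offset, table_size, bo_offset = 0;
	int max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
	int me, i;

	for (me = 0; me < max_me; me++) {
		const struct firmware *fw;

		switch (me) {
		case 0:  fw = adev->gfx.ce_fw;   break;
		case 1:  fw = adev->gfx.pfp_fw;  break;
		case 2:  fw = adev->gfx.me_fw;   break;
		case 3:  fw = adev->gfx.mec_fw;  break;
		default: fw = adev->gfx.mec2_fw; break;
		}

		hdr = (const struct gfx_firmware_header_v1_0 *)fw->data;
		fw_data = (const __le32 *)(fw->data +
			  le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		table_offset = le32_to_cpu(hdr->jt_offset);
		table_size = le32_to_cpu(hdr->jt_size);

		for (i = 0; i < table_size; i++)
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));

		bo_offset += table_size;
	}
}
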
 
 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
@@ -4288,8 +4165,17 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 };
 
 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
-	.enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
-	.exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+	.is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
+	.set_safe_mode = gfx_v7_0_set_safe_mode,
+	.unset_safe_mode = gfx_v7_0_unset_safe_mode,
+	.init = gfx_v7_0_rlc_init,
+	.get_csb_size = gfx_v7_0_get_csb_size,
+	.get_csb_buffer = gfx_v7_0_get_csb_buffer,
+	.get_cp_table_num = gfx_v7_0_cp_pg_table_num,
+	.resume = gfx_v7_0_rlc_resume,
+	.stop = gfx_v7_0_rlc_stop,
+	.reset = gfx_v7_0_rlc_reset,
+	.start = gfx_v7_0_rlc_start
 };
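
Reading the gfx_v6/v7/v8 initializers together against the per-ASIC declarations, the widened vtable in amdgpu_rlc.h is presumably at least:

struct amdgpu_rlc_funcs {
	bool (*is_rlc_enabled)(struct amdgpu_device *adev);
	void (*set_safe_mode)(struct amdgpu_device *adev);
	void (*unset_safe_mode)(struct amdgpu_device *adev);
	int (*init)(struct amdgpu_device *adev);
	u32 (*get_csb_size)(struct amdgpu_device *adev);
	void (*get_csb_buffer)(struct amdgpu_device *adev,
			       volatile u32 *buffer);
	int (*get_cp_table_num)(struct amdgpu_device *adev);
	int (*resume)(struct amdgpu_device *adev);
	void (*stop)(struct amdgpu_device *adev);
	void (*reset)(struct amdgpu_device *adev);
	void (*start)(struct amdgpu_device *adev);
};

gfx_v6_0 fills in only the callbacks its paths actually reach; the common code (safe-mode wrappers, sw_init/sw_fini, resume) dispatches through whichever entries the ASIC provides.
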
 
 static int gfx_v7_0_early_init(void *handle)
@@ -4540,7 +4426,7 @@ static int gfx_v7_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = gfx_v7_0_rlc_init(adev);
+	r = adev->gfx.rlc.funcs->init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init rlc BOs!\n");
 		return r;
@@ -4604,7 +4490,7 @@ static int gfx_v7_0_sw_fini(void *handle)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
 	gfx_v7_0_cp_compute_fini(adev);
-	gfx_v7_0_rlc_fini(adev);
+	amdgpu_gfx_rlc_fini(adev);
 	gfx_v7_0_mec_fini(adev);
 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
 				&adev->gfx.rlc.clear_state_gpu_addr,
@@ -4627,7 +4513,7 @@ static int gfx_v7_0_hw_init(void *handle)
 	gfx_v7_0_constants_init(adev);
 
 	/* init rlc */
-	r = gfx_v7_0_rlc_resume(adev);
+	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;
 
@@ -4645,7 +4531,7 @@ static int gfx_v7_0_hw_fini(void *handle)
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 	gfx_v7_0_cp_enable(adev, false);
-	gfx_v7_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 	gfx_v7_0_fini_pg(adev);
 
 	return 0;
@@ -4730,7 +4616,7 @@ static int gfx_v7_0_soft_reset(void *handle)
 		gfx_v7_0_update_cg(adev, false);
 
 		/* stop the rlc */
-		gfx_v7_0_rlc_stop(adev);
+		adev->gfx.rlc.funcs->stop(adev);
 
 		/* Disable GFX parsing/prefetching */
 		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
@@ -4959,12 +4845,36 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v7_0_fault(struct amdgpu_device *adev,
+			   struct amdgpu_iv_entry *entry)
+{
+	struct amdgpu_ring *ring;
+	u8 me_id, pipe_id;
+	int i;
+
+	me_id = (entry->ring_id & 0x0c) >> 2;
+	pipe_id = (entry->ring_id & 0x03) >> 0;
+	switch (me_id) {
+	case 0:
+		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+		break;
+	case 1:
+	case 2:
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			ring = &adev->gfx.compute_ring[i];
+			if ((ring->me == me_id) && (ring->pipe == pipe_id))
+				drm_sched_fault(&ring->sched);
+		}
+		break;
+	}
+}
+
 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
 				 struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal register access in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v7_0_fault(adev, entry);
 	return 0;
 }
 
@@ -4974,7 +4884,7 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
 {
 	DRM_ERROR("Illegal instruction in command stream\n");
 	// XXX soft reset the gfx block only
-	schedule_work(&adev->reset_work);
+	gfx_v7_0_fault(adev, entry);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 617b0c8..cb066a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -54,7 +54,7 @@
 #include "ivsrcid/ivsrcid_vislands30.h"
 
 #define GFX8_NUM_GFX_RINGS     1
-#define GFX8_MEC_HPD_SIZE 2048
+#define GFX8_MEC_HPD_SIZE 4096
 
 #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
 #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
@@ -839,18 +839,14 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
-	}
+	if (r)
+		goto error_free_scratch;
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -862,14 +858,11 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
 			break;
 		DRM_UDELAY(1);
 	}
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+error_free_scratch:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
@@ -886,19 +879,16 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 16, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err1;
-	}
+
 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
 	ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -912,22 +902,17 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
 		goto err2;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err2;
 	}
 
 	tmp = adev->wb.wb[index];
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("ib test on ring %d failed\n", ring->idx);
+	else
 		r = -EINVAL;
-	}
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -1298,81 +1283,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
 	buffer[count++] = cpu_to_le32(0);
 }
 
-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
 {
-	const __le32 *fw_data;
-	volatile u32 *dst_ptr;
-	int me, i, max_me = 4;
-	u32 bo_offset = 0;
-	u32 table_offset, table_size;
-
 	if (adev->asic_type == CHIP_CARRIZO)
-		max_me = 5;
-
-	/* write the cp table buffer */
-	dst_ptr = adev->gfx.rlc.cp_table_ptr;
-	for (me = 0; me < max_me; me++) {
-		if (me == 0) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.ce_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 1) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.pfp_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 2) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.me_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 3) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else  if (me == 4) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec2_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		}
-
-		for (i = 0; i < table_size; i ++) {
-			dst_ptr[bo_offset + i] =
-				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-		}
-
-		bo_offset += table_size;
-	}
-}
-
-static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+		return 5;
+	else
+		return 4;
 }
 
 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 {
-	volatile u32 *dst_ptr;
-	u32 dws;
 	const struct cs_section_def *cs_data;
 	int r;
 
@@ -1381,44 +1301,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 	cs_data = adev->gfx.rlc.cs_data;
 
 	if (cs_data) {
-		/* clear state block */
-		adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
-
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.clear_state_obj,
-					      &adev->gfx.rlc.clear_state_gpu_addr,
-					      (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-			gfx_v8_0_rlc_fini(adev);
+		/* init clear state block */
+		r = amdgpu_gfx_rlc_init_csb(adev);
+		if (r)
 			return r;
-		}
-
-		/* set up the cs buffer */
-		dst_ptr = adev->gfx.rlc.cs_ptr;
-		gfx_v8_0_get_csb_buffer(adev, dst_ptr);
-		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
 	}
 
 	if ((adev->asic_type == CHIP_CARRIZO) ||
 	    (adev->asic_type == CHIP_STONEY)) {
 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.cp_table_obj,
-					      &adev->gfx.rlc.cp_table_gpu_addr,
-					      (void **)&adev->gfx.rlc.cp_table_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+		r = amdgpu_gfx_rlc_init_cpt(adev);
+		if (r)
 			return r;
-		}
-
-		cz_init_cp_jump_table(adev);
-
-		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
 	}
 
 	return 0;
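
As with the save/restore block, the clear-state buffer setup becomes common code, with the per-ASIC variation funneled through the get_csb_size/get_csb_buffer callbacks. Reconstructed from the deleted block, amdgpu_gfx_rlc_init_csb() is presumably along the lines of:

int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	int r;

	/* allocate the clear state block */
	adev->gfx.rlc.clear_state_size = dws =
		adev->gfx.rlc.funcs->get_csb_size(adev);
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.clear_state_obj,
				      &adev->gfx.rlc.clear_state_gpu_addr,
				      (void **)&adev->gfx.rlc.cs_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* fill it with the ASIC's clear state */
	dst_ptr = adev->gfx.rlc.cs_ptr;
	adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
	amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return 0;
}
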
@@ -1443,7 +1337,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
 
 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
+				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &adev->gfx.mec.hpd_eop_obj,
 				      &adev->gfx.mec.hpd_eop_gpu_addr,
 				      (void **)&hpd);
@@ -1629,7 +1523,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		return 0;
 
 	/* bail if the compute ring is not ready */
-	if (!ring->ready)
+	if (!ring->sched.ready)
 		return 0;
 
 	tmp = RREG32(mmGB_EDC_MODE);
@@ -2088,7 +1982,7 @@ static int gfx_v8_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = gfx_v8_0_rlc_init(adev);
+	r = adev->gfx.rlc.funcs->init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init rlc BOs!\n");
 		return r;
@@ -2181,7 +2075,7 @@ static int gfx_v8_0_sw_fini(void *handle)
 	amdgpu_gfx_kiq_fini(adev);
 
 	gfx_v8_0_mec_fini(adev);
-	gfx_v8_0_rlc_fini(adev);
+	amdgpu_gfx_rlc_fini(adev);
 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
 				&adev->gfx.rlc.clear_state_gpu_addr,
 				(void **)&adev->gfx.rlc.cs_ptr);
@@ -4175,10 +4069,10 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
-	gfx_v8_0_rlc_stop(adev);
-	gfx_v8_0_rlc_reset(adev);
+	adev->gfx.rlc.funcs->stop(adev);
+	adev->gfx.rlc.funcs->reset(adev);
 	gfx_v8_0_init_pg(adev);
-	gfx_v8_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
@@ -4197,7 +4091,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
 		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	WREG32(mmCP_ME_CNTL, tmp);
 	udelay(50);
@@ -4379,10 +4273,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	/* start the ring */
 	amdgpu_ring_clear_ring(ring);
 	gfx_v8_0_cp_gfx_start(adev);
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r)
-		ring->ready = false;
+	ring->sched.ready = true;
+	r = amdgpu_ring_test_helper(ring);
 
 	return r;
 }
@@ -4396,8 +4288,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 	} else {
 		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
-		adev->gfx.kiq.ring.ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
+		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
 }
@@ -4473,11 +4365,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
 	}
 
-	r = amdgpu_ring_test_ring(kiq_ring);
-	if (r) {
+	r = amdgpu_ring_test_helper(kiq_ring);
+	if (r)
 		DRM_ERROR("KCQ enable failed\n");
-		kiq_ring->ready = false;
-	}
 	return r;
 }
 
@@ -4781,7 +4671,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
 	amdgpu_bo_kunmap(ring->mqd_obj);
 	ring->mqd_ptr = NULL;
 	amdgpu_bo_unreserve(ring->mqd_obj);
-	ring->ready = true;
+	ring->sched.ready = true;
 	return 0;
 }
 
@@ -4820,10 +4710,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
 	 */
 	for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
 		ring = &adev->gfx.compute_ring[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r)
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
 	}
 
 done:
@@ -4867,7 +4754,7 @@ static int gfx_v8_0_hw_init(void *handle)
 	gfx_v8_0_init_golden_registers(adev);
 	gfx_v8_0_constants_init(adev);
 
-	r = gfx_v8_0_rlc_resume(adev);
+	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;
 
@@ -4899,7 +4786,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, 0);
 		amdgpu_ring_write(kiq_ring, 0);
 	}
-	r = amdgpu_ring_test_ring(kiq_ring);
+	r = amdgpu_ring_test_helper(kiq_ring);
 	if (r)
 		DRM_ERROR("KCQ disable failed\n");
 
@@ -4973,16 +4860,16 @@ static int gfx_v8_0_hw_fini(void *handle)
 		pr_debug("For SRIOV client, shouldn't do anything.\n");
 		return 0;
 	}
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 	if (!gfx_v8_0_wait_for_idle(adev))
 		gfx_v8_0_cp_enable(adev, false);
 	else
 		pr_err("cp is busy, skip halt cp\n");
 	if (!gfx_v8_0_wait_for_rlc_idle(adev))
-		gfx_v8_0_rlc_stop(adev);
+		adev->gfx.rlc.funcs->stop(adev);
 	else
 		pr_err("rlc is busy, skip halt rlc\n");
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return 0;
 }
 
@@ -5071,7 +4958,7 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
 	srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
 	/* stop the rlc */
-	gfx_v8_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 
 	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
@@ -5197,7 +5084,7 @@ static int gfx_v8_0_post_soft_reset(void *handle)
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
 		gfx_v8_0_cp_gfx_resume(adev);
 
-	gfx_v8_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
@@ -5445,7 +5332,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
 				AMD_PG_SUPPORT_RLC_SMU_HS |
 				AMD_PG_SUPPORT_CP |
 				AMD_PG_SUPPORT_GFX_DMG))
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
@@ -5499,7 +5386,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
 				AMD_PG_SUPPORT_RLC_SMU_HS |
 				AMD_PG_SUPPORT_CP |
 				AMD_PG_SUPPORT_GFX_DMG))
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return 0;
 }
 
@@ -5593,57 +5480,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
 #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
 #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
 
-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
 {
-	u32 data;
+	uint32_t rlc_setting;
+
+	rlc_setting = RREG32(mmRLC_CNTL);
+	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+		return false;
+
+	return true;
+}
+
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+{
+	uint32_t data;
 	unsigned i;
-
 	data = RREG32(mmRLC_CNTL);
-	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
+	data |= RLC_SAFE_MODE__CMD_MASK;
+	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+	WREG32(mmRLC_SAFE_MODE, data);
 
-	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-		data |= RLC_SAFE_MODE__CMD_MASK;
-		data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
-		WREG32(mmRLC_SAFE_MODE, data);
-
-		for (i = 0; i < adev->usec_timeout; i++) {
-			if ((RREG32(mmRLC_GPM_STAT) &
-			     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-			      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
-			    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-			     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
-				break;
-			udelay(1);
-		}
-
-		for (i = 0; i < adev->usec_timeout; i++) {
-			if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
-				break;
-			udelay(1);
-		}
-		adev->gfx.rlc.in_safe_mode = true;
+	/* wait for RLC_SAFE_MODE */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if ((RREG32(mmRLC_GPM_STAT) &
+		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+			break;
+		udelay(1);
+	}
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+			break;
+		udelay(1);
 	}
 }
 
-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
 {
-	u32 data = 0;
+	uint32_t data;
 	unsigned i;
 
 	data = RREG32(mmRLC_CNTL);
-	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
-
-	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-		if (adev->gfx.rlc.in_safe_mode) {
-			data |= RLC_SAFE_MODE__CMD_MASK;
-			data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-			WREG32(mmRLC_SAFE_MODE, data);
-			adev->gfx.rlc.in_safe_mode = false;
-		}
-	}
+	data |= RLC_SAFE_MODE__CMD_MASK;
+	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+	WREG32(mmRLC_SAFE_MODE, data);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
@@ -5653,8 +5536,17 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
-	.enter_safe_mode = iceland_enter_rlc_safe_mode,
-	.exit_safe_mode = iceland_exit_rlc_safe_mode
+	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+	.set_safe_mode = gfx_v8_0_set_safe_mode,
+	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
+	.init = gfx_v8_0_rlc_init,
+	.get_csb_size = gfx_v8_0_get_csb_size,
+	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
+	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
+	.resume = gfx_v8_0_rlc_resume,
+	.stop = gfx_v8_0_rlc_stop,
+	.reset = gfx_v8_0_rlc_reset,
+	.start = gfx_v8_0_rlc_start
 };
 
 static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -5662,7 +5554,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 {
 	uint32_t temp, data;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	/* It is disabled by HW by default */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5758,7 +5650,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 	}
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5768,7 +5660,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
 	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
 		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5851,7 +5743,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
 	gfx_v8_0_wait_for_rlc_serdes(adev);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 					    bool enable)
@@ -6131,9 +6023,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-				      struct amdgpu_ib *ib,
-				      unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -6161,9 +6055,11 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+					  struct amdgpu_job *job,
 					  struct amdgpu_ib *ib,
-					  unsigned vmid, bool ctx_switch)
+					  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -6738,12 +6634,39 @@ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v8_0_fault(struct amdgpu_device *adev,
+			   struct amdgpu_iv_entry *entry)
+{
+	u8 me_id, pipe_id, queue_id;
+	struct amdgpu_ring *ring;
+	int i;
+
+	me_id = (entry->ring_id & 0x0c) >> 2;
+	pipe_id = (entry->ring_id & 0x03) >> 0;
+	queue_id = (entry->ring_id & 0x70) >> 4;
+
+	switch (me_id) {
+	case 0:
+		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+		break;
+	case 1:
+	case 2:
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			ring = &adev->gfx.compute_ring[i];
+			if (ring->me == me_id && ring->pipe == pipe_id &&
+			    ring->queue == queue_id)
+				drm_sched_fault(&ring->sched);
+		}
+		break;
+	}
+}
+
 static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
 				 struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal register access in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v8_0_fault(adev, entry);
 	return 0;
 }
 
@@ -6752,7 +6675,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
 				  struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal instruction in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v8_0_fault(adev, entry);
 	return 0;
 }
 
@@ -6976,10 +6899,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
 		17 + /* gfx_v8_0_ring_emit_vm_flush */
 		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
 	.emit_ib_size =	4, /* gfx_v8_0_ring_emit_ib_compute */
-	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
 	.test_ring = gfx_v8_0_ring_test_ring,
-	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_rreg = gfx_v8_0_ring_emit_rreg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6d7baf5..c27caa1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -41,7 +41,7 @@
 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
 
 #define GFX9_NUM_GFX_RINGS     1
-#define GFX9_MEC_HPD_SIZE 2048
+#define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
 
@@ -396,18 +396,14 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
-	}
+	if (r)
+		goto error_free_scratch;
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -419,14 +415,11 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
 			break;
 		DRM_UDELAY(1);
 	}
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+error_free_scratch:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
@@ -443,19 +436,16 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 16, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err1;
-	}
+
 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
 	ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -469,22 +459,17 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-			DRM_ERROR("amdgpu: IB test timed out.\n");
-			r = -ETIMEDOUT;
-			goto err2;
+		r = -ETIMEDOUT;
+		goto err2;
 	} else if (r < 0) {
-			DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-			goto err2;
+		goto err2;
 	}
 
 	tmp = adev->wb.wb[index];
-	if (tmp == 0xDEADBEEF) {
-			DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
-			r = 0;
-	} else {
-			DRM_ERROR("ib test on ring %d failed\n", ring->idx);
-			r = -EINVAL;
-	}
+	if (tmp == 0xDEADBEEF)
+		r = 0;
+	else
+		r = -EINVAL;
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -1065,85 +1050,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
 	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
 }
 
-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
 {
-	const __le32 *fw_data;
-	volatile u32 *dst_ptr;
-	int me, i, max_me = 5;
-	u32 bo_offset = 0;
-	u32 table_offset, table_size;
-
-	/* write the cp table buffer */
-	dst_ptr = adev->gfx.rlc.cp_table_ptr;
-	for (me = 0; me < max_me; me++) {
-		if (me == 0) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.ce_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 1) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.pfp_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 2) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.me_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 3) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else  if (me == 4) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec2_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		}
-
-		for (i = 0; i < table_size; i ++) {
-			dst_ptr[bo_offset + i] =
-				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-		}
-
-		bo_offset += table_size;
-	}
-}
-
-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
-{
-	/* clear state block */
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
-			&adev->gfx.rlc.clear_state_gpu_addr,
-			(void **)&adev->gfx.rlc.cs_ptr);
-
-	/* jump table block */
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
-			&adev->gfx.rlc.cp_table_gpu_addr,
-			(void **)&adev->gfx.rlc.cp_table_ptr);
+	return 5;
 }
 
 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 {
-	volatile u32 *dst_ptr;
-	u32 dws;
 	const struct cs_section_def *cs_data;
 	int r;
 
@@ -1152,45 +1065,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 	cs_data = adev->gfx.rlc.cs_data;
 
 	if (cs_data) {
-		/* clear state block */
-		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.clear_state_obj,
-					      &adev->gfx.rlc.clear_state_gpu_addr,
-					      (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
-				r);
-			gfx_v9_0_rlc_fini(adev);
+		/* init clear state block */
+		r = amdgpu_gfx_rlc_init_csb(adev);
+		if (r)
 			return r;
-		}
-		/* set up the cs buffer */
-		dst_ptr = adev->gfx.rlc.cs_ptr;
-		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
-		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
 	}
 
 	if (adev->asic_type == CHIP_RAVEN) {
 		/* TODO: double check the cp_table_size for RV */
 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.cp_table_obj,
-					      &adev->gfx.rlc.cp_table_gpu_addr,
-					      (void **)&adev->gfx.rlc.cp_table_ptr);
-		if (r) {
-			dev_err(adev->dev,
-				"(%d) failed to create cp table bo\n", r);
-			gfx_v9_0_rlc_fini(adev);
+		r = amdgpu_gfx_rlc_init_cpt(adev);
+		if (r)
 			return r;
-		}
-
-		rv_init_cp_jump_table(adev);
-		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
 	}
 
 	switch (adev->asic_type) {
@@ -1264,7 +1150,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
 
 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
+				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &adev->gfx.mec.hpd_eop_obj,
 				      &adev->gfx.mec.hpd_eop_gpu_addr,
 				      (void **)&hpd);
@@ -1635,8 +1521,8 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
 	/* Clear GDS reserved memory */
 	r = amdgpu_ring_alloc(ring, 17);
 	if (r) {
-		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+		DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
+			  ring->name, r);
 		return r;
 	}
 
@@ -1748,7 +1634,7 @@ static int gfx_v9_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = gfx_v9_0_rlc_init(adev);
+	r = adev->gfx.rlc.funcs->init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init rlc BOs!\n");
 		return r;
@@ -2498,12 +2384,12 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 		return 0;
 	}
 
-	gfx_v9_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 
 	/* disable CG */
 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-	gfx_v9_0_rlc_reset(adev);
+	adev->gfx.rlc.funcs->reset(adev);
 
 	gfx_v9_0_init_pg(adev);
 
@@ -2514,15 +2400,24 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 			return r;
 	}
 
-	if (adev->asic_type == CHIP_RAVEN ||
-	    adev->asic_type == CHIP_VEGA20) {
-		if (amdgpu_lbpw != 0)
+	switch (adev->asic_type) {
+	case CHIP_RAVEN:
+		if (amdgpu_lbpw == 0)
+			gfx_v9_0_enable_lbpw(adev, false);
+		else
+			gfx_v9_0_enable_lbpw(adev, true);
+		break;
+	case CHIP_VEGA20:
+		if (amdgpu_lbpw > 0)
 			gfx_v9_0_enable_lbpw(adev, true);
 		else
 			gfx_v9_0_enable_lbpw(adev, false);
+		break;
+	default:
+		break;
 	}
 
-	gfx_v9_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
@@ -2537,7 +2432,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
 	if (!enable) {
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
 	udelay(50);
@@ -2727,7 +2622,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	/* start the ring */
 	gfx_v9_0_cp_gfx_start(adev);
-	ring->ready = true;
+	ring->sched.ready = true;
 
 	return 0;
 }
@@ -2742,8 +2637,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
 			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
-		adev->gfx.kiq.ring.ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
+		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2866,11 +2761,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
 	}
 
-	r = amdgpu_ring_test_ring(kiq_ring);
-	if (r) {
+	r = amdgpu_ring_test_helper(kiq_ring);
+	if (r)
 		DRM_ERROR("KCQ enable failed\n");
-		kiq_ring->ready = false;
-	}
 
 	return r;
 }
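
The amdgpu_ring_test_helper() call above replaces the open-coded pattern of
running the ring test and flipping the ready flag by hand. A minimal sketch of
what the shared helper is assumed to do, reconstructed from the converted call
sites in this patch (the real implementation lives outside this hunk):

	/* Sketch only: run the hardware ring test and record the outcome
	 * in the scheduler's ready flag, so callers stop managing
	 * ring->sched.ready by hand. */
	int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
	{
		int r = amdgpu_ring_test_ring(ring);

		if (r)
			DRM_ERROR("ring %s test failed (%d)\n", ring->name, r);

		/* the ring is only schedulable if its test passed */
		ring->sched.ready = !r;

		return r;
	}
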
@@ -3249,7 +3142,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
 	amdgpu_bo_kunmap(ring->mqd_obj);
 	ring->mqd_ptr = NULL;
 	amdgpu_bo_unreserve(ring->mqd_obj);
-	ring->ready = true;
+	ring->sched.ready = true;
 	return 0;
 }
 
@@ -3314,19 +3207,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
 		return r;
 
 	ring = &adev->gfx.gfx_ring[0];
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		return r;
-	}
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r)
-			ring->ready = false;
+		amdgpu_ring_test_helper(ring);
 	}
 
 	gfx_v9_0_enable_gui_idle_interrupt(adev, true);
@@ -3353,7 +3240,7 @@ static int gfx_v9_0_hw_init(void *handle)
 	if (r)
 		return r;
 
-	r = gfx_v9_0_rlc_resume(adev);
+	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;
 
@@ -3391,7 +3278,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, 0);
 		amdgpu_ring_write(kiq_ring, 0);
 	}
-	r = amdgpu_ring_test_ring(kiq_ring);
+	r = amdgpu_ring_test_helper(kiq_ring);
 	if (r)
 		DRM_ERROR("KCQ disable failed\n");
 
@@ -3433,7 +3320,7 @@ static int gfx_v9_0_hw_fini(void *handle)
 	}
 
 	gfx_v9_0_cp_enable(adev, false);
-	gfx_v9_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 
 	gfx_v9_0_csb_vram_unpin(adev);
 
@@ -3508,7 +3395,7 @@ static int gfx_v9_0_soft_reset(void *handle)
 
 	if (grbm_soft_reset) {
 		/* stop the rlc */
-		gfx_v9_0_rlc_stop(adev);
+		adev->gfx.rlc.funcs->stop(adev);
 
 		/* Disable GFX parsing/prefetching */
 		gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3607,64 +3494,47 @@ static int gfx_v9_0_late_init(void *handle)
 	return 0;
 }
 
-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
 {
-	uint32_t rlc_setting, data;
-	unsigned i;
-
-	if (adev->gfx.rlc.in_safe_mode)
-		return;
+	uint32_t rlc_setting;
 
 	/* if RLC is not enabled, do nothing */
 	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
+		return false;
 
-	if (adev->cg_flags &
-	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
-	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
-		data = RLC_SAFE_MODE__CMD_MASK;
-		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
-		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+	return true;
+}
 
-		/* wait for RLC_SAFE_MODE */
-		for (i = 0; i < adev->usec_timeout; i++) {
-			if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
-				break;
-			udelay(1);
-		}
-		adev->gfx.rlc.in_safe_mode = true;
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
+{
+	uint32_t data;
+	unsigned i;
+
+	data = RLC_SAFE_MODE__CMD_MASK;
+	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+
+	/* wait for RLC_SAFE_MODE */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+			break;
+		udelay(1);
 	}
 }
 
-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
 {
-	uint32_t rlc_setting, data;
+	uint32_t data;
 
-	if (!adev->gfx.rlc.in_safe_mode)
-		return;
-
-	/* if RLC is not enabled, do nothing */
-	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
-
-	if (adev->cg_flags &
-	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-		/*
-		 * Try to exit safe mode only if it is already in safe
-		 * mode.
-		 */
-		data = RLC_SAFE_MODE__CMD_MASK;
-		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
-		adev->gfx.rlc.in_safe_mode = false;
-	}
+	data = RLC_SAFE_MODE__CMD_MASK;
+	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
 }
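
With the RLC callbacks split into is_rlc_enabled/set_safe_mode/unset_safe_mode,
the in_safe_mode bookkeeping that each chip used to duplicate moves into a
shared wrapper. A hedged sketch of the expected amdgpu_gfx_rlc_enter_safe_mode()
logic, reconstructed from the per-chip code deleted above:

	/* Illustrative only; the shared helper lives outside this hunk. */
	void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
	{
		if (adev->gfx.rlc.in_safe_mode)
			return;

		/* if RLC is not enabled, do nothing */
		if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
			return;

		if (adev->cg_flags &
		    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
		     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
			adev->gfx.rlc.funcs->set_safe_mode(adev);
			adev->gfx.rlc.in_safe_mode = true;
		}
	}
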
 
 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
 						bool enable)
 {
-	gfx_v9_0_enter_rlc_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3675,7 +3545,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
 		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
 	}
 
-	gfx_v9_0_exit_rlc_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3773,7 +3643,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
 {
 	uint32_t data, def;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	/* Enable 3D CGCG/CGLS */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
@@ -3813,7 +3683,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
 	}
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -3821,7 +3691,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 {
 	uint32_t def, data;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
@@ -3861,7 +3731,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
 	}
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -3890,8 +3760,17 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
-	.enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
-	.exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
+	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
+	.set_safe_mode = gfx_v9_0_set_safe_mode,
+	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
+	.init = gfx_v9_0_rlc_init,
+	.get_csb_size = gfx_v9_0_get_csb_size,
+	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
+	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
+	.resume = gfx_v9_0_rlc_resume,
+	.stop = gfx_v9_0_rlc_stop,
+	.reset = gfx_v9_0_rlc_reset,
+	.start = gfx_v9_0_rlc_start
 };
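
The table above completes the conversion from direct gfx_v9_0_rlc_* calls to
indirect dispatch, matching the funcs->init/stop/reset/start/resume call sites
earlier in this patch. Illustrative hook-up and use:

	/* assign once during gfx9 setup (illustrative) */
	adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;

	/* generic code can then drive any gfx generation the same way */
	int r = adev->gfx.rlc.funcs->init(adev);
	if (!r)
		r = adev->gfx.rlc.funcs->resume(adev);
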
 
 static int gfx_v9_0_set_powergating_state(void *handle,
@@ -4072,9 +3951,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                      struct amdgpu_ib *ib,
-                                      unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -4103,20 +3984,22 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
-                                          struct amdgpu_ib *ib,
-                                          unsigned vmid, bool ctx_switch)
+					  struct amdgpu_job *job,
+					  struct amdgpu_ib *ib,
+					  bool ctx_switch)
 {
-        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
-        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
-        amdgpu_ring_write(ring,
+	amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
-                                (2 << 0) |
+				(2 << 0) |
 #endif
-                                lower_32_bits(ib->gpu_addr));
-        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-        amdgpu_ring_write(ring, control);
+				lower_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, control);
 }
 
 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
@@ -4695,12 +4578,39 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v9_0_fault(struct amdgpu_device *adev,
+			   struct amdgpu_iv_entry *entry)
+{
+	u8 me_id, pipe_id, queue_id;
+	struct amdgpu_ring *ring;
+	int i;
+
+	me_id = (entry->ring_id & 0x0c) >> 2;
+	pipe_id = (entry->ring_id & 0x03) >> 0;
+	queue_id = (entry->ring_id & 0x70) >> 4;
+
+	switch (me_id) {
+	case 0:
+		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+		break;
+	case 1:
+	case 2:
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			ring = &adev->gfx.compute_ring[i];
+			if (ring->me == me_id && ring->pipe == pipe_id &&
+			    ring->queue == queue_id)
+				drm_sched_fault(&ring->sched);
+		}
+		break;
+	}
+}
+
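+/* Worked example of the decode above (pipe in bits 0-1, me in bits 2-3,
+ * queue in bits 4-6 of the IV entry's ring_id), purely for illustration:
+ *
+ *	entry->ring_id == 0x4a:
+ *		pipe_id  = (0x4a & 0x03) >> 0 = 2
+ *		me_id    = (0x4a & 0x0c) >> 2 = 2	(a compute ME)
+ *		queue_id = (0x4a & 0x70) >> 4 = 4
+ *
+ * so the handler looks for the compute ring with me 2 / pipe 2 / queue 4
+ * and kicks only that ring's scheduler via drm_sched_fault(), instead of
+ * scheduling a whole-device reset.
+ */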
 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
 				 struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal register access in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v9_0_fault(adev, entry);
 	return 0;
 }
 
@@ -4709,7 +4619,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
 				  struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal instruction in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v9_0_fault(adev, entry);
 	return 0;
 }
 
@@ -4836,10 +4746,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
 		2 + /* gfx_v9_0_ring_emit_vm_flush */
 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
-	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
 	.test_ring = gfx_v9_0_ring_test_ring,
-	.test_ib = gfx_v9_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index bfa317a..f5edddf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -35,20 +35,25 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
 	return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
 }
 
-static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base)
 {
-	uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* two registers' distance between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+	int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+			- mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
 
-	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-		     lower_32_bits(value));
+	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+				offset * vmid, lower_32_bits(page_table_base));
 
-	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-		     upper_32_bits(value));
+	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+				offset * vmid, upper_32_bits(page_table_base));
 }
 
 static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 {
-	gfxhub_v1_0_init_gart_pt_regs(adev);
+	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+	gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
 		     (u32)(adev->gmc.gart_start >> 12));
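
The vmid-indexed form works because the per-context page-table base registers
sit at a fixed stride; offset measures that stride from the register
definitions rather than hard-coding it. Expanded for the first two VMIDs, for
illustration:

	/* offset == mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
	 *	   - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32	(two registers)
	 * vmid 0 -> mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 + 0
	 * vmid 1 -> mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 + offset
	 * i.e. reg(vmid) = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 + offset * vmid
	 */
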
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index 206e29c..92d3a70 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -30,5 +30,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
 					  bool value);
 void gfxhub_v1_0_init(struct amdgpu_device *adev);
 u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index e1c2b4e..2821d1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -358,7 +358,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev,
+				uint32_t vmid, uint32_t flush_type)
 {
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
@@ -580,7 +581,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v6_0_set_fault_enable_default(adev, true);
 
-	gmc_v6_0_flush_gpu_tlb(adev, 0);
+	gmc_v6_0_flush_gpu_tlb(adev, 0, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 910c4ce..761dcfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -430,7 +430,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev,
+				uint32_t vmid, uint32_t flush_type)
 {
 	/* bits 0-15 are the VM contexts0-15 */
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -698,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 		WREG32(mmCHUB_CONTROL, tmp);
 	}
 
-	gmc_v7_0_flush_gpu_tlb(adev, 0);
+	gmc_v7_0_flush_gpu_tlb(adev, 0, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1d3265c..531aaf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -611,7 +611,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  * Flush the TLB for the requested page table (CIK).
  */
 static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+				uint32_t vmid, uint32_t flush_type)
 {
 	/* bits 0-15 are the VM contexts0-15 */
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -920,7 +920,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v8_0_set_fault_enable_default(adev, true);
 
-	gmc_v8_0_flush_gpu_tlb(adev, 0);
+	gmc_v8_0_flush_gpu_tlb(adev, 0, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f35d7a5..811231e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -293,14 +293,14 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 }
 
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
+					uint32_t flush_type)
 {
 	u32 req = 0;
 
-	/* invalidate using legacy mode on vmid*/
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
-	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
@@ -312,48 +312,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
 	return req;
 }
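
The only behavioural change in this function is that FLUSH_TYPE now comes from
the caller instead of the hard-coded legacy value. Illustrative call, matching
what the converted callers in this patch still do:

	/* build a legacy (flush_type == 0) invalidation request for vmid 3;
	 * every caller converted in this patch still passes type 0 */
	u32 req = gmc_v9_0_get_invalidate_req(3, 0);
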
 
-static signed long  amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
-						  uint32_t reg0, uint32_t reg1,
-						  uint32_t ref, uint32_t mask)
-{
-	signed long r, cnt = 0;
-	unsigned long flags;
-	uint32_t seq;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-	struct amdgpu_ring *ring = &kiq->ring;
-
-	spin_lock_irqsave(&kiq->ring_lock, flags);
-
-	amdgpu_ring_alloc(ring, 32);
-	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
-					    ref, mask);
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-	/* don't wait anymore for IRQ context */
-	if (r < 1 && in_interrupt())
-		goto failed_kiq;
-
-	might_sleep();
-
-	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-	}
-
-	if (cnt > MAX_KIQ_REG_TRY)
-		goto failed_kiq;
-
-	return 0;
-
-failed_kiq:
-	pr_err("failed to invalidate tlb with kiq\n");
-	return r;
-}
-
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -362,64 +320,47 @@ static signed long  amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
  */
 
 /**
- * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - TLB flush with a given flush type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
+ * @flush_type: type of TLB flush to perform
 *
- * Flush the TLB for the requested page table.
+ * Flush the TLB for the requested page table using the given flush type.
  */
 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+				uint32_t vmid, uint32_t flush_type)
 {
-	/* Use register 17 for GART */
 	const unsigned eng = 17;
 	unsigned i, j;
-	int r;
 
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
 		struct amdgpu_vmhub *hub = &adev->vmhub[i];
-		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 
-		if (adev->gfx.kiq.ring.ready &&
-		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-		    !adev->in_gpu_reset) {
-			r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
-				hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
-			if (!r)
-				continue;
+		if (i == AMDGPU_GFXHUB && !adev->in_gpu_reset &&
+		    adev->gfx.kiq.ring.sched.ready &&
+		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
+			uint32_t req = hub->vm_inv_eng0_req + eng;
+			uint32_t ack = hub->vm_inv_eng0_ack + eng;
+
+			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+							   1 << vmid);
+			continue;
 		}
 
 		spin_lock(&adev->gmc.invalidate_lock);
-
 		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
-
-		/* Busy wait for ACK.*/
-		for (j = 0; j < 100; j++) {
-			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
-			tmp &= 1 << vmid;
-			if (tmp)
-				break;
-			cpu_relax();
-		}
-		if (j < 100) {
-			spin_unlock(&adev->gmc.invalidate_lock);
-			continue;
-		}
-
-		/* Wait for ACK with a delay.*/
 		for (j = 0; j < adev->usec_timeout; j++) {
 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
-			tmp &= 1 << vmid;
-			if (tmp)
+			if (tmp & (1 << vmid))
 				break;
 			udelay(1);
 		}
-		if (j < adev->usec_timeout) {
-			spin_unlock(&adev->gmc.invalidate_lock);
-			continue;
-		}
 		spin_unlock(&adev->gmc.invalidate_lock);
+		if (j < adev->usec_timeout)
+			continue;
+
 		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 	}
 }
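
After the rewrite the flush has exactly two paths per hub: the KIQ-assisted
register write/wait on the GFX hub whenever the KIQ scheduler is ready (and
neither reset nor the SR-IOV checks rule it out), and a locked MMIO write with
a single udelay-based ACK poll otherwise. Condensed control flow, for
illustration only:

	/* for each vmhub:
	 *	if (gfxhub && !in_gpu_reset && kiq ready && sriov permits)
	 *		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack,
	 *						   tmp, 1 << vmid);
	 *	else
	 *		spin_lock; WREG32_NO_KIQ(req, tmp);
	 *		poll ack for (1 << vmid); spin_unlock;
	 */
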
@@ -429,7 +370,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
+	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;
 
 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
@@ -739,9 +680,8 @@ static int gmc_v9_0_late_init(void *handle)
 		unsigned vmhub = ring->funcs->vmhub;
 
 		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
-		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
-			 ring->idx, ring->name, ring->vm_inv_eng,
-			 ring->funcs->vmhub);
+		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
 	}
 
 	/* Engine 16 is used for KFD and 17 for GART flushes */
@@ -1122,7 +1062,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 
 	gfxhub_v1_0_set_fault_enable_default(adev, value);
 	mmhub_v1_0_set_fault_enable_default(adev, value);
-	gmc_v9_0_flush_gpu_tlb(adev, 0);
+	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index d0e478f..0c9a2c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
 	    pi->caps_db_ramping ||
 	    pi->caps_td_ramping ||
 	    pi->caps_tcp_ramping) {
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 		if (enable) {
 			ret = kv_program_pt_config_registers(adev, didt_config_kv);
 			if (ret) {
-				adev->gfx.rlc.funcs->exit_safe_mode(adev);
+				amdgpu_gfx_rlc_exit_safe_mode(adev);
 				return ret;
 			}
 		}
 
 		kv_do_enable_didt(adev, enable);
 
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index a0db67a..d0d966d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -52,20 +52,25 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
 	return base;
 }
 
-static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base)
 {
-	uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* two registers' distance between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+	int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+			- mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
 
-	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-		     lower_32_bits(value));
+	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+			offset * vmid, lower_32_bits(page_table_base));
 
-	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-		     upper_32_bits(value));
+	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+			offset * vmid, upper_32_bits(page_table_base));
 }
 
 static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 {
-	mmhub_v1_0_init_gart_pt_regs(adev);
+	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+	mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
 
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
 		     (u32)(adev->gmc.gart_start >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index bef3d0c..0de0fdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -34,5 +34,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
 void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
                                 bool enable);
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 3f3fac2..e5dd052 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -34,6 +34,7 @@
 #include "nbio/nbio_7_4_offset.h"
 
 MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
 
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS		0x3010024
@@ -98,7 +99,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	const char *chip_name;
 	char fw_name[30];
 	int err = 0;
-	const struct psp_firmware_header_v1_0 *hdr;
+	const struct psp_firmware_header_v1_0 *sos_hdr;
+	const struct ta_firmware_header_v1_0 *ta_hdr;
 
 	DRM_DEBUG("\n");
 
@@ -119,16 +121,32 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	if (err)
 		goto out;
 
-	hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
-	adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
-	adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
-	adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
-	adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
-					le32_to_cpu(hdr->sos_size_bytes);
-	adev->psp.sys_start_addr = (uint8_t *)hdr +
-				le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+	adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+	adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+	adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+	adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->header.ucode_size_bytes) -
+					le32_to_cpu(sos_hdr->sos_size_bytes);
+	adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+				le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
 	adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-				le32_to_cpu(hdr->sos_offset_bytes);
+				le32_to_cpu(sos_hdr->sos_offset_bytes);
+
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+
+	err = amdgpu_ucode_validate(adev->psp.ta_fw);
+	if (err)
+		goto out;
+
+	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+	adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
+	adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
+	adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
 	return 0;
 out:
 	if (err) {
@@ -167,7 +185,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
 	/* Copy PSP System Driver binary to memory */
 	memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
 
-	/* Provide the sys driver to bootrom */
+	/* Provide the sys driver to bootloader */
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 1 << 16;
@@ -208,7 +226,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
 	/* Copy Secure OS binary to PSP memory */
 	memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
 
-	/* Provide the PSP secure OS to bootrom */
+	/* Provide the PSP secure OS to bootloader */
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 2 << 16;
@@ -552,24 +570,110 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
 static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
 	int number_devices, struct psp_xgmi_topology_info *topology)
 {
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+	int i;
+	int ret;
+
+	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+		return -EINVAL;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+	/* Fill in the shared memory with topology information as input */
+	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+	topology_info_input->num_nodes = number_devices;
+
+	for (i = 0; i < topology_info_input->num_nodes; i++) {
+		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+	}
+
+	/* Invoke xgmi ta to get the topology information */
+	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+	if (ret)
+		return ret;
+
+	/* Read the output topology information from the shared memory */
+	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+	for (i = 0; i < topology->num_nodes; i++) {
+		topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+		topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+		topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+		topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+	}
+
 	return 0;
 }
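
These TA calls all share one mailbox convention: zero the shared buffer, fill
in the input message, set cmd_id, invoke the TA, then read the output message
back. Condensed round trip using the hive-id command, for illustration (all
names as used in this file):

	struct ta_xgmi_shared_memory *cmd =
		(struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;

	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;	/* request */
	if (psp_xgmi_invoke(psp, cmd->cmd_id))		/* TA runs the command */
		return 0;				/* caller treats 0 as "no id" */
	return cmd->xgmi_out_message.get_hive_id.hive_id;	/* response */
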
 
 static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
 	int number_devices, struct psp_xgmi_topology_info *topology)
 {
-	return 0;
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+	int i;
+
+	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+		return -EINVAL;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+	topology_info_input->num_nodes = number_devices;
+
+	for (i = 0; i < topology_info_input->num_nodes; i++) {
+		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+	}
+
+	/* Invoke xgmi ta to set topology information */
+	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
 }
 
 static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
 {
-	u64 hive_id = 0;
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	int ret;
 
-	/* Remove me when we can get correct hive_id through PSP */
-	if (psp->adev->gmc.xgmi.num_physical_nodes)
-		hive_id = 0x123456789abcdef;
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
-	return hive_id;
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+	/* Invoke xgmi ta to get hive id */
+	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+	if (ret)
+		return 0;
+	else
+		return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+}
+
+static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+{
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	int ret;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+	/* Invoke xgmi ta to get the node id */
+	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+	if (ret)
+		return 0;
+	else
+		return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
 }
 
 static const struct psp_funcs psp_v11_0_funcs = {
@@ -587,6 +691,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
 	.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
 	.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
 	.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
+	.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
 };
 
 void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index e1ebf77..9cea0bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -194,7 +194,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
 	/* Copy PSP System Driver binary to memory */
 	memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
 
-	/* Provide the sys driver to bootrom */
+	/* Provide the sys driver to bootloader */
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 1 << 16;
@@ -254,7 +254,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 	/* Copy Secure OS binary to PSP memory */
 	memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
 
-	/* Provide the PSP secure OS to bootrom */
+	/* Provide the PSP secure OS to bootloader */
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 2 << 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2d4770e..9f3cb2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -225,7 +225,7 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+				   struct amdgpu_job *job,
 				   struct amdgpu_ib *ib,
-				   unsigned vmid, bool ctx_switch)
+				   bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	/* IB packet must end on an 8 DW boundary */
 	sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
@@ -349,8 +352,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 	}
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -471,17 +474,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 	}
 
 	sdma_v2_4_enable(adev, true);
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -550,21 +551,16 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 5);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -581,15 +577,11 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
 
@@ -612,20 +604,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -644,21 +632,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -760,7 +743,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	u32 pad_count;
 	int i;
 
@@ -1105,8 +1088,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
 					      struct amdgpu_irq_src *source,
 					      struct amdgpu_iv_entry *entry)
 {
+	u8 instance_id, queue_id;
+
 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
+	instance_id = (entry->ring_id & 0x3) >> 0;
+	queue_id = (entry->ring_id & 0xc) >> 2;
+
+	if (instance_id <= 1 && queue_id == 0)
+		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
 	return 0;
 }
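
As in the gfx fault handler, scheduling reset_work is replaced by a targeted
scheduler fault. The SDMA IV ring_id carries the instance in bits 0-1 and the
queue in bits 2-3; only queue 0 backs an amdgpu ring here, hence the guard.
Worked decode, for illustration:

	/* entry->ring_id == 0x1:
	 *	instance_id = (0x1 & 0x3) >> 0 = 1
	 *	queue_id    = (0x1 & 0xc) >> 2 = 0
	 * -> drm_sched_fault(&adev->sdma.instance[1].ring.sched), which
	 *    kicks that single ring's timeout handling immediately.
	 */
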
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6fb3eda..b6a25f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -399,7 +399,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+				   struct amdgpu_job *job,
 				   struct amdgpu_ib *ib,
-				   unsigned vmid, bool ctx_switch)
+				   bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	/* IB packet must end on an 8 DW boundary */
 	sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
@@ -523,8 +526,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 	}
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -739,7 +742,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 	}
 
 	/* unhalt the MEs */
@@ -749,11 +752,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -822,21 +823,16 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 5);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -853,15 +849,11 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
 
@@ -884,20 +876,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -916,21 +904,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
 	dma_fence_put(f);
@@ -1031,7 +1014,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	u32 pad_count;
 	int i;
 
@@ -1440,8 +1423,14 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
 					      struct amdgpu_irq_src *source,
 					      struct amdgpu_iv_entry *entry)
 {
+	u8 instance_id, queue_id;
+
 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
+	instance_id = (entry->ring_id & 0x3) >> 0;
+	queue_id = (entry->ring_id & 0xc) >> 2;
+
+	if (instance_id <= 1 && queue_id == 0)
+		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7a8c917..f4490cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -54,6 +54,11 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
 #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
 #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
 
+#define WREG32_SDMA(instance, offset, value) \
+	WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
+#define RREG32_SDMA(instance, offset) \
+	RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))
+
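+/* The two helpers only fold the per-instance register lookup into the
+ * access, which considerably shortens the register bashing below. One
+ * expansion, for illustration (note both macros assume a local 'adev'
+ * in the calling scope):
+ *
+ *	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl)
+ * expands to
+ *	WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL),
+ *	       rb_cntl);
+ */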
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -367,16 +372,11 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
 		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
 	} else {
-		u32 lowbit, highbit;
-
-		lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
-		highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
-
-		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
-				ring->me, highbit, lowbit);
-		wptr = highbit;
+		wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
 		wptr = wptr << 32;
-		wptr |= lowbit;
+		wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
+		DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
+				ring->me, wptr);
 	}
 
 	return wptr >> 2;
@@ -417,14 +417,67 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
 				lower_32_bits(ring->wptr << 2),
 				ring->me,
 				upper_32_bits(ring->wptr << 2));
-		WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
-		WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+		WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
+			    lower_32_bits(ring->wptr << 2));
+		WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
+			    upper_32_bits(ring->wptr << 2));
+	}
+}
+
+/**
+ * sdma_v4_0_page_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (VEGA10+).
+ */
+static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u64 wptr;
+
+	if (ring->use_doorbell) {
+		/* XXX check if swapping is necessary on BE */
+		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+	} else {
+		wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
+		wptr = wptr << 32;
+		wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
+	}
+
+	return wptr >> 2;
+}
+
+/**
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (VEGA10+).
+ */
+static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell) {
+		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+		/* XXX check if swapping is necessary on BE */
+		WRITE_ONCE(*wb, (ring->wptr << 2));
+		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+	} else {
+		uint64_t wptr = ring->wptr << 2;
+
+		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
+			    lower_32_bits(wptr));
+		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
+			    upper_32_bits(wptr));
 	}
 }
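
When no doorbell is used, the 64-bit write pointer is split across a LO/HI
register pair, and both directions shift by 2 because the registers hold byte
offsets while ring->wptr counts dwords. The reassembly, spelled out for
illustration:

	/* read : wptr = ((u64)RB_WPTR_HI << 32) | RB_WPTR;  use wptr >> 2;
	 * write: RB_WPTR    = lower_32_bits(ring->wptr << 2);
	 *	  RB_WPTR_HI = upper_32_bits(ring->wptr << 2);
	 */
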
 
 static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -444,9 +497,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VEGA10).
  */
 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-					struct amdgpu_ib *ib,
-					unsigned vmid, bool ctx_switch)
+				   struct amdgpu_job *job,
+				   struct amdgpu_ib *ib,
+				   bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	/* IB packet must end on an 8 DW boundary */
 	sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
@@ -568,16 +624,16 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 			amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+		rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-		ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+		WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+		ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+		WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
 	}
 
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -593,6 +649,39 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
 }
 
 /**
+ * sdma_v4_0_page_stop - stop the page async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the page async dma ring buffers (VEGA10).
+ */
+static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page;
+	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page;
+	u32 rb_cntl, ib_cntl;
+	int i;
+
+	if ((adev->mman.buffer_funcs_ring == sdma0) ||
+	    (adev->mman.buffer_funcs_ring == sdma1))
+		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+					RB_ENABLE, 0);
+		WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+		ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
+					IB_ENABLE, 0);
+		WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+	}
+
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
+}
+
+/**
 * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
  *
  * @adev: amdgpu_device pointer
@@ -630,18 +719,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+		f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
 				AUTO_CTXSW_ENABLE, enable ? 1 : 0);
 		if (enable && amdgpu_sdma_phase_quantum) {
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
-			       phase_quantum);
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
-			       phase_quantum);
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
-			       phase_quantum);
+			WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
+			WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
+			WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
 		}
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+		WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
 	}
 
 }
@@ -662,156 +748,217 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
 	if (enable == false) {
 		sdma_v4_0_gfx_stop(adev);
 		sdma_v4_0_rlc_stop(adev);
+		if (adev->sdma.has_page_queue)
+			sdma_v4_0_page_stop(adev);
 	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+		f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+		WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
 	}
 }
 
 /**
+ * sdma_v4_0_rb_cntl - get parameters for rb_cntl
+ */
+static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
+{
+	/* Set ring buffer size in dwords */
+	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
+
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+				RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+	return rb_cntl;
+}
+
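+/* RB_SIZE is a log2 encoding, so order_base_2() converts the ring length
+ * in dwords into the exponent the hardware expects. Worked example, for
+ * illustration:
+ *
+ *	a 256KB ring: ring->ring_size / 4 == 65536 dwords,
+ *	order_base_2(65536) == 16, so RB_SIZE is programmed to 16
+ *	(ring length == 1 << RB_SIZE dwords).
+ */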
+/**
  * sdma_v4_0_gfx_resume - setup and start the async dma engines
  *
  * @adev: amdgpu_device pointer
+ * @i: instance to resume
  *
  * Set up the gfx DMA ring buffers and enable them (VEGA10).
  * Returns 0 for success, error for failure.
  */
-static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 {
-	struct amdgpu_ring *ring;
+	struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
 	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
-	u32 rb_bufsz;
 	u32 wb_offset;
 	u32 doorbell;
 	u32 doorbell_offset;
-	u32 temp;
 	u64 wptr_gpu_addr;
-	int i, r;
 
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		ring = &adev->sdma.instance[i].ring;
-		wb_offset = (ring->rptr_offs * 4);
+	wb_offset = (ring->rptr_offs * 4);
 
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+	rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
+	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
 
-		/* Set ring buffer size in dwords */
-		rb_bufsz = order_base_2(ring->ring_size / 4);
-		rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
-#ifdef __BIG_ENDIAN
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
-					RPTR_WRITEBACK_SWAP_ENABLE, 1);
-#endif
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);
 
-		/* Initialize the ring buffer's read and write pointers */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+	/* set the wb address whether it's enabled or not */
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
+	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
+	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 
-		/* set the wb address whether it's enabled or not */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
-		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
-		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+				RPTR_WRITEBACK_ENABLE, 1);
 
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
 
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+	ring->wptr = 0;
 
-		ring->wptr = 0;
+	/* before programming wptr to a smaller value, minor_ptr_update must be set first */
+	WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);
 
-		/* before programing wptr to a less value, need set minor_ptr_update first */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+	doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
+	doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);
 
-		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
-		}
-
-		doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
-		doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
-
-		if (ring->use_doorbell) {
-			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
-			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+	doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
+				 ring->use_doorbell);
+	doorbell_offset = REG_SET_FIELD(doorbell_offset,
+					SDMA0_GFX_DOORBELL_OFFSET,
 					OFFSET, ring->doorbell_index);
-		} else {
-			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
-		}
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
-		adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
-						      ring->doorbell_index);
+	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
+	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
+	adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+					      ring->doorbell_index);
 
-		if (amdgpu_sriov_vf(adev))
-			sdma_v4_0_ring_set_wptr(ring);
+	sdma_v4_0_ring_set_wptr(ring);
 
-		/* set minor_ptr_update to 0 after wptr programed */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+	/* set minor_ptr_update to 0 after wptr is programmed */
+	WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);
 
-		/* set utc l1 enable flag always to 1 */
-		temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+	/* setup the wptr shadow polling */
+	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
+		    lower_32_bits(wptr_gpu_addr));
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
+		    upper_32_bits(wptr_gpu_addr));
+	wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
+	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+				       SDMA0_GFX_RB_WPTR_POLL_CNTL,
+				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
 
-		if (!amdgpu_sriov_vf(adev)) {
-			/* unhalt engine */
-			temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
-			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
-		}
+	/* enable DMA RB */
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
 
-		/* setup the wptr shadow polling */
-		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
-		       lower_32_bits(wptr_gpu_addr));
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
-		       upper_32_bits(wptr_gpu_addr));
-		wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
-		if (amdgpu_sriov_vf(adev))
-			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
-		else
-			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
-
-		/* enable DMA RB */
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-
-		ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
-		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+	ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
+	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
 #ifdef __BIG_ENDIAN
-		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
 #endif
-		/* enable DMA IBs */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+	/* enable DMA IBs */
+	WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
 
-		ring->ready = true;
+	ring->sched.ready = true;
+}
 
-		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
-			sdma_v4_0_ctx_switch_enable(adev, true);
-			sdma_v4_0_enable(adev, true);
-		}
+/**
+ * sdma_v4_0_page_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the page DMA ring buffers and enable them (VEGA10).
+ */
+static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
+{
+	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
+	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+	u32 wb_offset;
+	u32 doorbell;
+	u32 doorbell_offset;
+	u64 wptr_gpu_addr;
 
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
-			return r;
-		}
+	wb_offset = (ring->rptr_offs * 4);
 
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
+	rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
 
-	}
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
 
-	return 0;
+	/* set the wb address whether it's enabled or not */
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
+	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
+	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+				RPTR_WRITEBACK_ENABLE, 1);
+
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
+
+	ring->wptr = 0;
+
+	/* before programming wptr to a smaller value, minor_ptr_update must be set first */
+	WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
+
+	doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
+	doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
+
+	doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
+				 ring->use_doorbell);
+	doorbell_offset = REG_SET_FIELD(doorbell_offset,
+					SDMA0_PAGE_DOORBELL_OFFSET,
+					OFFSET, ring->doorbell_index);
+	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
+	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
+	/* TODO: enable doorbell support */
+	/*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+					      ring->doorbell_index);*/
+
+	sdma_v4_0_ring_set_wptr(ring);
+
+	/* set minor_ptr_update to 0 after wptr is programmed */
+	WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
+
+	/* setup the wptr shadow polling */
+	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
+		    lower_32_bits(wptr_gpu_addr));
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
+		    upper_32_bits(wptr_gpu_addr));
+	wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
+	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+				       SDMA0_PAGE_RB_WPTR_POLL_CNTL,
+				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+	/* enable DMA RB */
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+
+	ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+	/* enable DMA IBs */
+	WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+
+	ring->sched.ready = true;
 }
 
 static void
@@ -922,12 +1069,14 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
 			(adev->sdma.instance[i].fw->data +
 				le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
+		WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
 
 		for (j = 0; j < fw_size; j++)
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+			WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
+				    le32_to_cpup(fw_data++));
 
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+		WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
+			    adev->sdma.instance[i].fw_version);
 	}
 
 	return 0;
@@ -943,33 +1092,78 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
  */
 static int sdma_v4_0_start(struct amdgpu_device *adev)
 {
-	int r = 0;
+	struct amdgpu_ring *ring;
+	int i, r;
 
 	if (amdgpu_sriov_vf(adev)) {
 		sdma_v4_0_ctx_switch_enable(adev, false);
 		sdma_v4_0_enable(adev, false);
+	} else {
 
-		/* set RB registers */
-		r = sdma_v4_0_gfx_resume(adev);
-		return r;
+		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+			r = sdma_v4_0_load_microcode(adev);
+			if (r)
+				return r;
+		}
+
+		/* unhalt the MEs */
+		sdma_v4_0_enable(adev, true);
+		/* enable sdma ring preemption */
+		sdma_v4_0_ctx_switch_enable(adev, true);
 	}
 
-	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-		r = sdma_v4_0_load_microcode(adev);
+	/* start the gfx rings and rlc compute queues */
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		uint32_t temp;
+
+		WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+		sdma_v4_0_gfx_resume(adev, i);
+		if (adev->sdma.has_page_queue)
+			sdma_v4_0_page_resume(adev, i);
+
+		/* set utc l1 enable flag always to 1 */
+		temp = RREG32_SDMA(i, mmSDMA0_CNTL);
+		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+		WREG32_SDMA(i, mmSDMA0_CNTL, temp);
+
+		if (!amdgpu_sriov_vf(adev)) {
+			/* unhalt engine */
+			temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
+			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+			WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
+		}
+	}
+
+	if (amdgpu_sriov_vf(adev)) {
+		sdma_v4_0_ctx_switch_enable(adev, true);
+		sdma_v4_0_enable(adev, true);
+	} else {
+		r = sdma_v4_0_rlc_resume(adev);
 		if (r)
 			return r;
 	}
 
-	/* unhalt the MEs */
-	sdma_v4_0_enable(adev, true);
-	/* enable sdma ring preemption */
-	sdma_v4_0_ctx_switch_enable(adev, true);
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 
-	/* start the gfx rings and rlc compute queues */
-	r = sdma_v4_0_gfx_resume(adev);
-	if (r)
-		return r;
-	r = sdma_v4_0_rlc_resume(adev);
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
+			return r;
+
+		if (adev->sdma.has_page_queue) {
+			struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+			r = amdgpu_ring_test_helper(page);
+			if (r)
+				return r;
+
+			if (adev->mman.buffer_funcs_ring == page)
+				amdgpu_ttm_set_buffer_funcs_status(adev, true);
+		}
+
+		if (adev->mman.buffer_funcs_ring == ring)
+			amdgpu_ttm_set_buffer_funcs_status(adev, true);
+	}
 
 	return r;
 }
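The start sequence above now funnels every ring through
amdgpu_ring_test_helper(), which also owns the sched.ready flag that replaces
ring->ready in this series. A sketch of what such a helper plausibly does
(the real one lives in amdgpu_ring.c):

	int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;
		int r;

		r = amdgpu_ring_test_ring(ring);
		if (r)
			DRM_DEV_ERROR(adev->dev,
				      "ring %s test failed (%d)\n",
				      ring->name, r);

		/* only a ring that passed its test may feed the scheduler */
		ring->sched.ready = !r;
		return r;
	}

This is also why the call sites below can drop their per-ring error prints
and ready-flag bookkeeping.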
@@ -993,21 +1187,16 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 5);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -1024,15 +1213,11 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
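The reworked test function above is an instance of the kernel's single-exit
cleanup idiom: every failure path past the writeback allocation falls through
to one label. The skeleton, with do_ring_write() as a hypothetical stand-in
for the ring traffic:

	static int wb_test_skeleton(struct amdgpu_device *adev)
	{
		u32 index;
		int r;

		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;	/* nothing allocated yet */

		r = do_ring_write(adev, index);	/* hypothetical body */

		/* every later path funnels through the one cleanup site */
		amdgpu_device_wb_free(adev, index);
		return r;
	}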
 
@@ -1055,20 +1240,16 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -1087,21 +1268,17 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
+
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
 	dma_fence_put(f);
@@ -1206,7 +1383,7 @@ static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	u32 pad_count;
 	int i;
 
@@ -1276,10 +1453,18 @@ static int sdma_v4_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->asic_type == CHIP_RAVEN)
+	if (adev->asic_type == CHIP_RAVEN) {
 		adev->sdma.num_instances = 1;
-	else
+		adev->sdma.has_page_queue = false;
+	} else {
 		adev->sdma.num_instances = 2;
+		/* TODO: Page queue breaks driver reload under SRIOV */
+		if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf(adev))
+			adev->sdma.has_page_queue = false;
+		else if (adev->asic_type != CHIP_VEGA20 &&
+				adev->asic_type != CHIP_VEGA12)
+			adev->sdma.has_page_queue = true;
+	}
 
 	sdma_v4_0_set_ring_funcs(adev);
 	sdma_v4_0_set_buffer_funcs(adev);
@@ -1340,6 +1525,21 @@ static int sdma_v4_0_sw_init(void *handle)
 				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
+
+		if (adev->sdma.has_page_queue) {
+			ring = &adev->sdma.instance[i].page;
+			ring->ring_obj = NULL;
+			ring->use_doorbell = false;
+
+			sprintf(ring->name, "page%d", i);
+			r = amdgpu_ring_init(adev, ring, 1024,
+					     &adev->sdma.trap_irq,
+					     (i == 0) ?
+					     AMDGPU_SDMA_IRQ_TRAP0 :
+					     AMDGPU_SDMA_IRQ_TRAP1);
+			if (r)
+				return r;
+		}
 	}
 
 	return r;
@@ -1350,8 +1550,11 @@ static int sdma_v4_0_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
-	for (i = 0; i < adev->sdma.num_instances; i++)
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+		if (adev->sdma.has_page_queue)
+			amdgpu_ring_fini(&adev->sdma.instance[i].page);
+	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		release_firmware(adev->sdma.instance[i].fw);
@@ -1414,7 +1617,7 @@ static bool sdma_v4_0_is_idle(void *handle)
 	u32 i;
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
+		u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
 
 		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
 			return false;
@@ -1430,8 +1633,8 @@ static int sdma_v4_0_wait_for_idle(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
-		sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
+		sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG);
+		sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG);
 
 		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
 			return 0;
@@ -1452,16 +1655,13 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
 					unsigned type,
 					enum amdgpu_interrupt_state state)
 {
+	unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
 	u32 sdma_cntl;
 
-	u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
-		sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
-		sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
-
-	sdma_cntl = RREG32(reg_offset);
+	sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
 	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
 		       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
-	WREG32(reg_offset, sdma_cntl);
+	WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
 
 	return 0;
 }
@@ -1470,39 +1670,32 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
 				      struct amdgpu_irq_src *source,
 				      struct amdgpu_iv_entry *entry)
 {
+	uint32_t instance;
+
 	DRM_DEBUG("IH: SDMA trap\n");
 	switch (entry->client_id) {
 	case SOC15_IH_CLIENTID_SDMA0:
-		switch (entry->ring_id) {
-		case 0:
-			amdgpu_fence_process(&adev->sdma.instance[0].ring);
-			break;
-		case 1:
-			/* XXX compute */
-			break;
-		case 2:
-			/* XXX compute */
-			break;
-		case 3:
-			/* XXX page queue*/
-			break;
-		}
+		instance = 0;
 		break;
 	case SOC15_IH_CLIENTID_SDMA1:
-		switch (entry->ring_id) {
-		case 0:
-			amdgpu_fence_process(&adev->sdma.instance[1].ring);
-			break;
-		case 1:
-			/* XXX compute */
-			break;
-		case 2:
-			/* XXX compute */
-			break;
-		case 3:
-			/* XXX page queue*/
-			break;
-		}
+		instance = 1;
+		break;
+	default:
+		return 0;
+	}
+
+	switch (entry->ring_id) {
+	case 0:
+		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+		break;
+	case 1:
+		/* XXX compute */
+		break;
+	case 2:
+		/* XXX compute */
+		break;
+	case 3:
+		amdgpu_fence_process(&adev->sdma.instance[instance].page);
 		break;
 	}
 	return 0;
@@ -1512,12 +1705,29 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
 					      struct amdgpu_irq_src *source,
 					      struct amdgpu_iv_entry *entry)
 {
+	int instance;
+
 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
+
+	switch (entry->client_id) {
+	case SOC15_IH_CLIENTID_SDMA0:
+		instance = 0;
+		break;
+	case SOC15_IH_CLIENTID_SDMA1:
+		instance = 1;
+		break;
+	default:
+		return 0;
+	}
+
+	switch (entry->ring_id) {
+	case 0:
+		drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+		break;
+	}
 	return 0;
 }
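Instead of queueing the global adev->reset_work, the handler now reports the
fault to the per-ring scheduler. Given the timeout/fault handling changes
this merge pulls in, drm_sched_fault() plausibly just fires the scheduler's
timeout handler with no delay, so a fault takes the same recovery path as a
regular job timeout:

	/* Sketch, assuming work_tdr is the scheduler's timeout work. */
	void drm_sched_fault(struct drm_gpu_scheduler *sched)
	{
		mod_delayed_work(system_wq, &sched->work_tdr, 0);
	}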
 
-
 static void sdma_v4_0_update_medium_grain_clock_gating(
 		struct amdgpu_device *adev,
 		bool enable)
@@ -1730,6 +1940,38 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
+static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+	.support_64bit_ptrs = true,
+	.vmhub = AMDGPU_MMHUB,
+	.get_rptr = sdma_v4_0_ring_get_rptr,
+	.get_wptr = sdma_v4_0_page_ring_get_wptr,
+	.set_wptr = sdma_v4_0_page_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* sdma_v4_0_ring_emit_hdp_flush */
+		3 + /* hdp invalidate */
+		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
+		/* sdma_v4_0_ring_emit_vm_flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
+	.emit_ib = sdma_v4_0_ring_emit_ib,
+	.emit_fence = sdma_v4_0_ring_emit_fence,
+	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
+	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
+	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
+	.test_ring = sdma_v4_0_ring_test_ring,
+	.test_ib = sdma_v4_0_ring_test_ib,
+	.insert_nop = sdma_v4_0_ring_insert_nop,
+	.pad_ib = sdma_v4_0_ring_pad_ib,
+	.emit_wreg = sdma_v4_0_ring_emit_wreg,
+	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;
@@ -1737,6 +1979,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
 		adev->sdma.instance[i].ring.me = i;
+		if (adev->sdma.has_page_queue) {
+			adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
+			adev->sdma.instance[i].page.me = i;
+		}
 	}
 }
 
@@ -1818,7 +2064,10 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
 	adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
-	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+	if (adev->sdma.has_page_queue)
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+	else
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
@@ -1836,7 +2085,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
+		if (adev->sdma.has_page_queue)
+			sched = &adev->sdma.instance[i].page.sched;
+		else
+			sched = &adev->sdma.instance[i].ring.sched;
 		adev->vm_manager.vm_pte_rqs[i] =
 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	}
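The run queues collected here are what VM page-table updates get scheduled
on, so paging work rides the page queue when one exists. A hypothetical
consumer, assuming the four-argument drm_sched_entity_init() of this kernel
generation:

	/* Hypothetical: route a VM's PTE updates to the kernel-priority
	 * run queues gathered above. */
	r = drm_sched_entity_init(&vm->entity,
				  adev->vm_manager.vm_pte_rqs,
				  adev->sdma.num_instances,
				  NULL /* no guilty tracking */);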
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index adbaea6..b6e4731 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
 }
 
 static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+				struct amdgpu_job *job,
 				struct amdgpu_ib *ib,
-				unsigned vmid, bool ctx_switch)
+				bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
 	 * Pad as necessary with NOPs.
 	 */
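Every emit_ib callback converted in this series derives the VMID from the job
rather than taking it as a parameter. AMDGPU_JOB_GET_VMID() presumably
tolerates a NULL job so direct submissions still work, along the lines of:

	/* Sketch: direct submissions (no job) fall back to VMID 0. */
	#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)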
@@ -122,7 +124,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-		ring->ready = false;
+		ring->sched.ready = false;
 	}
 }
 
@@ -175,13 +177,11 @@ static int si_dma_start(struct amdgpu_device *adev)
 		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
 		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -209,21 +209,16 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 4);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
 
 	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
@@ -238,15 +233,11 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
 
@@ -269,20 +260,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
 	ib.ptr[1] = lower_32_bits(gpu_addr);
@@ -295,21 +282,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -658,15 +640,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
-					      struct amdgpu_irq_src *source,
-					      struct amdgpu_iv_entry *entry)
-{
-	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
-	return 0;
-}
-
 static int si_dma_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
@@ -781,15 +754,10 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
 	.process = si_dma_process_trap_irq,
 };
 
-static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
-	.process = si_dma_process_illegal_inst_irq,
-};
-
 static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
 {
 	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
 	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
-	adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
new file mode 100644
index 0000000..ac2c27b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_XGMI_IF_H
+#define _TA_XGMI_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+enum ta_command_xgmi {
+	TA_COMMAND_XGMI__INITIALIZE			= 0x00,
+	TA_COMMAND_XGMI__GET_NODE_ID			= 0x01,
+	TA_COMMAND_XGMI__GET_HIVE_ID			= 0x02,
+	TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO		= 0x03,
+	TA_COMMAND_XGMI__SET_TOPOLOGY_INFO		= 0x04
+};
+
+/* XGMI related enumerations */
+/**********************************************************/
+enum ta_xgmi_connected_nodes {
+	TA_XGMI__MAX_CONNECTED_NODES			= 64
+};
+
+enum ta_xgmi_status {
+	TA_XGMI_STATUS__SUCCESS				= 0x00,
+	TA_XGMI_STATUS__GENERIC_FAILURE			= 0x01,
+	TA_XGMI_STATUS__NULL_POINTER			= 0x02,
+	TA_XGMI_STATUS__INVALID_PARAMETER		= 0x03,
+	TA_XGMI_STATUS__NOT_INITIALIZED			= 0x04,
+	TA_XGMI_STATUS__INVALID_NODE_NUM		= 0x05,
+	TA_XGMI_STATUS__INVALID_NODE_ID			= 0x06,
+	TA_XGMI_STATUS__INVALID_TOPOLOGY		= 0x07,
+	TA_XGMI_STATUS__FAILED_ID_GEN			= 0x08,
+	TA_XGMI_STATUS__FAILED_TOPOLOGY_INIT		= 0x09,
+	TA_XGMI_STATUS__SET_SHARING_ERROR		= 0x0A
+};
+
+enum ta_xgmi_assigned_sdma_engine {
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__NOT_ASSIGNED	= -1,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA0		= 0,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA1		= 1,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA2		= 2,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA3		= 3,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA4		= 4,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA5		= 5
+};
+
+/* input/output structures for XGMI commands */
+/**********************************************************/
+struct ta_xgmi_node_info {
+	uint64_t				node_id;
+	uint8_t					num_hops;
+	uint8_t					is_sharing_enabled;
+	enum ta_xgmi_assigned_sdma_engine	sdma_engine;
+};
+
+struct ta_xgmi_cmd_initialize_output {
+	uint32_t	status;
+};
+
+struct ta_xgmi_cmd_get_node_id_output {
+	uint64_t	node_id;
+};
+
+struct ta_xgmi_cmd_get_hive_id_output {
+	uint64_t	hive_id;
+};
+
+struct ta_xgmi_cmd_get_topology_info_input {
+	uint32_t			num_nodes;
+	struct ta_xgmi_node_info	nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_get_topology_info_output {
+	uint32_t			num_nodes;
+	struct ta_xgmi_node_info	nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_set_topology_info_input {
+	uint32_t			num_nodes;
+	struct ta_xgmi_node_info	nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+/**********************************************************/
+/* Common input structure for XGMI callbacks */
+union ta_xgmi_cmd_input {
+	struct ta_xgmi_cmd_get_topology_info_input	get_topology_info;
+	struct ta_xgmi_cmd_set_topology_info_input	set_topology_info;
+};
+
+/* Common output structure for XGMI callbacks */
+union ta_xgmi_cmd_output {
+	struct ta_xgmi_cmd_initialize_output		initialize;
+	struct ta_xgmi_cmd_get_node_id_output		get_node_id;
+	struct ta_xgmi_cmd_get_hive_id_output		get_hive_id;
+	struct ta_xgmi_cmd_get_topology_info_output	get_topology_info;
+};
+/**********************************************************/
+
+struct ta_xgmi_shared_memory {
+	uint32_t			cmd_id;
+	uint32_t			resp_id;
+	enum ta_xgmi_status		xgmi_status;
+	uint32_t			reserved;
+	union ta_xgmi_cmd_input		xgmi_in_message;
+	union ta_xgmi_cmd_output	xgmi_out_message;
+};
+
+#endif /* _TA_XGMI_IF_H */
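A hypothetical caller-side check built from these definitions, matching a TA
response to the command that was sent (only the names come from this header;
the surrounding logic is an assumption):

	static bool xgmi_resp_matches(const struct ta_xgmi_shared_memory *buf,
				      enum ta_command_xgmi cmd)
	{
		/* responses echo the command id with bit 31 set */
		return buf->resp_id == RSP_ID(cmd) &&
		       buf->xgmi_status == TA_XGMI_STATUS__SUCCESS;
	}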
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 1fc17bf..90bbcee 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle)
 	uvd_v4_2_enable_mgcg(adev, true);
 	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle)
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v4_2_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -484,11 +481,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
@@ -499,14 +494,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -519,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
 	amdgpu_ring_write(ring, ib->gpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index fde6ad5..1c5e127 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle)
 	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
 	uvd_v5_0_enable_mgcg(adev, true);
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle)
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v5_0_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -500,11 +497,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
@@ -515,14 +509,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -535,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7a5b402..f184842 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -175,11 +175,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -189,14 +186,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed\n",
-			  ring->idx);
+	if (i >= adev->usec_timeout)
 		r = -ETIMEDOUT;
-	}
 
 	return r;
 }
@@ -336,31 +327,24 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
+
 error:
 	dma_fence_put(fence);
 	return r;
 }
+
 static int uvd_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -476,12 +460,9 @@ static int uvd_v6_0_hw_init(void *handle)
 	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
 	uvd_v6_0_enable_mgcg(adev, true);
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
@@ -513,12 +494,9 @@ static int uvd_v6_0_hw_init(void *handle)
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst->ring_enc[i];
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 		}
 	}
 
@@ -548,7 +526,7 @@ static int uvd_v6_0_hw_fini(void *handle)
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v6_0_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -969,11 +947,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
@@ -984,14 +960,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -1004,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
 	amdgpu_ring_write(ring, vmid);
 
@@ -1027,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 58b39af..8a45959 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -183,11 +183,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 		return 0;
 
 	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
-			  ring->me, ring->idx, r);
+	if (r)
 		return r;
-	}
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -197,14 +194,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
-			 ring->me, ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
-			  ring->me, ring->idx);
+	if (i >= adev->usec_timeout)
 		r = -ETIMEDOUT;
-	}
 
 	return r;
 }
@@ -343,27 +334,19 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
+	if (r)
 		goto error;
-	}
 
 	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
-	} else {
-		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
+
 error:
 	dma_fence_put(fence);
 	return r;
@@ -540,12 +523,9 @@ static int uvd_v7_0_hw_init(void *handle)
 		ring = &adev->uvd.inst[j].ring;
 
 		if (!amdgpu_sriov_vf(adev)) {
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 
 			r = amdgpu_ring_alloc(ring, 10);
 			if (r) {
@@ -582,12 +562,9 @@ static int uvd_v7_0_hw_init(void *handle)
 
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst[j].ring_enc[i];
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 		}
 	}
 done:
@@ -619,7 +596,7 @@ static int uvd_v7_0_hw_fini(void *handle)
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 		if (adev->uvd.harvest_config & (1 << i))
 			continue;
-		adev->uvd.inst[i].ring.ready = false;
+		adev->uvd.inst[i].ring.sched.ready = false;
 	}
 
 	return 0;
@@ -1235,11 +1212,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
-			  ring->me, ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1251,14 +1226,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
-			 ring->me, ring->idx, i);
-	} else {
-		DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->me, ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -1300,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
 	struct amdgpu_device *adev = ring->adev;
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1329,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index ea28828..bed78a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle)
 
 	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 	vce_v2_0_enable_mgcg(adev, true, false);
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
 
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6dbd397..3e84840 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -474,15 +474,10 @@ static int vce_v3_0_hw_init(void *handle)
 
 	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
-
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -838,8 +833,12 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
 }
 
 static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+				  struct amdgpu_job *job,
+				  struct amdgpu_ib *ib,
+				  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1c94718..0054ba1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle)
 	if (r)
 		return r;
 
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
-
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle)
 	}
 
 	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
+		adev->vce.ring[i].sched.ready = false;
 
 	return 0;
 }
@@ -951,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
 }
 #endif
 
-static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+					struct amdgpu_ib *ib, bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index eae9092..c1a0350 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -176,30 +176,22 @@ static int vcn_v1_0_hw_init(void *handle)
 	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
 	int i, r;
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 		ring = &adev->vcn.ring_enc[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		ring->sched.ready = true;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			goto done;
-		}
 	}
 
 	ring = &adev->vcn.ring_jpeg;
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 done:
 	if (!r)
@@ -224,7 +216,7 @@ static int vcn_v1_0_hw_fini(void *handle)
 	if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
 		vcn_v1_0_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -1366,10 +1358,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
  * Write ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
 	struct amdgpu_device *adev = ring->adev;
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1524,8 +1518,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
  * Write enc ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1725,10 +1723,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6
  * Write ring commands to execute the indirect buffer.
  */
 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
 	struct amdgpu_device *adev = ring->adev;
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	amdgpu_ring_write(ring,
 		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 5d2475d..177d1e5 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -23,6 +23,7 @@
 #include "kfd_priv.h"
 #include "kfd_events.h"
 #include "cik_int.h"
+#include "amdgpu_amdkfd.h"
 
 static bool cik_event_interrupt_isr(struct kfd_dev *dev,
 					const uint32_t *ih_ring_entry,
@@ -107,7 +108,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
 		kfd_process_vm_fault(dev->dqm, pasid);
 
 		memset(&info, 0, sizeof(info));
-		dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+		amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info);
 		if (!info.page_addr && !info.status)
 			return;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 37ce6dd..8e2a166 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -68,6 +68,4 @@
 
 #define GRBM_GFX_INDEX					0x30800
 
-#define	ATC_VMID_PASID_MAPPING_VALID			(1U << 31)
-
 #endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 14d5b5f..5f4062b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -37,6 +37,7 @@
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
 #include "kfd_dbgmgr.h"
+#include "amdgpu_amdkfd.h"
 
 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
 static int kfd_open(struct inode *, struct file *);
@@ -834,8 +835,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
 	dev = kfd_device_by_id(args->gpu_id);
 	if (dev)
 		/* Reading GPU clock counter from KGD */
-		args->gpu_clock_counter =
-			dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
 	else
 		/* Node without GPU resource */
 		args->gpu_clock_counter = 0;
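This and the following hunks replace kfd2kgd function-pointer hops with
direct amdgpu_amdkfd_* calls, the "share more code with amdgpu" item of this
merge. A sketch of what one such wrapper plausibly reduces to now that both
drivers live in the same module:

	uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
	{
		/* kgd_dev is an opaque view of amdgpu_device */
		struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

		return amdgpu_asic_get_gpu_clock_counter(adev);
	}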
@@ -1042,7 +1042,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
 		}
 		mutex_unlock(&p->mutex);
 
-		err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
 						mem, &kern_addr, &size);
 		if (err) {
 			pr_err("Failed to map event page to kernel\n");
@@ -1240,7 +1240,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
 	if (dev->device_info->needs_iommu_device)
 		return false;
 
-	dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info);
+	amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
 	if (mem_info.local_mem_size_private == 0 &&
 			mem_info.local_mem_size_public > 0)
 		return true;
@@ -1281,7 +1281,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 		goto err_unlock;
 	}
 
-	err = dev->kfd2kgd->alloc_memory_of_gpu(
+	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		dev->kgd, args->va_addr, args->size,
 		pdd->vm, (struct kgd_mem **) &mem, &offset,
 		flags);
@@ -1303,7 +1303,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 	return 0;
 
 err_free:
-	dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
 err_unlock:
 	mutex_unlock(&p->mutex);
 	return err;
@@ -1338,7 +1338,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
 		goto err_unlock;
 	}
 
-	ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
+						(struct kgd_mem *)mem);
 
 	/* If freeing the buffer failed, leave the handle in place for
 	 * clean-up during process tear-down.
@@ -1418,7 +1419,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 			err = PTR_ERR(peer_pdd);
 			goto get_mem_obj_from_handle_failed;
 		}
-		err = peer->kfd2kgd->map_memory_to_gpu(
+		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
 		if (err) {
 			pr_err("Failed to map to gpu %d/%d\n",
@@ -1430,7 +1431,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 
 	mutex_unlock(&p->mutex);
 
-	err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
+	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
 	if (err) {
 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
 		goto sync_memory_failed;
@@ -1525,7 +1526,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 			err = -ENODEV;
 			goto get_mem_obj_from_handle_failed;
 		}
-		err = dev->kfd2kgd->unmap_memory_to_gpu(
+		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
 		if (err) {
 			pr_err("Failed to unmap from gpu %d/%d\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 56412b0..3783d12 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -26,6 +26,7 @@
 #include "kfd_priv.h"
 #include "kfd_topology.h"
 #include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
 
 /* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
  * GPU processor ID are expressed with Bit[31]=1.
@@ -753,12 +754,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
 		return -ENODATA;
 	}
 
-	pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+	pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
 	if (!pcrat_image)
 		return -ENOMEM;
 
-	memcpy(pcrat_image, crat_table, crat_table->length);
-
 	*crat_image = pcrat_image;
 	*size = crat_table->length;
 
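The kmemdup() conversion above is behavior-preserving; the helper simply fuses the allocate-and-copy pair it replaces. A minimal sketch of the equivalence, for reference:

#include <linux/slab.h>
#include <linux/string.h>

/* Open-coded form that kmemdup() replaces; both return NULL on
 * allocation failure and a private copy of src otherwise.
 */
static void *dup_open_coded(const void *src, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, src, len);
	return p;	/* same result as kmemdup(src, len, GFP_KERNEL) */
}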
@@ -1161,7 +1160,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
 	cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
 	cu->proximity_domain = proximity_domain;
 
-	kdev->kfd2kgd->get_cu_info(kdev->kgd, &cu_info);
+	amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
 	cu->num_simd_per_cu = cu_info.simd_per_cu;
 	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
 	cu->max_waves_simd = cu_info.max_waves_per_simd;
@@ -1192,7 +1191,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
 	 * report the total FB size (public+private) as a single
 	 * private heap.
 	 */
-	kdev->kfd2kgd->get_local_mem_info(kdev->kgd, &local_mem_info);
+	amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
 	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
 			sub_type_hdr->length);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a9f18ea..c004647 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -28,6 +28,7 @@
 #include "kfd_pm4_headers_vi.h"
 #include "cwsr_trap_handler.h"
 #include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
 
 #define MQD_SIZE_ALIGNED 768
 
@@ -478,7 +479,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	/* add another 512KB for all other allocations on gart (HPD, fences) */
 	size += 512 * 1024;
 
-	if (kfd->kfd2kgd->init_gtt_mem_allocation(
+	if (amdgpu_amdkfd_alloc_gtt_mem(
 			kfd->kgd, size, &kfd->gtt_mem,
 			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
 			false)) {
@@ -552,7 +553,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 kfd_doorbell_error:
 	kfd_gtt_sa_fini(kfd);
 kfd_gtt_sa_init_error:
-	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+	amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	dev_err(kfd_device,
 		"device %x:%x NOT added due to errors\n",
 		kfd->pdev->vendor, kfd->pdev->device);
@@ -569,7 +570,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
 		kfd_topology_remove_device(kfd);
 		kfd_doorbell_fini(kfd);
 		kfd_gtt_sa_fini(kfd);
-		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+		amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	}
 
 	kfree(kfd);
@@ -681,6 +682,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
 	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
 	bool is_patched = false;
+	unsigned long flags;
 
 	if (!kfd->init_complete)
 		return;
@@ -690,7 +692,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 		return;
 	}
 
-	spin_lock(&kfd->interrupt_lock);
+	spin_lock_irqsave(&kfd->interrupt_lock, flags);
 
 	if (kfd->interrupts_active
 	    && interrupt_is_wanted(kfd, ih_ring_entry,
@@ -699,7 +701,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 				     is_patched ? patched_ihre : ih_ring_entry))
 		queue_work(kfd->ih_wq, &kfd->interrupt_work);
 
-	spin_unlock(&kfd->interrupt_lock);
+	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 }
 
 int kgd2kfd_quiesce_mm(struct mm_struct *mm)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index a3b9339..fb9d66e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -33,6 +33,7 @@
 #include "kfd_mqd_manager.h"
 #include "cik_regs.h"
 #include "kfd_kernel_queue.h"
+#include "amdgpu_amdkfd.h"
 
 /* Size of the per-pipe EOP queue */
 #define CIK_HPD_EOP_BYTES_LOG2 11
@@ -219,7 +220,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
 	if (ret)
 		return ret;
 
-	return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
 				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
 				pmf->release_mem_size / sizeof(uint32_t));
 }
@@ -672,7 +673,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 
 	pdd = qpd_to_pdd(qpd);
 	/* Retrieve PD base */
-	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 
 	dqm_lock(dqm);
 	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -743,7 +744,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 
 	pdd = qpd_to_pdd(qpd);
 	/* Retrieve PD base */
-	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 
 	dqm_lock(dqm);
 	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -793,7 +794,7 @@ static int register_process(struct device_queue_manager *dqm,
 
 	pdd = qpd_to_pdd(qpd);
 	/* Retrieve PD base */
-	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 
 	dqm_lock(dqm);
 	list_add(&n->list, &dqm->queues);
@@ -805,7 +806,7 @@ static int register_process(struct device_queue_manager *dqm,
 	retval = dqm->asic_ops.update_qpd(dqm, qpd);
 
 	if (dqm->processes_count++ == 0)
-		dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
+		amdgpu_amdkfd_set_compute_idle(dqm->dev->kgd, false);
 
 	dqm_unlock(dqm);
 
@@ -829,7 +830,7 @@ static int unregister_process(struct device_queue_manager *dqm,
 			list_del(&cur->list);
 			kfree(cur);
 			if (--dqm->processes_count == 0)
-				dqm->dev->kfd2kgd->set_compute_idle(
+				amdgpu_amdkfd_set_compute_idle(
 					dqm->dev->kgd, true);
 			goto out;
 		}
@@ -845,15 +846,8 @@ static int
 set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
 			unsigned int vmid)
 {
-	uint32_t pasid_mapping;
-
-	pasid_mapping = (pasid == 0) ? 0 :
-		(uint32_t)pasid |
-		ATC_VMID_PASID_MAPPING_VALID;
-
 	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
-						dqm->dev->kgd, pasid_mapping,
-						vmid);
+						dqm->dev->kgd, pasid, vmid);
 }
 
 static void init_interrupts(struct device_queue_manager *dqm)
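With set_pasid_vmid_mapping() reduced to a plain pass-through, the VALID-bit packing now happens behind the kfd2kgd hook on the amdgpu side, which is why ATC_VMID_PASID_MAPPING_VALID could be dropped from cik_regs.h earlier in this patch. A hedged sketch of the packing that moved (the helper name is illustrative, not taken from this patch):

#include <linux/types.h>

/* Bit 31 marks the mapping valid, matching the definition removed
 * from cik_regs.h above.
 */
#define ATC_VMID_PASID_MAPPING_VALID	(1U << 31)

/* Illustrative helper: PASID 0 means "unmapped"; any other PASID is
 * stored with the valid bit set.
 */
static u32 pack_pasid_mapping(unsigned int pasid)
{
	return pasid ? (u32)pasid | ATC_VMID_PASID_MAPPING_VALID : 0;
}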
@@ -1796,7 +1790,7 @@ static void kfd_process_hw_exception(struct work_struct *work)
 {
 	struct device_queue_manager *dqm = container_of(work,
 			struct device_queue_manager, hw_exception_work);
-	dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+	amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
 }
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index e33019a..6910028 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -22,6 +22,7 @@
  */
 
 #include "kfd_mqd_manager.h"
+#include "amdgpu_amdkfd.h"
 
 struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
 					struct kfd_dev *dev)
@@ -58,7 +59,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
 	uint32_t cu_per_sh[4] = {0};
 	int i, se, cu = 0;
 
-	mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+	amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
 
 	if (cu_mask_count > cu_info.cu_active_number)
 		cu_mask_count = cu_info.cu_active_number;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index f381c1c..9dbba60 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -30,6 +30,7 @@
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"
 #include "sdma0/sdma0_4_0_sh_mask.h"
+#include "amdgpu_amdkfd.h"
 
 static inline struct v9_mqd *get_mqd(void *mqd)
 {
@@ -83,7 +84,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 		*mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
 		if (!*mqd_mem_obj)
 			return -ENOMEM;
-		retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
+		retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
 			ALIGN(q->ctl_stack_size, PAGE_SIZE) +
 				ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
 			&((*mqd_mem_obj)->gtt_mem),
@@ -250,7 +251,7 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
 	struct kfd_dev *kfd = mm->dev;
 
 	if (mqd_mem_obj->gtt_mem) {
-		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
+		amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
 		kfree(mqd_mem_obj);
 	} else {
 		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 15fff44..33b08ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -22,6 +22,7 @@
 
 #include <linux/types.h>
 #include "kfd_priv.h"
+#include "amdgpu_ids.h"
 
 static unsigned int pasid_bits = 16;
 static const struct kfd2kgd_calls *kfd2kgd;
@@ -71,7 +72,7 @@ unsigned int kfd_pasid_alloc(void)
 			return false;
 	}
 
-	r = kfd2kgd->alloc_pasid(pasid_bits);
+	r = amdgpu_pasid_alloc(pasid_bits);
 
 	return r > 0 ? r : 0;
 }
@@ -79,5 +80,5 @@ unsigned int kfd_pasid_alloc(void)
 void kfd_pasid_free(unsigned int pasid)
 {
 	if (kfd2kgd)
-		kfd2kgd->free_pasid(pasid);
+		amdgpu_pasid_free(pasid);
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 53ff86d..dec8e64 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -507,6 +507,7 @@ struct qcm_process_device {
 	 * All the memory management data should be here too
 	 */
 	uint64_t gds_context_area;
+	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
 	uint64_t page_table_base;
 	uint32_t sh_mem_config;
 	uint32_t sh_mem_bases;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 0039e45..80b36e8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
 #include <linux/compat.h>
 #include <linux/mman.h>
 #include <linux/file.h>
+#include "amdgpu_amdkfd.h"
 
 struct mm_struct;
 
@@ -100,8 +101,8 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
 {
 	struct kfd_dev *dev = pdd->dev;
 
-	dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
-	dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
+	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
+	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
 }
 
 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -119,16 +120,16 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 	int handle;
 	int err;
 
-	err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
 						 pdd->vm, &mem, NULL, flags);
 	if (err)
 		goto err_alloc_mem;
 
-	err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
+	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
 	if (err)
 		goto err_map_mem;
 
-	err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
+	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
 	if (err) {
 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
 		goto sync_memory_failed;
@@ -147,7 +148,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 	}
 
 	if (kptr) {
-		err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
+		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
 				(struct kgd_mem *)mem, kptr, NULL);
 		if (err) {
 			pr_debug("Map GTT BO to kernel failed\n");
@@ -165,7 +166,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 	return err;
 
 err_map_mem:
-	kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
+	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
 err_alloc_mem:
 	*kptr = NULL;
 	return err;
@@ -296,11 +297,11 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
 				    per_device_list) {
 			if (!peer_pdd->vm)
 				continue;
-			peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
+			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 				peer_pdd->dev->kgd, mem, peer_pdd->vm);
 		}
 
-		pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
+		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
 		kfd_process_device_remove_obj_handle(pdd, id);
 	}
 }
@@ -323,11 +324,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
 				pdd->dev->id, p->pasid);
 
 		if (pdd->drm_file) {
-			pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm);
+			amdgpu_amdkfd_gpuvm_release_process_vm(
+					pdd->dev->kgd, pdd->vm);
 			fput(pdd->drm_file);
 		}
 		else if (pdd->vm)
-			pdd->dev->kfd2kgd->destroy_process_vm(
+			amdgpu_amdkfd_gpuvm_destroy_process_vm(
 				pdd->dev->kgd, pdd->vm);
 
 		list_del(&pdd->per_device_list);
@@ -688,12 +690,12 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
 	dev = pdd->dev;
 
 	if (drm_file)
-		ret = dev->kfd2kgd->acquire_process_vm(
+		ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
 			dev->kgd, drm_file, p->pasid,
 			&pdd->vm, &p->kgd_process_info, &p->ef);
 	else
-		ret = dev->kfd2kgd->create_process_vm(
-			dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef);
+		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
+			&pdd->vm, &p->kgd_process_info, &p->ef);
 	if (ret) {
 		pr_err("Failed to create process VM object\n");
 		return ret;
@@ -714,7 +716,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
 err_reserve_ib_mem:
 	kfd_process_device_free_bos(pdd);
 	if (!drm_file)
-		dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
+		amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
 	pdd->vm = NULL;
 
 	return ret;
@@ -972,7 +974,7 @@ static void restore_process_worker(struct work_struct *work)
 	 */
 
 	p->last_restore_timestamp = get_jiffies_64();
-	ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
+	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
 						     &p->ef);
 	if (ret) {
 		pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e3843c5..c73b4ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -36,6 +36,7 @@
 #include "kfd_topology.h"
 #include "kfd_device_queue_manager.h"
 #include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
 
 /* topology_device_list - Master list of all topology devices */
 static struct list_head topology_device_list;
@@ -1052,7 +1053,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
 	if (!gpu)
 		return 0;
 
-	gpu->kfd2kgd->get_local_mem_info(gpu->kgd, &local_mem_info);
+	amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info);
 
 	local_mem_size = local_mem_info.local_mem_size_private +
 			local_mem_info.local_mem_size_public;
@@ -1118,8 +1119,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
 	 * for APUs - If CRAT from ACPI reports more than one bank, then
 	 *	all the banks will report the same mem_clk_max information
 	 */
-	dev->gpu->kfd2kgd->get_local_mem_info(dev->gpu->kgd,
-		&local_mem_info);
+	amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info);
 
 	list_for_each_entry(mem, &dev->mem_props, list)
 		mem->mem_clk_max = local_mem_info.mem_clk_max;
@@ -1240,7 +1240,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	 * needed for the topology
 	 */
 
-	dev->gpu->kfd2kgd->get_cu_info(dev->gpu->kgd, &cu_info);
+	amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
 	dev->node_props.simd_arrays_per_engine =
 		cu_info.num_shader_arrays_per_engine;
 
@@ -1249,7 +1249,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
 		gpu->pdev->devfn);
 	dev->node_props.max_engine_clk_fcompute =
-		dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(dev->gpu->kgd);
+		amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
 	dev->node_props.max_engine_clk_ccompute =
 		cpufreq_quick_get_max(0) / 1000;
 	dev->node_props.drm_render_minor =
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5064768..aa43bb25 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -76,6 +76,16 @@
 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
 
+/**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
 /* basic init/fini API */
 static int amdgpu_dm_init(struct amdgpu_device *adev);
 static void amdgpu_dm_fini(struct amdgpu_device *adev);
@@ -95,7 +105,7 @@ static void
 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
 
 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
-				struct amdgpu_plane *aplane,
+				struct drm_plane *plane,
 				unsigned long possible_crtcs);
 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 			       struct drm_plane *plane,
@@ -379,11 +389,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
 
 }
 
-/*
- * Init display KMS
- *
- * Returns 0 on success
- */
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -663,6 +668,26 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 }
 
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function name implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
 static int dm_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -673,6 +698,14 @@ static int dm_hw_init(void *handle)
 	return 0;
 }
 
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. IRQ workqueues are also flushed and disabled.
+ */
 static int dm_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -898,6 +931,16 @@ static int dm_resume(void *handle)
 	return ret;
 }
 
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
 static const struct amd_ip_funcs amdgpu_dm_funcs = {
 	.name = "dm",
 	.early_init = dm_early_init,
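For context on the lifecycle described above: the amd_ip_funcs table is wrapped in an IP block descriptor and added to the base driver's device list during SoC init. A simplified sketch of that registration pattern (the descriptor shown follows the usual amdgpu convention; consult amdgpu_dm.c and the SoC setup code for the authoritative version):

/* Wrap the hooks in an IP block descriptor... */
static const struct amdgpu_ip_block_version dm_ip_block = {
	.type  = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev   = 0,
	.funcs = &amdgpu_dm_funcs,
};

/* ...then, during SoC init, add it to the device's IP block list: */
/*	amdgpu_device_ip_block_add(adev, &dm_ip_block);	*/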
@@ -965,6 +1008,12 @@ dm_atomic_state_alloc_free(struct drm_atomic_state *state)
 	kfree(dm_state);
 }
 
+/**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
+
 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 	.fb_create = amdgpu_display_user_framebuffer_create,
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
@@ -1527,8 +1576,23 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 {
 	struct amdgpu_display_manager *dm = bl_get_data(bd);
 
+	/*
+	 * The backlight_pwm_u16_16 parameter is an unsigned 32-bit value with
+	 * 16 integer bits and 16 fractional bits, where 1.0 is the maximum
+	 * backlight value. bd->props.brightness is an 8-bit value; convert it
+	 * by copying its byte into both bytes of the 16-bit value, i.e. by
+	 * multiplying by 0x101.
+	 */
+	uint32_t brightness = bd->props.brightness * 0x101;
+
+	/*
+	 * The PWM interprets 0 as 100% rather than 0% because of a hardware
+	 * limitation at level 0, so clamp the minimum brightness level to 1.
+	 */
+	if (bd->props.brightness < 1)
+		brightness = 0x101;
+
 	if (dc_link_set_backlight_level(dm->backlight_link,
-			bd->props.brightness, 0, 0))
+			brightness, 0, 0))
 		return 0;
 	else
 		return 1;
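The 0x101 multiplier above implements byte replication: b * 0x101 == (b << 8) | b, so an 8-bit brightness maps onto the full 16-bit range passed to dc_link_set_backlight_level(). A standalone check of the arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 8-bit brightness scaled to 16.16 fixed point by byte replication */
	uint32_t max = 255 * 0x101;	/* 0xFFFF: just under 1.0 */
	uint32_t half = 128 * 0x101;	/* 0x8080: about 50.2% */
	uint32_t min = 1 * 0x101;	/* 0x0101: the clamped minimum */

	assert(max == 0xFFFF);
	assert(half == 0x8080);
	assert(min == 0x0101);
	return 0;
}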
@@ -1580,18 +1644,18 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
 			     struct amdgpu_mode_info *mode_info,
 			     int plane_id)
 {
-	struct amdgpu_plane *plane;
+	struct drm_plane *plane;
 	unsigned long possible_crtcs;
 	int ret = 0;
 
-	plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
 	mode_info->planes[plane_id] = plane;
 
 	if (!plane) {
 		DRM_ERROR("KMS: Failed to allocate plane\n");
 		return -ENOMEM;
 	}
-	plane->base.type = mode_info->plane_type[plane_id];
+	plane->type = mode_info->plane_type[plane_id];
 
 	/*
 	 * HACK: IGT tests expect that each plane can only have
@@ -1682,7 +1746,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	}
 
 	for (i = 0; i < dm->dc->caps.max_streams; i++)
-		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
 			DRM_ERROR("KMS: Failed to initialize crtc\n");
 			goto fail;
 		}
@@ -3457,49 +3521,49 @@ static const u32 cursor_formats[] = {
 };
 
 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
-				struct amdgpu_plane *aplane,
+				struct drm_plane *plane,
 				unsigned long possible_crtcs)
 {
 	int res = -EPERM;
 
-	switch (aplane->base.type) {
+	switch (plane->type) {
 	case DRM_PLANE_TYPE_PRIMARY:
 		res = drm_universal_plane_init(
 				dm->adev->ddev,
-				&aplane->base,
+				plane,
 				possible_crtcs,
 				&dm_plane_funcs,
 				rgb_formats,
 				ARRAY_SIZE(rgb_formats),
-				NULL, aplane->base.type, NULL);
+				NULL, plane->type, NULL);
 		break;
 	case DRM_PLANE_TYPE_OVERLAY:
 		res = drm_universal_plane_init(
 				dm->adev->ddev,
-				&aplane->base,
+				plane,
 				possible_crtcs,
 				&dm_plane_funcs,
 				yuv_formats,
 				ARRAY_SIZE(yuv_formats),
-				NULL, aplane->base.type, NULL);
+				NULL, plane->type, NULL);
 		break;
 	case DRM_PLANE_TYPE_CURSOR:
 		res = drm_universal_plane_init(
 				dm->adev->ddev,
-				&aplane->base,
+				plane,
 				possible_crtcs,
 				&dm_plane_funcs,
 				cursor_formats,
 				ARRAY_SIZE(cursor_formats),
-				NULL, aplane->base.type, NULL);
+				NULL, plane->type, NULL);
 		break;
 	}
 
-	drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
 
 	/* Create (reset) the plane state */
-	if (aplane->base.funcs->reset)
-		aplane->base.funcs->reset(&aplane->base);
+	if (plane->funcs->reset)
+		plane->funcs->reset(plane);
 
 
 	return res;
@@ -3510,7 +3574,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 			       uint32_t crtc_index)
 {
 	struct amdgpu_crtc *acrtc = NULL;
-	struct amdgpu_plane *cursor_plane;
+	struct drm_plane *cursor_plane;
 
 	int res = -ENOMEM;
 
@@ -3518,7 +3582,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 	if (!cursor_plane)
 		goto fail;
 
-	cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
 
 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
@@ -3529,7 +3593,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 			dm->ddev,
 			&acrtc->base,
 			plane,
-			&cursor_plane->base,
+			cursor_plane,
 			&amdgpu_dm_crtc_funcs, NULL);
 
 	if (res)
@@ -3768,12 +3832,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 	case DRM_MODE_CONNECTOR_HDMIA:
 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 		aconnector->base.ycbcr_420_allowed =
-			link->link_enc->features.ycbcr420_supported ? true : false;
+			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 		aconnector->base.ycbcr_420_allowed =
-			link->link_enc->features.ycbcr420_supported ? true : false;
+			link->link_enc->features.dp_ycbcr420_supported ? true : false;
 		break;
 	case DRM_MODE_CONNECTOR_DVID:
 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -4531,6 +4595,14 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
 	/*TODO Handle EINTR, reenable IRQ*/
 }
 
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -5302,6 +5374,12 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
 	struct dc_stream_update stream_update;
 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
 
+	if (!updates || !surface) {
+		DRM_ERROR("Plane or surface update failed to allocate\n");
+		/* Set type to FULL to avoid crashing in DC */
+		update_type = UPDATE_TYPE_FULL;
+		goto ret;
+	}
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -5377,6 +5455,31 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
 	return update_type;
 }
 
+/**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ * @dev: The DRM device
+ * @state: The atomic state to commit
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For full updates case which removes/adds/updates streams on one
+ * CRTC while flipping on another CRTC, acquiring global lock will guarantee
+ * that any such full update commit will wait for completion of any outstanding
+ * flip using DRM's synchronization events. See
+ * dm_determine_update_type_for_commit()
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
+ * that might not seem necessary. This is because DC stream creation requires the
+ * DC sink, which is tied to the DRM connector state. Cleaning this up should
+ * be possible but non-trivial - a possible TODO item.
+ *
+ * Return: 0 on success, or a negative error code if validation failed.
+ */
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
@@ -5479,15 +5582,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		lock_and_validation_needed = true;
 	}
 
-	/*
-	 * For full updates case when
-	 * removing/adding/updating streams on one CRTC while flipping
-	 * on another CRTC,
-	 * acquiring global lock  will guarantee that any such full
-	 * update commit
-	 * will wait for completion of any outstanding flip using DRMs
-	 * synchronization events.
-	 */
 	update_type = dm_determine_update_type_for_commit(dc, state);
 
 	if (overall_update_type < update_type)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 924a38a..d696064 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -59,49 +59,100 @@ struct common_irq_params {
 	enum dc_irq_source irq_src;
 };
 
+/**
+ * struct irq_list_head - Linked-list for low context IRQ handlers.
+ *
+ * @head: The list_head within &struct handler_data
+ * @work: A work_struct containing the deferred handler work
+ */
 struct irq_list_head {
 	struct list_head head;
 	/* In case this interrupt needs post-processing, 'work' will be queued*/
 	struct work_struct work;
 };
 
+/**
+ * struct dm_comressor_info - Buffer info used by frame buffer compression
+ * @cpu_addr: MMIO cpu addr
+ * @bo_ptr: Pointer to the buffer object
+ * @gpu_addr: MMIO gpu addr
+ */
 struct dm_comressor_info {
 	void *cpu_addr;
 	struct amdgpu_bo *bo_ptr;
 	uint64_t gpu_addr;
 };
 
+/**
+ * struct amdgpu_display_manager - Central amdgpu display manager device
+ *
+ * @dc: Display Core control structure
+ * @adev: AMDGPU base driver structure
+ * @ddev: DRM base driver structure
+ * @display_indexes_num: Max number of display streams supported
+ * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
+ * @backlight_dev: Backlight control device
+ * @cached_state: Caches device atomic state for suspend/resume
+ * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
+ */
 struct amdgpu_display_manager {
+
 	struct dc *dc;
+
+	/**
+	 * @cgs_device:
+	 *
+	 * The Common Graphics Services device. It provides an interface for
+	 * accessing registers.
+	 */
 	struct cgs_device *cgs_device;
 
-	struct amdgpu_device *adev;	/*AMD base driver*/
-	struct drm_device *ddev;	/*DRM base driver*/
+	struct amdgpu_device *adev;
+	struct drm_device *ddev;
 	u16 display_indexes_num;
 
-	/*
-	 * 'irq_source_handler_table' holds a list of handlers
-	 * per (DAL) IRQ source.
+	/**
+	 * @irq_handler_list_low_tab:
 	 *
-	 * Each IRQ source may need to be handled at different contexts.
-	 * By 'context' we mean, for example:
-	 * - The ISR context, which is the direct interrupt handler.
-	 * - The 'deferred' context - this is the post-processing of the
-	 *	interrupt, but at a lower priority.
+	 * Low priority IRQ handler table.
+	 *
+	 * It is an n*m table consisting of n IRQ sources, and m handlers per IRQ
+	 * source. Low priority IRQ handlers are deferred to a workqueue to be
+	 * processed. Hence, they can sleep.
 	 *
 	 * Note that handlers are called in the same order as they were
 	 * registered (FIFO).
 	 */
 	struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+
+	/**
+	 * @irq_handler_list_high_tab:
+	 *
+	 * High priority IRQ handler table.
+	 *
+	 * It is an n*m table, same as &irq_handler_list_low_tab. However,
+	 * handlers in this table are not deferred and are called immediately.
+	 */
 	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
 
+	/**
+	 * @pflip_params:
+	 *
+	 * Page flip IRQ parameters, passed to registered handlers when
+	 * triggered.
+	 */
 	struct common_irq_params
 	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
 
+	/**
+	 * @vblank_params:
+	 *
+	 * Vertical blanking IRQ parameters, passed to registered handlers when
+	 * triggered.
+	 */
 	struct common_irq_params
 	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
 
-	/* this spin lock synchronizes access to 'irq_handler_list_table' */
 	spinlock_t irq_handler_list_table_lock;
 
 	struct backlight_device *backlight_dev;
@@ -110,9 +161,6 @@ struct amdgpu_display_manager {
 
 	struct mod_freesync *freesync_module;
 
-	/**
-	 * Caches device atomic state for suspend/resume
-	 */
 	struct drm_atomic_state *cached_state;
 
 	struct dm_comressor_info compressor;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index be19e68..216e48c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -164,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
 	 */
 	stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
 	ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
-						 gamma, true, adev->asic_type <= CHIP_RAVEN);
+						 gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
 	dc_gamma_release(&gamma);
 	if (!ret) {
 		stream->out_transfer_func->type = old_type;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 01fc571..f088ac5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
 		return -EINVAL;
 	}
 
+	if (!stream_state) {
+		DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
+		return -EINVAL;
+	}
+
 	/* When enabling CRC, we should also disable dithering. */
 	if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
 		if (dc_stream_configure_crc(stream_state->ctx->dc,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index a212178..cd10f77 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -32,16 +32,55 @@
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_irq.h"
 
+/**
+ * DOC: overview
+ *
+ * DM provides another layer of IRQ management on top of what the base driver
+ * already provides. This is something that could be cleaned up, and is a
+ * future TODO item.
+ *
+ * The base driver provides IRQ source registration with DRM, handler
+ * registration into the base driver's IRQ table, and a handler callback
+ * amdgpu_irq_handler(), which DRM calls when an interrupt fires. This generic
+ * handler looks up the IRQ table and calls the respective
+ * &amdgpu_irq_src_funcs.process hookups.
+ *
+ * What DM provides on top are two IRQ tables specifically for top-half and
+ * bottom-half IRQ handling, with the bottom half implemented via workqueues:
+ *
+ * - &amdgpu_display_manager.irq_handler_list_high_tab
+ * - &amdgpu_display_manager.irq_handler_list_low_tab
+ *
+ * They override the base driver's IRQ table, and the effect can be seen
+ * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
+ * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up
+ * DM's IRQ tables. However, in order for the base driver to recognize this
+ * hook, DM still needs to register the IRQ with the base driver. See
+ * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
+ *
+ * To expose DC's hardware interrupt toggle to the base driver, DM implements
+ * &amdgpu_irq_src_funcs.set hooks. Base driver calls it through
+ * amdgpu_irq_update() to enable or disable the interrupt.
+ */
+
 /******************************************************************************
  * Private declarations.
  *****************************************************************************/
 
+/**
+ * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
+ *
+ * @list: Linked list entry referencing the next/previous handler
+ * @handler: Handler function
+ * @handler_arg: Argument passed to the handler when triggered
+ * @dm: DM which this handler belongs to
+ * @irq_source: DC interrupt source that this handler is registered for
+ */
 struct amdgpu_dm_irq_handler_data {
 	struct list_head list;
 	interrupt_handler handler;
 	void *handler_arg;
 
-	/* DM which this handler belongs to */
 	struct amdgpu_display_manager *dm;
 	/* DAL irq source which registered for this interrupt. */
 	enum dc_irq_source irq_source;
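To make the two-table scheme above concrete, here is a hedged sketch of registering a deferred (low context) handler, loosely modeled on the register_hpd_handlers() pattern in amdgpu_dm.c; the handler and argument names are illustrative:

/* Deferred handler: runs from the low context workqueue, may sleep. */
static void example_hpd_handler(void *arg)
{
	/* e.g. re-detect the connector, talk to the sink over AUX, ... */
}

static void example_register_hpd(struct amdgpu_device *adev, void *arg)
{
	struct dc_interrupt_params int_params = {0};

	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
	int_params.irq_source = DC_IRQ_SOURCE_HPD1;

	amdgpu_dm_irq_register_interrupt(adev, &int_params,
					 example_hpd_handler, arg);
}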
@@ -68,7 +107,7 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
 }
 
 /**
- * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
  *
  * @work: work struct
  */
@@ -99,8 +138,8 @@ static void dm_irq_work_func(struct work_struct *work)
 	 * (The most common use is HPD interrupt) */
 }
 
-/**
- * Remove a handler and return a pointer to hander list from which the
+/*
+ * Remove a handler and return a pointer to handler list from which the
  * handler was removed.
  */
 static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
@@ -203,6 +242,24 @@ static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
  * Note: caller is responsible for input validation.
  *****************************************************************************/
 
+/**
+ * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
+ * @adev: The base driver device containing the DM device.
+ * @int_params: Interrupt parameters containing the source, and handler context
+ * @ih: Function pointer to the interrupt handler to register
+ * @handler_args: Arguments passed to the handler when the interrupt occurs
+ *
+ * Register an interrupt handler for the given IRQ source, under the given
+ * context. The context can either be high or low. High context handlers are
+ * executed directly within ISR context, while low context handlers run from a
+ * workqueue, thereby allowing operations that sleep.
+ *
+ * Registered handlers are called in a FIFO manner, i.e. the earliest
+ * registered handler is called first.
+ *
+ * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
+ *         source, handler function, and args
+ */
 void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
 				       struct dc_interrupt_params *int_params,
 				       void (*ih)(void *),
@@ -261,6 +318,15 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
 	return handler_data;
 }
 
+/**
+ * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
+ * @adev: The base driver device containing the DM device
+ * @irq_source: IRQ source to remove the given handler from
+ * @ih: Function pointer to the interrupt handler to unregister
+ *
+ * Go through both low and high context IRQ tables, and find the given handler
+ * for the given irq source. If found, remove it. Otherwise, do nothing.
+ */
 void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
 					enum dc_irq_source irq_source,
 					void *ih)
@@ -295,6 +361,20 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
 	}
 }
 
+/**
+ * amdgpu_dm_irq_init() - Initialize DM IRQ management
+ * @adev: The base driver device containing the DM device
+ *
+ * Initialize DM's high and low context IRQ tables.
+ *
+ * The N-by-M table contains N IRQ sources, each with up to M
+ * &struct amdgpu_dm_irq_handler_data entries linked together in a list. The
+ * list_heads are initialized here. When an interrupt n is triggered, all m
+ * handlers are called in sequence, FIFO according to registration order.
+ *
+ * The low context table requires special steps to initialize, since handlers
+ * will be deferred to a workqueue. See &struct irq_list_head.
+ */
 int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 {
 	int src;
@@ -317,7 +397,12 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-/* DM IRQ and timer resource release */
+/**
+ * amdgpu_dm_irq_fini() - Tear down DM IRQ management
+ * @adev: The base driver device containing the DM device
+ *
+ * Flush all work within the low context IRQ table.
+ */
 void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 {
 	int src;
@@ -414,7 +499,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
 	return 0;
 }
 
-/**
+/*
  * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
  * "irq_source".
  */
@@ -439,8 +524,9 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
 
 }
 
-/** amdgpu_dm_irq_immediate_work
- *  Callback high irq work immediately, don't send to work queue
+/*
+ * amdgpu_dm_irq_immediate_work
+ * Call high irq work immediately; do not defer it to a work queue.
  */
 static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
 					 enum dc_irq_source irq_source)
@@ -467,11 +553,14 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
 	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 }
 
-/*
- * amdgpu_dm_irq_handler
+/**
+ * amdgpu_dm_irq_handler() - Generic DM IRQ handler
+ * @adev: amdgpu base driver device containing the DM device
+ * @source: Unused
+ * @entry: Data about the triggered interrupt
  *
- * Generic IRQ handler, calls all registered high irq work immediately, and
- * schedules work for low irq
+ * Calls all registered high irq work immediately, and schedules work for low
+ * irq. The DM IRQ table is used to find the corresponding handlers.
  */
 static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
@@ -613,7 +702,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
 }
 
-/*
+/**
  * amdgpu_dm_hpd_init - hpd setup callback.
  *
  * @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 12001a0..9d2d698 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -485,11 +485,11 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
 		return;
 
 	clock.clock_type = amd_pp_dcf_clock;
-	clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+	clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000;
 	pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 
 	clock.clock_type = amd_pp_f_clock;
-	clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+	clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000;
 	pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 }
 
@@ -518,13 +518,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
 			wm_dce_clocks[i].wm_set_id =
 					ranges->reader_wm_sets[i].wm_inst;
 		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
-				ranges->reader_wm_sets[i].max_drain_clk_khz;
+				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
 		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
-				ranges->reader_wm_sets[i].min_drain_clk_khz;
+				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
 		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
-				ranges->reader_wm_sets[i].max_fill_clk_khz;
+				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
 		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
-				ranges->reader_wm_sets[i].min_fill_clk_khz;
+				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
 	}
 
 	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
@@ -534,13 +534,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
 			wm_soc_clocks[i].wm_set_id =
 					ranges->writer_wm_sets[i].wm_inst;
 		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
-				ranges->writer_wm_sets[i].max_fill_clk_khz;
+				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
 		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
-				ranges->writer_wm_sets[i].min_fill_clk_khz;
+				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
 		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
-				ranges->writer_wm_sets[i].max_drain_clk_khz;
+				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
 		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
-				ranges->writer_wm_sets[i].min_drain_clk_khz;
+				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
 	}
 
 	pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 0e1dc1b..c2ab026 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2030,7 +2030,7 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
 static struct device_id device_type_from_device_id(uint16_t device_id)
 {
 
-	struct device_id result_device_id;
+	struct device_id result_device_id = {0};
 
 	switch (device_id) {
 	case ATOM_DEVICE_LCD1_SUPPORT:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index ff764da..751bb61 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1884,6 +1884,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
 
 	.is_accelerated_mode = bios_parser_is_accelerated_mode,
 
+	.is_active_display = bios_is_active_display,
+
 	.set_scratch_critical_state = bios_parser_set_scratch_critical_state,
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
index d458947..fdda8aa 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -88,3 +88,96 @@ uint32_t bios_get_vga_enabled_displays(
 	return active_disp;
 }
 
+bool bios_is_active_display(
+		struct dc_bios *bios,
+		enum signal_type signal,
+		const struct connector_device_tag_info *device_tag)
+{
+	uint32_t active = 0;
+	uint32_t connected = 0;
+	uint32_t bios_scratch_0 = 0;
+	uint32_t bios_scratch_3 = 0;
+
+	switch (signal)	{
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		{
+			if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) {
+				switch (device_tag->dev_id.enum_id)	{
+				case 1:
+					{
+						active    = ATOM_S3_DFP1_ACTIVE;
+						connected = 0x0008;	//ATOM_DISPLAY_DFP1_CONNECT
+					}
+					break;
+
+				case 2:
+					{
+						active    = ATOM_S3_DFP2_ACTIVE;
+						connected = 0x0080; //ATOM_DISPLAY_DFP2_CONNECT
+					}
+					break;
+
+				case 3:
+					{
+						active    = ATOM_S3_DFP3_ACTIVE;
+						connected = 0x0200; //ATOM_DISPLAY_DFP3_CONNECT
+					}
+					break;
+
+				case 4:
+					{
+						active    = ATOM_S3_DFP4_ACTIVE;
+						connected = 0x0400;	//ATOM_DISPLAY_DFP4_CONNECT
+					}
+					break;
+
+				case 5:
+					{
+						active    = ATOM_S3_DFP5_ACTIVE;
+						connected = 0x0800; //ATOM_DISPLAY_DFP5_CONNECT
+					}
+					break;
+
+				case 6:
+					{
+						active    = ATOM_S3_DFP6_ACTIVE;
+						connected = 0x0040; //ATOM_DISPLAY_DFP6_CONNECT
+					}
+					break;
+
+				default:
+					break;
+				}
+				}
+			}
+			break;
+
+	case SIGNAL_TYPE_LVDS:
+	case SIGNAL_TYPE_EDP:
+		{
+			active    = ATOM_S3_LCD1_ACTIVE;
+			connected = 0x0002;	//ATOM_DISPLAY_LCD1_CONNECT
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	if (bios->regs->BIOS_SCRATCH_0) /* follow up with other ASICs, TODO */
+		bios_scratch_0 = REG_READ(BIOS_SCRATCH_0);
+	if (bios->regs->BIOS_SCRATCH_3) /* follow up with other ASICs, TODO */
+		bios_scratch_3 = REG_READ(BIOS_SCRATCH_3);
+
+	bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK;
+	if ((active & bios_scratch_3) && (connected & bios_scratch_0))
+		return true;
+
+	return false;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
index 75a29e6..f33cac2 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -35,6 +35,10 @@ bool bios_is_accelerated_mode(struct dc_bios *bios);
 void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
 void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
 uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
+bool bios_is_active_display(
+	struct dc_bios *bios,
+	enum signal_type signal,
+	const struct connector_device_tag_info *device_tag);
 
 #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
 
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3208188..43e4a2b 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1423,27 +1423,27 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
 	ranges.num_reader_wm_sets = WM_SET_COUNT;
 	ranges.num_writer_wm_sets = WM_SET_COUNT;
 	ranges.reader_wm_sets[0].wm_inst = WM_A;
-	ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
-	ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
-	ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
-	ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
+	ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000;
+	ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
+	ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000;
+	ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
 	ranges.writer_wm_sets[0].wm_inst = WM_A;
-	ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
-	ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
-	ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
-	ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
+	ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000;
+	ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
+	ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000;
+	ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
 
 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
 		ranges.reader_wm_sets[0].wm_inst = WM_A;
-		ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
-		ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
-		ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
-		ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
+		ranges.reader_wm_sets[0].min_drain_clk_mhz = 300;
+		ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000;
+		ranges.reader_wm_sets[0].min_fill_clk_mhz = 800;
+		ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000;
 		ranges.writer_wm_sets[0].wm_inst = WM_A;
-		ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
-		ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
-		ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
-		ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
+		ranges.writer_wm_sets[0].min_fill_clk_mhz = 200;
+		ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000;
+		ranges.writer_wm_sets[0].min_drain_clk_mhz = 800;
+		ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000;
 	}
 
 	ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7c491c9..3279e26 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -391,9 +391,11 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
 				== stream) {
 
 			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
-			dc->hwss.program_csc_matrix(pipes,
-			stream->output_color_space,
-			stream->csc_color_matrix.matrix);
+			dc->hwss.program_output_csc(dc,
+					pipes,
+					stream->output_color_space,
+					stream->csc_color_matrix.matrix,
+					pipes->plane_res.hubp->opp_id);
 			ret = true;
 		}
 	}
@@ -941,7 +943,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	if (!dcb->funcs->is_accelerated_mode(dcb))
 		dc->hwss.enable_accelerated_mode(dc, context);
 
-	dc->hwss.set_bandwidth(dc, context, false);
+	dc->hwss.prepare_bandwidth(dc, context);
 
 	/* re-program planes for existing stream, in case we need to
 	 * free up plane resource for later use
@@ -957,8 +959,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	}
 
 	/* Program hardware */
-	dc->hwss.ready_shared_resources(dc, context);
-
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		pipe = &context->res_ctx.pipe_ctx[i];
 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
@@ -1012,7 +1012,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
 	/* pplib is notified if disp_num changed */
-	dc->hwss.set_bandwidth(dc, context, true);
+	dc->hwss.optimize_bandwidth(dc, context);
 
 	dc_release_state(dc->current_state);
 
@@ -1020,8 +1020,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
 	dc_retain_state(dc->current_state);
 
-	dc->hwss.optimize_shared_resources(dc);
-
 	return result;
 }
 
@@ -1063,7 +1061,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 
 	dc->optimized_required = false;
 
-	dc->hwss.set_bandwidth(dc, context, true);
+	dc->hwss.optimize_bandwidth(dc, context);
 	return true;
 }
 
@@ -1369,35 +1367,6 @@ static struct dc_stream_status *stream_get_status(
 
 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
 
-static void notify_display_count_to_smu(
-		struct dc *dc,
-		struct dc_state *context)
-{
-	int i, display_count;
-	struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-
-	/*
-	 * if function pointer not set up, this message is
-	 * sent as part of pplib_apply_display_requirements.
-	 * So just return.
-	 */
-	if (!pp_smu || !pp_smu->set_display_count)
-		return;
-
-	display_count = 0;
-	for (i = 0; i < context->stream_count; i++) {
-		const struct dc_stream_state *stream = context->streams[i];
-
-		/* only notify active stream */
-		if (stream->dpms_off)
-			continue;
-
-		display_count++;
-	}
-
-	pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
-}
-
 static void commit_planes_do_stream_update(struct dc *dc,
 		struct dc_stream_state *stream,
 		struct dc_stream_update *stream_update,
@@ -1422,7 +1391,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
 					stream_update->adjust->v_total_max);
 
 			if (stream_update->periodic_fn_vsync_delta &&
-					pipe_ctx->stream_res.tg &&
 					pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
 				pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
 					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
@@ -1448,19 +1416,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
 			if (stream_update->dpms_off) {
 				if (*stream_update->dpms_off) {
 					core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
-					dc->hwss.pplib_apply_display_requirements(
-						dc, dc->current_state);
-					notify_display_count_to_smu(dc, dc->current_state);
+					dc->hwss.optimize_bandwidth(dc, dc->current_state);
 				} else {
-					dc->hwss.pplib_apply_display_requirements(
-						dc, dc->current_state);
-					notify_display_count_to_smu(dc, dc->current_state);
+					dc->hwss.prepare_bandwidth(dc, dc->current_state);
 					core_link_enable_stream(dc->current_state, pipe_ctx);
 				}
 			}
 
-
-
 			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
 				if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
 					// if otg funcs defined check if blanked before programming
@@ -1487,7 +1449,7 @@ static void commit_planes_for_stream(struct dc *dc,
 	struct pipe_ctx *top_pipe_to_program = NULL;
 
 	if (update_type == UPDATE_TYPE_FULL) {
-		dc->hwss.set_bandwidth(dc, context, false);
+		dc->hwss.prepare_bandwidth(dc, context);
 		context_clock_trace(dc, context);
 	}
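
The set_bandwidth(dc, context, safe_to_lower) hook is split here into two
explicit phases: prepare_bandwidth() raises clocks before a more demanding
configuration is programmed, and optimize_bandwidth() lowers them once the
new state is committed. A minimal sketch of the resulting call pattern,
with program_pipes() standing in (hypothetically) for the pipe programming
done above:

    static void commit_full_update(struct dc *dc, struct dc_state *context)
    {
            /* raise clocks before touching the hardware, so the new,
             * possibly more demanding configuration is safe to program */
            dc->hwss.prepare_bandwidth(dc, context);

            program_pipes(dc, context);     /* hypothetical helper */

            /* lower clocks only once the new state is live; the old
             * safe_to_lower flag is implied by which hook runs */
            dc->hwss.optimize_bandwidth(dc, context);
    }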
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index e1ebdf7..73d0495 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -311,7 +311,7 @@ void context_timing_trace(
 {
 	int i;
 	struct dc  *core_dc = dc;
-	int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
+	int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};
 	struct crtc_position position;
 	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
 	DC_LOGGER_INIT(dc->ctx->logger);
@@ -322,8 +322,7 @@ void context_timing_trace(
 		/* get_position() returns CRTC vertical/horizontal counter
 		 * hence not applicable for underlay pipe
 		 */
-		if (pipe_ctx->stream == NULL
-				 || pipe_ctx->pipe_idx == underlay_idx)
+		if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
 			continue;
 
 		pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
@@ -333,7 +332,7 @@ void context_timing_trace(
 	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 
-		if (pipe_ctx->stream == NULL)
+		if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
 			continue;
 
 		TIMING_TRACE("OTG_%d   H_tot:%d  V_tot:%d   H_pos:%d  V_pos:%d\n",
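
The new " = {0}" initializers matter because any pipe skipped by the
NULL-stream/underlay checks never writes its h_pos/v_pos slot, while the
trace loop reads every remaining slot. A standalone sketch of the failure
mode (MAX_PIPES stood in by 6):

    #include <stdio.h>

    int main(void)
    {
            int h_pos[6];              /* indeterminate until written */
            int h_pos_fixed[6] = {0};  /* skipped pipes read back as 0 */

            h_pos[0] = h_pos_fixed[0] = 100;  /* only pipe 0 is traced */

            /* reading h_pos[1] is undefined; h_pos_fixed[1] is a clean 0 */
            printf("%d\n", h_pos_fixed[1]);
            return 0;
    }
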
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 5da2186..7ee9c03 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1357,28 +1357,13 @@ static enum dc_status enable_link_dp(
 	struct dc_link *link = stream->sink->link;
 	struct dc_link_settings link_settings = {0};
 	enum dp_panel_mode panel_mode;
-	enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
 
 	/* get link settings for video mode timing */
 	decide_link_settings(stream, &link_settings);
 
-	/* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS
-	 * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
-	 */
-	if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
-		max_link_rate = LINK_RATE_HIGH3;
-
-	if (link_settings.link_rate == max_link_rate) {
-		struct dc_clocks clocks = state->bw.dcn.clk;
-
-		/* dce/dcn compat, do not update dispclk */
-		clocks.dispclk_khz = 0;
-		/* 27mhz = 27000000hz= 27000khz */
-		clocks.phyclk_khz = link_settings.link_rate * 27000;
-
-		state->dis_clk->funcs->update_clocks(
-				state->dis_clk, &clocks, false);
-	}
+	pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+			link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+	state->dccg->funcs->update_clocks(state->dccg, state, false);
 
 	dp_enable_link_phy(
 		link,
@@ -2156,14 +2141,16 @@ int dc_link_get_backlight_level(const struct dc_link *link)
 {
 	struct abm *abm = link->ctx->dc->res_pool->abm;
 
-	if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+	if (abm == NULL || abm->funcs->get_current_backlight == NULL)
 		return DC_ERROR_UNEXPECTED;
 
-	return (int) abm->funcs->get_current_backlight_8_bit(abm);
+	return (int) abm->funcs->get_current_backlight(abm);
 }
 
-bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
-		uint32_t frame_ramp, const struct dc_stream_state *stream)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+		uint32_t backlight_pwm_u16_16,
+		uint32_t frame_ramp,
+		const struct dc_stream_state *stream)
 {
 	struct dc  *core_dc = link->ctx->dc;
 	struct abm *abm = core_dc->res_pool->abm;
@@ -2175,19 +2162,17 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
 
 	if ((dmcu == NULL) ||
 		(abm == NULL) ||
-		(abm->funcs->set_backlight_level == NULL))
+		(abm->funcs->set_backlight_level_pwm == NULL))
 		return false;
 
-	if (stream) {
-		if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
-			frame_ramp = 0;
-
-		((struct dc_stream_state *)stream)->bl_pwm_level = level;
-	}
+	if (stream)
+		((struct dc_stream_state *)stream)->bl_pwm_level =
+				backlight_pwm_u16_16;
 
 	use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
-	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
+	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+			backlight_pwm_u16_16, backlight_pwm_u16_16);
 
 	if (dc_is_embedded_signal(link->connector_signal)) {
 		if (stream != NULL) {
@@ -2204,9 +2189,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
 						1;
 			}
 		}
-		abm->funcs->set_backlight_level(
+		abm->funcs->set_backlight_level_pwm(
 				abm,
-				level,
+				backlight_pwm_u16_16,
 				frame_ramp,
 				controller_id,
 				use_smooth_brightness);
@@ -2220,7 +2205,7 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
 	struct dc  *core_dc = link->ctx->dc;
 	struct abm *abm = core_dc->res_pool->abm;
 
-	if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+	if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
 		return false;
 
 	abm->funcs->set_abm_immediate_disable(abm);
@@ -2609,6 +2594,10 @@ void core_link_enable_stream(
 		core_dc->hwss.unblank_stream(pipe_ctx,
 			&pipe_ctx->stream->sink->link->cur_link_settings);
 
+		dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
+				pipe_ctx->stream->bl_pwm_level,
+				0,
+				pipe_ctx->stream);
 	}
 
 }
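
enable_link_dp() now just records the symbol clock the link needs and
hands the decision to the clock manager, instead of open-coding the HBR3
special case. Link-rate codes count in units of 27 MHz (the removed
comment's "27mhz = 27000000hz = 27000khz"), so, as a sketch (assuming
LINK_RATE_HIGH3 = 0x1E and LINK_RATE_REF_FREQ_IN_KHZ = 27000):

    int link_rate = 0x1E;                   /* LINK_RATE_HIGH3 (HBR3) */
    int sym_clk_khz = link_rate * 27000;    /* LINK_RATE_REF_FREQ_IN_KHZ */
    /* 810000 kHz; get_max_pixel_clock_for_all_paths() in the new
     * dce_clk_mgr.c below folds requested_sym_clk into the required
     * clock state, subsuming the HBR3 check deleted here. */
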
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index b6fe29b..fc65b00 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -499,8 +499,13 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
 	bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
 
+
 	/*
-	 * Need to calculate the scan direction for viewport to properly determine offset
+	 * We need to take horizontal mirror into account. On an unrotated surface this
+	 * means that the viewport offset is actually the offset from the other side of
+	 * the source image, so we have to subtract the right edge of the viewport from
+	 * the right edge of the source window. Similarly to mirror, we need to take into
+	 * account how the offset is affected for 180/270 rotations.
 	 */
 	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
 		flip_vert_scan_dir = true;
@@ -510,6 +515,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 	else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
 		flip_horz_scan_dir = true;
 
+	if (pipe_ctx->plane_state->horizontal_mirror)
+		flip_horz_scan_dir = !flip_horz_scan_dir;
+
 	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
 		stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
 		pri_split = false;
@@ -540,45 +548,27 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 			plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
 
 	/* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
+	 * note: surf_src.ofs should be added after rotation/mirror offset direction
+	 *       adjustment since it is already in viewport space
 	 * num_pixels = clip.num_pix * scl_ratio
 	 */
-	data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) *
+	data->viewport.x = (clip.x - plane_state->dst_rect.x) *
 			surf_src.width / plane_state->dst_rect.width;
 	data->viewport.width = clip.width *
 			surf_src.width / plane_state->dst_rect.width;
 
-	data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) *
+	data->viewport.y = (clip.y - plane_state->dst_rect.y) *
 			surf_src.height / plane_state->dst_rect.height;
 	data->viewport.height = clip.height *
 			surf_src.height / plane_state->dst_rect.height;
 
-	/* To transfer the x, y to correct coordinate on mirror image (camera).
-	 * deg  0 : transfer x,
-	 * deg 90 : don't need to transfer,
-	 * deg180 : transfer y,
-	 * deg270 : transfer x and y.
-	 * To transfer the x, y to correct coordinate on non-mirror image (video).
-	 * deg  0 : don't need to transfer,
-	 * deg 90 : transfer y,
-	 * deg180 : transfer x and y,
-	 * deg270 : transfer x.
-	 */
-	if (pipe_ctx->plane_state->horizontal_mirror) {
-		if (flip_horz_scan_dir && !flip_vert_scan_dir) {
-			data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
-			data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
-		} else if (flip_horz_scan_dir && flip_vert_scan_dir)
-			data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
-		else {
-			if (!flip_horz_scan_dir && !flip_vert_scan_dir)
-				data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
-		}
-	} else {
-		if (flip_horz_scan_dir)
-			data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
-		if (flip_vert_scan_dir)
-			data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
-	}
+	if (flip_vert_scan_dir)
+		data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
+	if (flip_horz_scan_dir)
+		data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
+
+	data->viewport.x += surf_src.x;
+	data->viewport.y += surf_src.y;
 
 	/* Round down, compensate in init */
 	data->viewport_c.x = data->viewport.x / vpc_div;
@@ -773,22 +763,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *r
 	else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
 		flip_horz_scan_dir = true;
 
+	if (pipe_ctx->plane_state->horizontal_mirror)
+			flip_horz_scan_dir = !flip_horz_scan_dir;
+
 	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
 			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
 		rect_swap_helper(&src);
 		rect_swap_helper(&data->viewport_c);
 		rect_swap_helper(&data->viewport);
-
-		if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 &&
-			pipe_ctx->plane_state->horizontal_mirror) {
-			flip_vert_scan_dir = true;
-		}
-		if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 &&
-			pipe_ctx->plane_state->horizontal_mirror) {
-			flip_vert_scan_dir = false;
-		}
-	} else if (pipe_ctx->plane_state->horizontal_mirror)
-			flip_horz_scan_dir = !flip_horz_scan_dir;
+	}
 
 	/*
 	 * Init calculated according to formula:
@@ -1115,9 +1098,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
 			pipe_ctx->plane_state->format);
 
-	if (pipe_ctx->stream->timing.flags.INTERLACE)
-		pipe_ctx->stream->dst.height *= 2;
-
 	calculate_scaling_ratios(pipe_ctx);
 
 	calculate_viewport(pipe_ctx);
@@ -1138,9 +1118,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
 	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
 	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
-	if (pipe_ctx->stream->timing.flags.INTERLACE)
-		pipe_ctx->plane_res.scl_data.v_active *= 2;
-
 
 	/* Taps calculations */
 	if (pipe_ctx->plane_res.xfm != NULL)
@@ -1185,9 +1162,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 				plane_state->dst_rect.x,
 				plane_state->dst_rect.y);
 
-	if (pipe_ctx->stream->timing.flags.INTERLACE)
-		pipe_ctx->stream->dst.height /= 2;
-
 	return res;
 }
 
@@ -2071,7 +2045,7 @@ void dc_resource_state_construct(
 		const struct dc *dc,
 		struct dc_state *dst_ctx)
 {
-	dst_ctx->dis_clk = dc->res_pool->dccg;
+	dst_ctx->dccg = dc->res_pool->clk_mgr;
 }
 
 enum dc_status dc_validate_global_state(
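
The rewritten viewport math computes the scaled clip offset first, flips
it against the source extent for each mirrored/rotated axis, and only then
adds surf_src.ofs, which is already in viewport space and must not be
flipped. A minimal single-axis sketch of that ordering (illustrative
types, not the driver's):

    struct span { int x, width; };

    static int viewport_x(struct span surf_src, int scaled_clip_ofs,
                    int vp_width, int flip_horz_scan_dir)
    {
            int x = scaled_clip_ofs;

            /* mirror/rotation: measure from the far edge instead */
            if (flip_horz_scan_dir)
                    x = surf_src.width - x - vp_width;

            /* add the source offset last: already in viewport space */
            return x + surf_src.x;
    }
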
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 2ac848a1..e113439 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -106,6 +106,7 @@ static void construct(struct dc_stream_state *stream,
 
 	stream->out_transfer_func = dc_create_transfer_func();
 	stream->out_transfer_func->type = TF_TYPE_BYPASS;
+	stream->out_transfer_func->ctx = stream->ctx;
 }
 
 static void destruct(struct dc_stream_state *stream)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 8fb3aef..c60c9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -44,6 +44,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
 
 	plane_state->in_transfer_func = dc_create_transfer_func();
 	plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+	plane_state->in_transfer_func->ctx = ctx;
 }
 
 static void destruct(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index b57fa61..d16a20c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.68"
+#define DC_VER "3.2.04"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
@@ -250,8 +250,6 @@ struct dc_debug_options {
 	bool disable_dmcu;
 	bool disable_psr;
 	bool force_abm_enable;
-	bool disable_hbup_pg;
-	bool disable_dpp_pg;
 	bool disable_stereo_support;
 	bool vsr_support;
 	bool performance_trace;
@@ -305,11 +303,6 @@ struct dc {
 	struct hw_sequencer_funcs hwss;
 	struct dce_hwseq *hwseq;
 
-	/* temp store of dm_pp_display_configuration
-	 * to compare to see if display config changed
-	 */
-	struct dm_pp_display_configuration prev_display_config;
-
 	bool optimized_required;
 
 	/* FBC compressor */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 8130b95..a8b3ced 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -86,6 +86,10 @@ struct dc_vbios_funcs {
 
 	bool (*is_accelerated_mode)(
 		struct dc_bios *bios);
+	bool (*is_active_display)(
+		struct dc_bios *bios,
+		enum signal_type signal,
+		const struct connector_device_tag_info *device_tag);
 	void (*set_scratch_critical_state)(
 		struct dc_bios *bios,
 		bool state);
@@ -141,6 +145,7 @@ struct dc_vbios_funcs {
 };
 
 struct bios_registers {
+	uint32_t BIOS_SCRATCH_0;
 	uint32_t BIOS_SCRATCH_3;
 	uint32_t BIOS_SCRATCH_6;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 3bfdccc..8738f27 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -138,9 +138,14 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
 	return dc->links[link_index];
 }
 
-/* Set backlight level of an embedded panel (eDP, LVDS). */
-bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
-		uint32_t frame_ramp, const struct dc_stream_state *stream);
+/* Set backlight level of an embedded panel (eDP, LVDS).
+ * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
+ * and 16 bit fractional, where 1.0 is max backlight value.
+ */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link,
+		uint32_t backlight_pwm_u16_16,
+		uint32_t frame_ramp,
+		const struct dc_stream_state *stream);
 
 int dc_link_get_backlight_level(const struct dc_link *dc_link);
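
dc_link_set_backlight_level() now takes an unsigned 16.16 fixed-point PWM
duty cycle, with 0x10000 meaning full brightness. A caller still holding a
legacy 8-bit level can convert with the multiply-by-0x10101 rounding
scheme used by the driver path this patch removes from dce_abm.c (a
sketch, not a helper this patch adds):

    static uint32_t backlight_8_bit_to_u16_16(uint8_t level)
    {
            /* 8-bit -> 24-bit, effectively multiplying by 256/255,
             * e.g. 0xEF * 0x10101 = 0xEFEFEF */
            uint32_t bl = level * 0x10101;

            /* keep the upper 16 bits, rounding on the MSB of the
             * discarded byte: 0xEF -> 0xEFF0, 0xFF -> 0x10000 */
            return (bl >> 8) + ((bl >> 7) & 1);
    }

    /* e.g. dc_link_set_backlight_level(link,
     *              backlight_8_bit_to_u16_16(0xEF), 0, stream); */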
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 8f7f0e8..6d7b64a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
 
 DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
 dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
+dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
 dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
 
 AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 29294db..2a342ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -54,7 +54,7 @@
 #define MCP_DISABLE_ABM_IMMEDIATELY 255
 
 
-static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
 {
 	uint64_t current_backlight;
 	uint32_t round_result;
@@ -103,45 +103,21 @@ static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
 	return (uint32_t)(current_backlight);
 }
 
-static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
+static void driver_set_backlight_level(struct dce_abm *abm_dce,
+		uint32_t backlight_pwm_u16_16)
 {
-	uint32_t backlight_24bit;
-	uint32_t backlight_17bit;
 	uint32_t backlight_16bit;
 	uint32_t masked_pwm_period;
-	uint8_t rounding_bit;
 	uint8_t bit_count;
 	uint64_t active_duty_cycle;
 	uint32_t pwm_period_bitcnt;
 
 	/*
-	 * 1. Convert 8-bit value to 17 bit U1.16 format
-	 * (1 integer, 16 fractional bits)
-	 */
-
-	/* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
-	 * effectively multiplying value by 256/255
-	 * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
-	 */
-	backlight_24bit = level * 0x10101;
-
-	/* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8
-	 * used for rounding, take most significant bit of fraction for
-	 * rounding, e.g. for 0xEFEFEF, rounding bit is 1
-	 */
-	rounding_bit = (backlight_24bit >> 7) & 1;
-
-	/* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
-	 * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
-	 */
-	backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
-
-	/*
-	 * 2. Find  16 bit backlight active duty cycle, where 0 <= backlight
+	 * 1. Find  16 bit backlight active duty cycle, where 0 <= backlight
 	 * active duty cycle <= backlight period
 	 */
 
-	/* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+	/* 1.1 Apply bitmask for backlight period value based on value of BITCNT
 	 */
 	REG_GET_2(BL_PWM_PERIOD_CNTL,
 			BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
@@ -155,13 +131,13 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
 	/* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
 	masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
 
-	/* 2.2 Calculate integer active duty cycle required upper 16 bits
+	/* 1.2 Calculate integer active duty cycle required upper 16 bits
 	 * contain integer component, lower 16 bits contain fractional component
 	 * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
 	 */
-	active_duty_cycle = backlight_17bit * masked_pwm_period;
+	active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
 
-	/* 2.3 Calculate 16 bit active duty cycle from integer and fractional
+	/* 1.3 Calculate 16 bit active duty cycle from integer and fractional
 	 * components shift by bitCount then mask 16 bits and add rounding bit
 	 * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
 	 */
@@ -170,23 +146,23 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
 	backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
 
 	/*
-	 * 3. Program register with updated value
+	 * 2. Program register with updated value
 	 */
 
-	/* 3.1 Lock group 2 backlight registers */
+	/* 2.1 Lock group 2 backlight registers */
 
 	REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
 			BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
 			BL_PWM_GRP1_REG_LOCK, 1);
 
-	// 3.2 Write new active duty cycle
+	// 2.2 Write new active duty cycle
 	REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
 
-	/* 3.3 Unlock group 2 backlight registers */
+	/* 2.3 Unlock group 2 backlight registers */
 	REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
 			BL_PWM_GRP1_REG_LOCK, 0);
 
-	/* 5.4.4 Wait for pending bit to be cleared */
+	/* 3 Wait for pending bit to be cleared */
 	REG_WAIT(BL_PWM_GRP1_REG_LOCK,
 			BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
 			1, 10000);
@@ -194,16 +170,21 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
 
 static void dmcu_set_backlight_level(
 	struct dce_abm *abm_dce,
-	uint32_t level,
+	uint32_t backlight_pwm_u16_16,
 	uint32_t frame_ramp,
 	uint32_t controller_id)
 {
-	unsigned int backlight_16_bit = (level * 0x10101) >> 8;
-	unsigned int backlight_17_bit = backlight_16_bit +
-				(((backlight_16_bit & 0x80) >> 7) & 1);
+	unsigned int backlight_8_bit = 0;
 	uint32_t rampingBoundary = 0xFFFF;
 	uint32_t s2;
 
+	if (backlight_pwm_u16_16 & 0x10000)
+		// Check for max backlight condition
+		backlight_8_bit = 0xFF;
+	else
+		// Take MSB of fractional part since backlight is not max
+		backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
 	/* set ramping boundary */
 	REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
 
@@ -220,7 +201,7 @@ static void dmcu_set_backlight_level(
 			0, 1, 80000);
 
 	/* setDMCUParam_BL */
-	REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit);
+	REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
 
 	/* write ramp */
 	if (controller_id == 0)
@@ -237,9 +218,9 @@ static void dmcu_set_backlight_level(
 	s2 = REG_READ(BIOS_SCRATCH_2);
 
 	s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
-	level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+	backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
 				ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
-	s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+	s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
 
 	REG_WRITE(BIOS_SCRATCH_2, s2);
 }
@@ -247,7 +228,7 @@ static void dmcu_set_backlight_level(
 static void dce_abm_init(struct abm *abm)
 {
 	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-	unsigned int backlight = get_current_backlight_16_bit(abm_dce);
+	unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
 
 	REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
 	REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -284,12 +265,26 @@ static void dce_abm_init(struct abm *abm)
 			ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
 }
 
-static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm)
+static unsigned int dce_abm_get_current_backlight(struct abm *abm)
 {
 	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 	unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
 
-	return (backlight >> 8);
+	/* return backlight in hardware format which is unsigned 17 bits, with
+	 * 1 bit integer and 16 bit fractional
+	 */
+	return backlight;
+}
+
+static unsigned int dce_abm_get_target_backlight(struct abm *abm)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+	unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+	/* return backlight in hardware format which is unsigned 17 bits, with
+	 * 1 bit integer and 16 bit fractional
+	 */
+	return backlight;
 }
 
 static bool dce_abm_set_level(struct abm *abm, uint32_t level)
@@ -396,9 +391,9 @@ static bool dce_abm_init_backlight(struct abm *abm)
 	return true;
 }
 
-static bool dce_abm_set_backlight_level(
+static bool dce_abm_set_backlight_level_pwm(
 		struct abm *abm,
-		unsigned int backlight_level,
+		unsigned int backlight_pwm_u16_16,
 		unsigned int frame_ramp,
 		unsigned int controller_id,
 		bool use_smooth_brightness)
@@ -406,16 +401,16 @@ static bool dce_abm_set_backlight_level(
 	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
 	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
-			backlight_level, backlight_level);
+			backlight_pwm_u16_16, backlight_pwm_u16_16);
 
 	/* If DMCU is in reset state, DMCU is uninitialized */
 	if (use_smooth_brightness)
 		dmcu_set_backlight_level(abm_dce,
-				backlight_level,
+				backlight_pwm_u16_16,
 				frame_ramp,
 				controller_id);
 	else
-		driver_set_backlight_level(abm_dce, backlight_level);
+		driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
 
 	return true;
 }
@@ -424,8 +419,9 @@ static const struct abm_funcs dce_funcs = {
 	.abm_init = dce_abm_init,
 	.set_abm_level = dce_abm_set_level,
 	.init_backlight = dce_abm_init_backlight,
-	.set_backlight_level = dce_abm_set_backlight_level,
-	.get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
+	.set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
+	.get_current_backlight = dce_abm_get_current_backlight,
+	.get_target_backlight = dce_abm_get_target_backlight,
 	.set_abm_immediate_disable = dce_abm_immediate_disable
 };
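
In the non-DMCU path the u16.16 level becomes a register duty cycle by
multiplying it with the masked PWM period and shifting by the period's bit
count. Working through the example values from the comments above (level
0xEFF0, masked period 0x24, bit_count 6; the comment's "& 0xFFF" reads
like a typo for 0xFFFF):

    uint64_t active_duty_cycle = 0xEFF0ULL * 0x24;                /* 0x21BDC0 */
    uint32_t backlight_16bit = (active_duty_cycle >> 6) & 0xFFFF; /* 0x86F7 */

    /* round on the MSB of the dropped fraction: + 0 in this case */
    backlight_16bit += (active_duty_cycle >> 5) & 0x1;
    /* 0x86F7 is what lands in BL_ACTIVE_INT_FRAC_CNT */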
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
new file mode 100644
index 0000000..9a28a04
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -0,0 +1,879 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "dmcu.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+	container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+	(clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+	clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+	clk_mgr->ctx->logger
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - not supposed to be used currently, per the HW design team*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - not supposed to be used currently, per the HW design team*/
+{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+
+static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - not supposed to be used currently, per the HW design team*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
+static int dentist_get_divider_from_did(int did)
+{
+	if (did < DENTIST_BASE_DID_1)
+		did = DENTIST_BASE_DID_1;
+	if (did > DENTIST_MAX_DID)
+		did = DENTIST_MAX_DID;
+
+	if (did < DENTIST_BASE_DID_2) {
+		return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+							* (did - DENTIST_BASE_DID_1);
+	} else if (did < DENTIST_BASE_DID_3) {
+		return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+							* (did - DENTIST_BASE_DID_2);
+	} else if (did < DENTIST_BASE_DID_4) {
+		return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+							* (did - DENTIST_BASE_DID_3);
+	} else {
+		return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
+							* (did - DENTIST_BASE_DID_4);
+	}
+}
+
+/* SW will adjust the DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, in all cases:
+ * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled
+ *   with SW calculations for DS_INCR/DS_MODULO (planned to be the
+ *   default case)
+ * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled
+ *   with HW calculations (not planned to be used, but the average clock
+ *   should still be valid)
+ * - if SS is enabled on the DP Ref clock and HW de-spreading is disabled
+ *   (should not be the case with CIK); then SW should program all rates
+ *   generated according to the average value (as with previous ASICs)
+ */
+static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
+{
+	if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
+		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+				dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
+							clk_mgr_dce->dprefclk_ss_divider), 200);
+		struct fixed31_32 adj_dp_ref_clk_khz;
+
+		ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+		adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+	}
+	return dp_ref_clk_khz;
+}
+
+static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	int dprefclk_wdivider;
+	int dprefclk_src_sel;
+	int dp_ref_clk_khz = 600000;
+	int target_div;
+
+	/* ASSERT DP Reference Clock source is from DFS*/
+	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+	ASSERT(dprefclk_src_sel == 0);
+
+	/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+	 * programmed DID DENTIST_DPREFCLK_WDIVIDER */
+	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+	/* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
+	target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+
+	/* Calculate the current DFS clock, in kHz.*/
+	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+		* clk_mgr_dce->dentist_vco_freq_khz) / target_div;
+
+	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
+}
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+
+	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
+}
+
+/* unit: in_khz before mode set, get pixel clock from context. ASIC register
+ * may not be programmed yet
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
+{
+	uint32_t max_pix_clk = 0;
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		if (pipe_ctx->stream == NULL)
+			continue;
+
+		/* do not check underlay */
+		if (pipe_ctx->top_pipe)
+			continue;
+
+		if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+			max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+		/* raise clock state for HBR3/2 if required. Confirmed with HW:
+		 * DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the VDDC rail
+		 */
+		if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+				pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
+			max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
+	}
+
+	return max_pix_clk;
+}
+
+static enum dm_pp_clocks_state dce_get_required_clocks_state(
+	struct clk_mgr *clk_mgr,
+	struct dc_state *context)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	int i;
+	enum dm_pp_clocks_state low_req_clk;
+	int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+
+	/* Iterate from highest supported to lowest valid state, and update
+	 * lowest RequiredState with the lowest state that satisfies
+	 * all required clocks
+	 */
+	for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+		if (context->bw.dce.dispclk_khz >
+				clk_mgr_dce->max_clks_by_state[i].display_clk_khz
+			|| max_pix_clk >
+				clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
+			break;
+
+	low_req_clk = i + 1;
+	if (low_req_clk > clk_mgr_dce->max_clks_state) {
+		/* set max clock state for high phyclock, invalid on exceeding display clock */
+		if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
+				< context->bw.dce.dispclk_khz)
+			low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+		else
+			low_req_clk = clk_mgr_dce->max_clks_state;
+	}
+
+	return low_req_clk;
+}
+
+static int dce_set_clock(
+	struct clk_mgr *clk_mgr,
+	int requested_clk_khz)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+	int actual_clock = requested_clk_khz;
+	struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
+
+	/* Make sure requested clock isn't lower than minimum threshold*/
+	if (requested_clk_khz > 0)
+		requested_clk_khz = max(requested_clk_khz,
+				clk_mgr_dce->dentist_vco_freq_khz / 64);
+
+	/* Prepare to program display clock*/
+	pxl_clk_params.target_pixel_clock = requested_clk_khz;
+	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+	if (clk_mgr_dce->dfs_bypass_active)
+		pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+
+	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+	if (clk_mgr_dce->dfs_bypass_active) {
+		/* Cache the fixed display clock*/
+		clk_mgr_dce->dfs_bypass_disp_clk =
+			pxl_clk_params.dfs_bypass_display_clock;
+		actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+	}
+
+	/* Coming out of power down / HW reset we need to mark the clock state
+	 * as ClocksStateNominal, so on resume we will call the pplib voltage regulator. */
+	if (requested_clk_khz == 0)
+		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+	dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+
+	return actual_clock;
+}
+
+int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct bp_set_dce_clock_parameters dce_clk_params;
+	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+	struct dc *core_dc = clk_mgr->ctx->dc;
+	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	int actual_clock = requested_clk_khz;
+	/* Prepare to program display clock*/
+	memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+	/* Make sure requested clock isn't lower than minimum threshold*/
+	if (requested_clk_khz > 0)
+		requested_clk_khz = max(requested_clk_khz,
+				clk_mgr_dce->dentist_vco_freq_khz / 62);
+
+	dce_clk_params.target_clock_frequency = requested_clk_khz;
+	dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+	dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+	bp->funcs->set_dce_clock(bp, &dce_clk_params);
+	actual_clock = dce_clk_params.target_clock_frequency;
+
+	/* Coming out of power down / HW reset we need to mark the clock state
+	 * as ClocksStateNominal, so on resume we will call the pplib voltage regulator. */
+	if (requested_clk_khz == 0)
+		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+	/*Program DP ref Clock*/
+	/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+	dce_clk_params.target_clock_frequency = 0;
+	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+	if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))
+		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+			(dce_clk_params.pll_id ==
+					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+	else
+		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+
+	bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+		if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
+			dmcu->funcs->set_psr_wait_loop(dmcu,
+					actual_clock / 1000 / 7);
+	}
+
+	clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
+	return actual_clock;
+}
+
+static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+	struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
+	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+	struct integrated_info info = { { { 0 } } };
+	struct dc_firmware_info fw_info = { { 0 } };
+	int i;
+
+	if (bp->integrated_info)
+		info = *bp->integrated_info;
+
+	clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+	if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+		bp->funcs->get_firmware_info(bp, &fw_info);
+		clk_mgr_dce->dentist_vco_freq_khz =
+			fw_info.smu_gpu_pll_output_freq;
+		if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+			clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+	}
+
+	/*update the maximum display clock for each power state*/
+	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+		enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+
+		switch (i) {
+		case 0:
+			clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+			break;
+
+		case 1:
+			clk_state = DM_PP_CLOCKS_STATE_LOW;
+			break;
+
+		case 2:
+			clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+			break;
+
+		case 3:
+			clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+			break;
+
+		default:
+			clk_state = DM_PP_CLOCKS_STATE_INVALID;
+			break;
+		}
+
+		/* Do not allow a bad VBIOS/SBIOS to override with invalid values;
+		 * check for > 100MHz */
+		if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+			clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
+				info.disp_clk_voltage[i].max_supported_clk;
+	}
+
+	if (!debug->disable_dfs_bypass && bp->integrated_info)
+		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+			clk_mgr_dce->dfs_bypass_enabled = true;
+}
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+	int ss_info_num = bp->funcs->get_ss_entry_number(
+			bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+	if (ss_info_num) {
+		struct spread_spectrum_info info = { { 0 } };
+		enum bp_result result = bp->funcs->get_spread_spectrum_info(
+				bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+
+		/* The VBIOS keeps an entry for GPU PLL SS even if SS is not
+		 * enabled; in that case a non-zero
+		 * SSInfo.spreadSpectrumPercentage is the sign that SS is
+		 * actually enabled
+		 */
+		if (result == BP_RESULT_OK &&
+				info.spread_spectrum_percentage != 0) {
+			clk_mgr_dce->ss_on_dprefclk = true;
+			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+			if (info.type.CENTER_MODE == 0) {
+				/* TODO: Currently for DP Reference clock we
+				 * need only SS percentage for
+				 * downspread */
+				clk_mgr_dce->dprefclk_ss_percentage =
+						info.spread_spectrum_percentage;
+			}
+
+			return;
+		}
+
+		result = bp->funcs->get_spread_spectrum_info(
+				bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+
+		/* The VBIOS keeps an entry for DPREFCLK SS even if SS is not
+		 * enabled; in that case a non-zero
+		 * SSInfo.spreadSpectrumPercentage is the sign that SS is
+		 * actually enabled
+		 */
+		if (result == BP_RESULT_OK &&
+				info.spread_spectrum_percentage != 0) {
+			clk_mgr_dce->ss_on_dprefclk = true;
+			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+			if (info.type.CENTER_MODE == 0) {
+				/* Currently for DP Reference clock we
+				 * need only SS percentage for
+				 * downspread */
+				clk_mgr_dce->dprefclk_ss_percentage =
+						info.spread_spectrum_percentage;
+			}
+		}
+	}
+}
+
+void dce110_fill_display_configs(
+	const struct dc_state *context,
+	struct dm_pp_display_configuration *pp_display_cfg)
+{
+	int j;
+	int num_cfgs = 0;
+
+	for (j = 0; j < context->stream_count; j++) {
+		int k;
+
+		const struct dc_stream_state *stream = context->streams[j];
+		struct dm_pp_single_disp_config *cfg =
+			&pp_display_cfg->disp_configs[num_cfgs];
+		const struct pipe_ctx *pipe_ctx = NULL;
+
+		for (k = 0; k < MAX_PIPES; k++)
+			if (stream == context->res_ctx.pipe_ctx[k].stream) {
+				pipe_ctx = &context->res_ctx.pipe_ctx[k];
+				break;
+			}
+
+		ASSERT(pipe_ctx != NULL);
+
+		/* only notify active stream */
+		if (stream->dpms_off)
+			continue;
+
+		num_cfgs++;
+		cfg->signal = pipe_ctx->stream->signal;
+		cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+		cfg->src_height = stream->src.height;
+		cfg->src_width = stream->src.width;
+		cfg->ddi_channel_mapping =
+			stream->sink->link->ddi_channel_mapping.raw;
+		cfg->transmitter =
+			stream->sink->link->link_enc->transmitter;
+		cfg->link_settings.lane_count =
+			stream->sink->link->cur_link_settings.lane_count;
+		cfg->link_settings.link_rate =
+			stream->sink->link->cur_link_settings.link_rate;
+		cfg->link_settings.link_spread =
+			stream->sink->link->cur_link_settings.link_spread;
+		cfg->sym_clock = stream->phy_pix_clk;
+		/* Round v_refresh*/
+		cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+		cfg->v_refresh /= stream->timing.h_total;
+		cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+							/ stream->timing.v_total;
+	}
+
+	pp_display_cfg->display_count = num_cfgs;
+}
+
+static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+{
+	uint8_t j;
+	uint32_t min_vertical_blank_time = -1;
+
+	for (j = 0; j < context->stream_count; j++) {
+		struct dc_stream_state *stream = context->streams[j];
+		uint32_t vertical_blank_in_pixels = 0;
+		uint32_t vertical_blank_time = 0;
+
+		vertical_blank_in_pixels = stream->timing.h_total *
+			(stream->timing.v_total
+			 - stream->timing.v_addressable);
+
+		vertical_blank_time = vertical_blank_in_pixels
+			* 1000 / stream->timing.pix_clk_khz;
+
+		if (min_vertical_blank_time > vertical_blank_time)
+			min_vertical_blank_time = vertical_blank_time;
+	}
+
+	return min_vertical_blank_time;
+}
+
+static int determine_sclk_from_bounding_box(
+		const struct dc *dc,
+		int required_sclk)
+{
+	int i;
+
+	/*
+	 * Some asics do not give us sclk levels, so we just report the actual
+	 * required sclk
+	 */
+	if (dc->sclk_lvls.num_levels == 0)
+		return required_sclk;
+
+	for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+		if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+			return dc->sclk_lvls.clocks_in_khz[i];
+	}
+	/*
+	 * even maximum level could not satisfy requirement, this
+	 * is unexpected at this stage, should have been caught at
+	 * validation time
+	 */
+	ASSERT(0);
+	return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
+
+static void dce_pplib_apply_display_requirements(
+	struct dc *dc,
+	struct dc_state *context)
+{
+	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+
+	dce110_fill_display_configs(context, pp_display_cfg);
+
+	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce11_pplib_apply_display_requirements(
+	struct dc *dc,
+	struct dc_state *context)
+{
+	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+	pp_display_cfg->all_displays_in_sync =
+		context->bw.dce.all_displays_in_sync;
+	pp_display_cfg->nb_pstate_switch_disable =
+			context->bw.dce.nbp_state_change_enable == false;
+	pp_display_cfg->cpu_cc6_disable =
+			context->bw.dce.cpuc_state_change_enable == false;
+	pp_display_cfg->cpu_pstate_disable =
+			context->bw.dce.cpup_state_change_enable == false;
+	pp_display_cfg->cpu_pstate_separation_time =
+			context->bw.dce.blackout_recovery_time_us;
+
+	pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+		/ MEMORY_TYPE_MULTIPLIER_CZ;
+
+	pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+			dc,
+			context->bw.dce.sclk_khz);
+
+	pp_display_cfg->min_engine_clock_deep_sleep_khz
+			= context->bw.dce.sclk_deep_sleep_khz;
+
+	pp_display_cfg->avail_mclk_switch_time_us =
+						dce110_get_min_vblank_time_us(context);
+	/* TODO: dce11.2*/
+	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+	pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+
+	dce110_fill_display_configs(context, pp_display_cfg);
+
+	/* TODO: is this still applicable?*/
+	if (pp_display_cfg->display_count == 1) {
+		const struct dc_crtc_timing *timing =
+			&context->streams[0]->timing;
+
+		pp_display_cfg->crtc_index =
+			pp_display_cfg->disp_configs[0].pipe_idx;
+		pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
+	}
+
+	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct dm_pp_power_level_change_request level_change_req;
+	int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+	/* TODO: W/A for dal3 Linux; investigate why this works */
+	if (!clk_mgr_dce->dfs_bypass_active)
+		context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+	/* get max clock state from PPLIB */
+	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+	}
+
+	if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+		context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+		clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+	}
+	dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+	context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static void dce11_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct dm_pp_power_level_change_request level_change_req;
+
+	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+	/* get max clock state from PPLIB */
+	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+	}
+
+	if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+		context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+		clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+	}
+	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce112_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct dm_pp_power_level_change_request level_change_req;
+
+	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+	/* get max clock state from PPLIB */
+	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+	}
+
+	if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+		context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+		clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+	}
+	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce12_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+	int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+	int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+	/* TODO: W/A for dal3 Linux; investigate why this works */
+	if (!clk_mgr_dce->dfs_bypass_active)
+		context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+	if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+		clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
+		context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+		clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+
+		dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+	}
+
+	if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
+		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+		clock_voltage_req.clocks_in_khz = max_pix_clk;
+		clk_mgr->clks.phyclk_khz = max_pix_clk;
+
+		dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+	}
+	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+	context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static const struct clk_mgr_funcs dce120_funcs = {
+	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+	.update_clocks = dce12_update_clocks
+};
+
+static const struct clk_mgr_funcs dce112_funcs = {
+	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+	.update_clocks = dce112_update_clocks
+};
+
+static const struct clk_mgr_funcs dce110_funcs = {
+	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+	.update_clocks = dce11_update_clocks,
+};
+
+static const struct clk_mgr_funcs dce_funcs = {
+	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+	.update_clocks = dce_update_clocks
+};
+
+static void dce_clk_mgr_construct(
+	struct dce_clk_mgr *clk_mgr_dce,
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask)
+{
+	struct clk_mgr *base = &clk_mgr_dce->base;
+	struct dm_pp_static_clock_info static_clk_info = {0};
+
+	base->ctx = ctx;
+	base->funcs = &dce_funcs;
+
+	clk_mgr_dce->regs = regs;
+	clk_mgr_dce->clk_mgr_shift = clk_shift;
+	clk_mgr_dce->clk_mgr_mask = clk_mask;
+
+	clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+	clk_mgr_dce->dprefclk_ss_percentage = 0;
+	clk_mgr_dce->dprefclk_ss_divider = 1000;
+	clk_mgr_dce->ss_on_dprefclk = false;
+
+	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+		clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
+	else
+		clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+	clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+	dce_clock_read_integrated_info(clk_mgr_dce);
+	dce_clock_read_ss_info(clk_mgr_dce);
+}
+
+struct clk_mgr *dce_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask)
+{
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	memcpy(clk_mgr_dce->max_clks_by_state,
+		dce80_max_clks_by_state,
+		sizeof(dce80_max_clks_by_state));
+
+	dce_clk_mgr_construct(
+		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+	return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce110_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask)
+{
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	memcpy(clk_mgr_dce->max_clks_by_state,
+		dce110_max_clks_by_state,
+		sizeof(dce110_max_clks_by_state));
+
+	dce_clk_mgr_construct(
+		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+	clk_mgr_dce->base.funcs = &dce110_funcs;
+
+	return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce112_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask)
+{
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	memcpy(clk_mgr_dce->max_clks_by_state,
+		dce112_max_clks_by_state,
+		sizeof(dce112_max_clks_by_state));
+
+	dce_clk_mgr_construct(
+		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+	clk_mgr_dce->base.funcs = &dce112_funcs;
+
+	return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
+{
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	memcpy(clk_mgr_dce->max_clks_by_state,
+		dce120_max_clks_by_state,
+		sizeof(dce120_max_clks_by_state));
+
+	dce_clk_mgr_construct(
+		clk_mgr_dce, ctx, NULL, NULL, NULL);
+
+	clk_mgr_dce->dprefclk_khz = 600000;
+	clk_mgr_dce->base.funcs = &dce120_funcs;
+
+	return &clk_mgr_dce->base;
+}
+
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);
+
+	kfree(clk_mgr_dce);
+	*clk_mgr = NULL;
+}
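
dce_get_dp_ref_freq_khz() above derives the DP reference clock from the
DENTIST VCO frequency and the programmed DID: the DID maps to a divider
through the per-range start/step tables declared in dce_clk_mgr.h below,
and the result is DENTIST_DIVIDER_RANGE_SCALE_FACTOR * vco / divider. A
worked sketch with illustrative values (vco = 3.6 GHz, DID 0x24):

    int did = 0x24;                          /* range 1: 0x08 <= did < 0x40 */
    int divider = 8 + 1 * (did - 0x08);      /* RANGE_1 start/step -> 36, i.e. 9.00 */
    int dp_ref_khz = 4 * 3600000 / divider;  /* scale factor * vco -> 400000 kHz */

If spread spectrum is enabled on DPREFCLK, clk_mgr_adjust_dp_ref_freq_for_ss()
then scales the result by (1 - dprefclk_ss_percentage / dprefclk_ss_divider
/ 200), so DP DTO/GTC rates are programmed against the average rather than
the peak clock.
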
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
new file mode 100644
index 0000000..0460777
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_CLK_MGR_H_
+#define _DCE_CLK_MGR_H_
+
+#include "clk_mgr.h"
+#include "dccg.h"
+
+#define MEMORY_TYPE_MULTIPLIER_CZ 4
+
+#define CLK_COMMON_REG_LIST_DCE_BASE() \
+	.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+	.DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+
+#define CLK_COMMON_REG_LIST_DCN_BASE() \
+	SR(DENTIST_DISPCLK_CNTL)
+
+#define CLK_SF(reg_name, field_name, post_fix)\
+	.field_name = reg_name ## __ ## field_name ## post_fix
+
+#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+	CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+
+#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+
+#define CLK_REG_FIELD_LIST(type) \
+	type DPREFCLK_SRC_SEL; \
+	type DENTIST_DPREFCLK_WDIVIDER; \
+	type DENTIST_DISPCLK_WDIVIDER; \
+	type DENTIST_DISPCLK_CHG_DONE;
+
+struct clk_mgr_shift {
+	CLK_REG_FIELD_LIST(uint8_t)
+};
+
+struct clk_mgr_mask {
+	CLK_REG_FIELD_LIST(uint32_t)
+};
+
+struct clk_mgr_registers {
+	uint32_t DPREFCLK_CNTL;
+	uint32_t DENTIST_DISPCLK_CNTL;
+};
+
+struct state_dependent_clocks {
+	int display_clk_khz;
+	int pixel_clk_khz;
+};
+
+struct dce_clk_mgr {
+	struct clk_mgr base;
+	const struct clk_mgr_registers *regs;
+	const struct clk_mgr_shift *clk_mgr_shift;
+	const struct clk_mgr_mask *clk_mgr_mask;
+
+	struct dccg *dccg;
+
+	struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+
+	int dentist_vco_freq_khz;
+
+	/* Cache the status of the DFS-bypass feature */
+	bool dfs_bypass_enabled;
+	/* True if the DFS-bypass feature is enabled and active. */
+	bool dfs_bypass_active;
+	/* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+	 * This is basically the "Crystal Frequency In KHz" (XTALIN) frequency. */
+	int dfs_bypass_disp_clk;
+
+	/* True if spread spectrum (SS) is enabled on DPREFCLK */
+	bool ss_on_dprefclk;
+	/* DPREFCLK SS percentage (if down-spread is enabled) */
+	int dprefclk_ss_percentage;
+	/* DPREFCLK SS percentage divider (100 or 1000) */
+	int dprefclk_ss_divider;
+	int dprefclk_khz;
+
+	enum dm_pp_clocks_state max_clks_state;
+	enum dm_pp_clocks_state cur_min_clks_state;
+};
+
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+	DENTIST_BASE_DID_1 = 0x08,
+	DENTIST_BASE_DID_2 = 0x40,
+	DENTIST_BASE_DID_3 = 0x60,
+	DENTIST_BASE_DID_4 = 0x7e,
+	DENTIST_MAX_DID = 0x7f
+};
+
+/* Starting point and step size for each divider range. */
+enum dentist_divider_range {
+	DENTIST_DIVIDER_RANGE_1_START = 8,   /* 2.00  */
+	DENTIST_DIVIDER_RANGE_1_STEP  = 1,   /* 0.25  */
+	DENTIST_DIVIDER_RANGE_2_START = 64,  /* 16.00 */
+	DENTIST_DIVIDER_RANGE_2_STEP  = 2,   /* 0.50  */
+	DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+	DENTIST_DIVIDER_RANGE_3_STEP  = 4,   /* 1.00  */
+	DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+	DENTIST_DIVIDER_RANGE_4_STEP  = 264, /* 66.00 */
+	DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+};
+
+static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+{
+	return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce);
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg);
+
+void dce110_fill_display_configs(
+	const struct dc_state *context,
+	struct dm_pp_display_configuration *pp_display_cfg);
+
+int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz);
+
+struct clk_mgr *dce_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask);
+
+struct clk_mgr *dce110_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask);
+
+struct clk_mgr *dce112_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask);
+
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
+
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
+
+#endif /* _DCE_CLK_MGR_H_ */
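
The dentist_base_divider_id / dentist_divider_range enums above encode the
DENTIST DFS divider as a DID with four linear ranges; divider values are kept
scaled by DENTIST_DIVIDER_RANGE_SCALE_FACTOR (4) so the 0.25 steps of the
first range stay integral, and dce_get_dp_ref_freq_khz() recovers the DFS
output as scale_factor * VCO / divider. A sketch of the mapping (the DID and
VCO values in main() are hypothetical; 3600000 kHz is the driver's fallback):

    /* DID -> divider mapping as in dentist_get_divider_from_did().
     * Dividers are stored scaled by 4 (8 == 2.00, 9 == 2.25, ...). */
    #include <stdio.h>

    enum { DID_1 = 0x08, DID_2 = 0x40, DID_3 = 0x60, DID_4 = 0x7e,
           DID_MAX = 0x7f };

    static int divider_from_did(int did)
    {
        if (did < DID_1)
            did = DID_1;
        if (did > DID_MAX)
            did = DID_MAX;

        if (did < DID_2)
            return 8 + 1 * (did - DID_1);   /* from 2.00, step 0.25 */
        else if (did < DID_3)
            return 64 + 2 * (did - DID_2);  /* from 16.00, step 0.50 */
        else if (did < DID_4)
            return 128 + 4 * (did - DID_3); /* from 32.00, step 1.00 */
        return 248 + 264 * (did - DID_4);   /* from 62.00, step 66.00 */
    }

    int main(void)
    {
        int vco_khz = 3600000;
        int scaled_div = divider_from_did(0x1e); /* -> 30, i.e. 7.5 */

        /* the scale factor 4 cancels the x4 divider encoding */
        printf("dp_ref = %d kHz\n", 4 * vco_khz / scaled_div);
        return 0;
    }
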
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
deleted file mode 100644
index d89a097..0000000
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dce_clocks.h"
-#include "dm_services.h"
-#include "reg_helper.h"
-#include "fixed31_32.h"
-#include "bios_parser_interface.h"
-#include "dc.h"
-#include "dmcu.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include "dcn_calcs.h"
-#endif
-#include "core_types.h"
-#include "dc_types.h"
-#include "dal_asic_id.h"
-
-#define TO_DCE_CLOCKS(clocks)\
-	container_of(clocks, struct dce_dccg, base)
-
-#define REG(reg) \
-	(clk_dce->regs->reg)
-
-#undef FN
-#define FN(reg_name, field_name) \
-	clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name
-
-#define CTX \
-	clk_dce->base.ctx
-#define DC_LOGGER \
-	clk->ctx->logger
-
-/* Max clock values for each state indexed by "enum clocks_state": */
-static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
-/* ClocksStateInvalid - should not be used */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateLow */
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
-/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
-/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
-
-static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
-
-/* Starting DID for each range */
-enum dentist_base_divider_id {
-	DENTIST_BASE_DID_1 = 0x08,
-	DENTIST_BASE_DID_2 = 0x40,
-	DENTIST_BASE_DID_3 = 0x60,
-	DENTIST_BASE_DID_4 = 0x7e,
-	DENTIST_MAX_DID = 0x7f
-};
-
-/* Starting point and step size for each divider range.*/
-enum dentist_divider_range {
-	DENTIST_DIVIDER_RANGE_1_START = 8,   /* 2.00  */
-	DENTIST_DIVIDER_RANGE_1_STEP  = 1,   /* 0.25  */
-	DENTIST_DIVIDER_RANGE_2_START = 64,  /* 16.00 */
-	DENTIST_DIVIDER_RANGE_2_STEP  = 2,   /* 0.50  */
-	DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
-	DENTIST_DIVIDER_RANGE_3_STEP  = 4,   /* 1.00  */
-	DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
-	DENTIST_DIVIDER_RANGE_4_STEP  = 264, /* 66.00 */
-	DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
-};
-
-static int dentist_get_divider_from_did(int did)
-{
-	if (did < DENTIST_BASE_DID_1)
-		did = DENTIST_BASE_DID_1;
-	if (did > DENTIST_MAX_DID)
-		did = DENTIST_MAX_DID;
-
-	if (did < DENTIST_BASE_DID_2) {
-		return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
-							* (did - DENTIST_BASE_DID_1);
-	} else if (did < DENTIST_BASE_DID_3) {
-		return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
-							* (did - DENTIST_BASE_DID_2);
-	} else if (did < DENTIST_BASE_DID_4) {
-		return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
-							* (did - DENTIST_BASE_DID_3);
-	} else {
-		return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
-							* (did - DENTIST_BASE_DID_4);
-	}
-}
-
-/* SW will adjust DP REF Clock average value for all purposes
- * (DP DTO / DP Audio DTO and DP GTC)
- if clock is spread for all cases:
- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
- calculations (not planned to be used, but average clock should still
- be valid)
- -if SS enabled on DP Ref clock and HW de-spreading disabled
- (should not be case with CIK) then SW should program all rates
- generated according to average value (case as with previous ASICs)
-  */
-static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
-{
-	if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
-		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
-				dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
-							clk_dce->dprefclk_ss_divider), 200);
-		struct fixed31_32 adj_dp_ref_clk_khz;
-
-		ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
-		adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
-		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
-	}
-	return dp_ref_clk_khz;
-}
-
-static int dce_get_dp_ref_freq_khz(struct dccg *clk)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	int dprefclk_wdivider;
-	int dprefclk_src_sel;
-	int dp_ref_clk_khz = 600000;
-	int target_div;
-
-	/* ASSERT DP Reference Clock source is from DFS*/
-	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
-	ASSERT(dprefclk_src_sel == 0);
-
-	/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
-	 * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
-	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
-	/* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
-	target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
-	/* Calculate the current DFS clock, in kHz.*/
-	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
-		* clk_dce->dentist_vco_freq_khz) / target_div;
-
-	return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
-}
-
-static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-
-	return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz);
-}
-
-static enum dm_pp_clocks_state dce_get_required_clocks_state(
-	struct dccg *clk,
-	struct dc_clocks *req_clocks)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	int i;
-	enum dm_pp_clocks_state low_req_clk;
-
-	/* Iterate from highest supported to lowest valid state, and update
-	 * lowest RequiredState with the lowest state that satisfies
-	 * all required clocks
-	 */
-	for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
-		if (req_clocks->dispclk_khz >
-				clk_dce->max_clks_by_state[i].display_clk_khz
-			|| req_clocks->phyclk_khz >
-				clk_dce->max_clks_by_state[i].pixel_clk_khz)
-			break;
-
-	low_req_clk = i + 1;
-	if (low_req_clk > clk->max_clks_state) {
-		/* set max clock state for high phyclock, invalid on exceeding display clock */
-		if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
-				< req_clocks->dispclk_khz)
-			low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
-		else
-			low_req_clk = clk->max_clks_state;
-	}
-
-	return low_req_clk;
-}
-
-static int dce_set_clock(
-	struct dccg *clk,
-	int requested_clk_khz)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
-	struct dc_bios *bp = clk->ctx->dc_bios;
-	int actual_clock = requested_clk_khz;
-
-	/* Make sure requested clock isn't lower than minimum threshold*/
-	if (requested_clk_khz > 0)
-		requested_clk_khz = max(requested_clk_khz,
-				clk_dce->dentist_vco_freq_khz / 64);
-
-	/* Prepare to program display clock*/
-	pxl_clk_params.target_pixel_clock = requested_clk_khz;
-	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
-
-	if (clk_dce->dfs_bypass_active)
-		pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
-
-	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
-
-	if (clk_dce->dfs_bypass_active) {
-		/* Cache the fixed display clock*/
-		clk_dce->dfs_bypass_disp_clk =
-			pxl_clk_params.dfs_bypass_display_clock;
-		actual_clock = pxl_clk_params.dfs_bypass_display_clock;
-	}
-
-	/* from power down, we need mark the clock state as ClocksStateNominal
-	 * from HWReset, so when resume we will call pplib voltage regulator.*/
-	if (requested_clk_khz == 0)
-		clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-	return actual_clock;
-}
-
-static int dce_psr_set_clock(
-	struct dccg *clk,
-	int requested_clk_khz)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	struct dc_context *ctx = clk_dce->base.ctx;
-	struct dc *core_dc = ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
-	int actual_clk_khz = requested_clk_khz;
-
-	actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
-
-	dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
-	return actual_clk_khz;
-}
-
-static int dce112_set_clock(
-	struct dccg *clk,
-	int requested_clk_khz)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	struct bp_set_dce_clock_parameters dce_clk_params;
-	struct dc_bios *bp = clk->ctx->dc_bios;
-	struct dc *core_dc = clk->ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
-	int actual_clock = requested_clk_khz;
-	/* Prepare to program display clock*/
-	memset(&dce_clk_params, 0, sizeof(dce_clk_params));
-
-	/* Make sure requested clock isn't lower than minimum threshold*/
-	if (requested_clk_khz > 0)
-		requested_clk_khz = max(requested_clk_khz,
-				clk_dce->dentist_vco_freq_khz / 62);
-
-	dce_clk_params.target_clock_frequency = requested_clk_khz;
-	dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
-	dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
-
-	bp->funcs->set_dce_clock(bp, &dce_clk_params);
-	actual_clock = dce_clk_params.target_clock_frequency;
-
-	/* from power down, we need mark the clock state as ClocksStateNominal
-	 * from HWReset, so when resume we will call pplib voltage regulator.*/
-	if (requested_clk_khz == 0)
-		clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-
-	/*Program DP ref Clock*/
-	/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
-	dce_clk_params.target_clock_frequency = 0;
-	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
-	if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
-		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
-			(dce_clk_params.pll_id ==
-					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
-	else
-		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
-
-	bp->funcs->set_dce_clock(bp, &dce_clk_params);
-
-	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
-		if (clk_dce->dfs_bypass_disp_clk != actual_clock)
-			dmcu->funcs->set_psr_wait_loop(dmcu,
-					actual_clock / 1000 / 7);
-	}
-
-	clk_dce->dfs_bypass_disp_clk = actual_clock;
-	return actual_clock;
-}
-
-static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
-{
-	struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
-	struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
-	struct integrated_info info = { { { 0 } } };
-	struct dc_firmware_info fw_info = { { 0 } };
-	int i;
-
-	if (bp->integrated_info)
-		info = *bp->integrated_info;
-
-	clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
-	if (clk_dce->dentist_vco_freq_khz == 0) {
-		bp->funcs->get_firmware_info(bp, &fw_info);
-		clk_dce->dentist_vco_freq_khz =
-			fw_info.smu_gpu_pll_output_freq;
-		if (clk_dce->dentist_vco_freq_khz == 0)
-			clk_dce->dentist_vco_freq_khz = 3600000;
-	}
-
-	/*update the maximum display clock for each power state*/
-	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
-		enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
-
-		switch (i) {
-		case 0:
-			clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
-			break;
-
-		case 1:
-			clk_state = DM_PP_CLOCKS_STATE_LOW;
-			break;
-
-		case 2:
-			clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
-			break;
-
-		case 3:
-			clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
-			break;
-
-		default:
-			clk_state = DM_PP_CLOCKS_STATE_INVALID;
-			break;
-		}
-
-		/*Do not allow bad VBIOS/SBIOS to override with invalid values,
-		 * check for > 100MHz*/
-		if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
-			clk_dce->max_clks_by_state[clk_state].display_clk_khz =
-				info.disp_clk_voltage[i].max_supported_clk;
-	}
-
-	if (!debug->disable_dfs_bypass && bp->integrated_info)
-		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
-			clk_dce->dfs_bypass_enabled = true;
-}
-
-static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
-{
-	struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
-	int ss_info_num = bp->funcs->get_ss_entry_number(
-			bp, AS_SIGNAL_TYPE_GPU_PLL);
-
-	if (ss_info_num) {
-		struct spread_spectrum_info info = { { 0 } };
-		enum bp_result result = bp->funcs->get_spread_spectrum_info(
-				bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
-
-		/* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
-		 * even if SS not enabled and in that case
-		 * SSInfo.spreadSpectrumPercentage !=0 would be sign
-		 * that SS is enabled
-		 */
-		if (result == BP_RESULT_OK &&
-				info.spread_spectrum_percentage != 0) {
-			clk_dce->ss_on_dprefclk = true;
-			clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
-			if (info.type.CENTER_MODE == 0) {
-				/* TODO: Currently for DP Reference clock we
-				 * need only SS percentage for
-				 * downspread */
-				clk_dce->dprefclk_ss_percentage =
-						info.spread_spectrum_percentage;
-			}
-
-			return;
-		}
-
-		result = bp->funcs->get_spread_spectrum_info(
-				bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
-
-		/* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
-		 * even if SS not enabled and in that case
-		 * SSInfo.spreadSpectrumPercentage !=0 would be sign
-		 * that SS is enabled
-		 */
-		if (result == BP_RESULT_OK &&
-				info.spread_spectrum_percentage != 0) {
-			clk_dce->ss_on_dprefclk = true;
-			clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
-			if (info.type.CENTER_MODE == 0) {
-				/* Currently for DP Reference clock we
-				 * need only SS percentage for
-				 * downspread */
-				clk_dce->dprefclk_ss_percentage =
-						info.spread_spectrum_percentage;
-			}
-		}
-	}
-}
-
-static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
-{
-	return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
-}
-
-static void dce12_update_clocks(struct dccg *dccg,
-			struct dc_clocks *new_clocks,
-			bool safe_to_lower)
-{
-	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
-
-	/* TODO: Investigate why this is needed to fix display corruption. */
-	new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
-		clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
-		new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-		dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-	}
-
-	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
-		clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
-		dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-	}
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
-	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
-	bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
-	int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
-	bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
-
-	/* increase clock, looking for div is 0 for current, request div is 1*/
-	if (dispclk_increase) {
-		/* already divided by 2, no need to reach target clk with 2 steps*/
-		if (cur_dpp_div)
-			return new_clocks->dispclk_khz;
-
-		/* request disp clk is lower than maximum supported dpp clk,
-		 * no need to reach target clk with two steps.
-		 */
-		if (new_clocks->dispclk_khz <= disp_clk_threshold)
-			return new_clocks->dispclk_khz;
-
-		/* target dpp clk not request divided by 2, still within threshold */
-		if (!request_dpp_div)
-			return new_clocks->dispclk_khz;
-
-	} else {
-		/* decrease clock, looking for current dppclk divided by 2,
-		 * request dppclk not divided by 2.
-		 */
-
-		/* current dpp clk not divided by 2, no need to ramp*/
-		if (!cur_dpp_div)
-			return new_clocks->dispclk_khz;
-
-		/* current disp clk is lower than current maximum dpp clk,
-		 * no need to ramp
-		 */
-		if (dccg->clks.dispclk_khz <= disp_clk_threshold)
-			return new_clocks->dispclk_khz;
-
-		/* request dpp clk need to be divided by 2 */
-		if (request_dpp_div)
-			return new_clocks->dispclk_khz;
-	}
-
-	return disp_clk_threshold;
-}
-
-static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
-	struct dc *dc = dccg->ctx->dc;
-	int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
-	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
-	int i;
-
-	/* set disp clk to dpp clk threshold */
-	dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
-
-	/* update request dpp clk division option */
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
-		if (!pipe_ctx->plane_state)
-			continue;
-
-		pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
-				pipe_ctx->plane_res.dpp,
-				request_dpp_div,
-				true);
-	}
-
-	/* If target clk not same as dppclk threshold, set to target clock */
-	if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
-		dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-
-	dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-	dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
-	dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
-}
-
-static void dcn1_update_clocks(struct dccg *dccg,
-			struct dc_clocks *new_clocks,
-			bool safe_to_lower)
-{
-	struct dc *dc = dccg->ctx->dc;
-	struct pp_smu_display_requirement_rv *smu_req_cur =
-			&dc->res_pool->pp_smu_req;
-	struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
-	struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
-	bool send_request_to_increase = false;
-	bool send_request_to_lower = false;
-
-	if (new_clocks->phyclk_khz)
-		smu_req.display_count = 1;
-	else
-		smu_req.display_count = 0;
-
-	if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
-			|| new_clocks->phyclk_khz > dccg->clks.phyclk_khz
-			|| new_clocks->fclk_khz > dccg->clks.fclk_khz
-			|| new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
-		send_request_to_increase = true;
-
-	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
-		dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
-		send_request_to_lower = true;
-	}
-
-	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
-		dccg->clks.fclk_khz = new_clocks->fclk_khz;
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
-		clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
-		smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
-
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-		send_request_to_lower = true;
-	}
-
-	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
-		dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
-		smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
-
-		send_request_to_lower = true;
-	}
-
-	if (should_set_clock(safe_to_lower,
-			new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
-		dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
-		smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
-
-		send_request_to_lower = true;
-	}
-
-	/* make sure dcf clk is before dpp clk to
-	 * make sure we have enough voltage to run dpp clk
-	 */
-	if (send_request_to_increase) {
-		/*use dcfclk to request voltage*/
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-		if (pp_smu->set_display_requirement)
-			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
-	}
-
-	/* dcn1 dppclk is tied to dispclk */
-	/* program dispclk on = as a w/a for sleep resume clock ramping issues */
-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
-			|| new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
-		dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
-		dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
-		send_request_to_lower = true;
-	}
-
-	if (!send_request_to_increase && send_request_to_lower) {
-		/*use dcfclk to request voltage*/
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-		if (pp_smu->set_display_requirement)
-			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
-	}
-
-
-	*smu_req_cur = smu_req;
-}
-#endif
-
-static void dce_update_clocks(struct dccg *dccg,
-			struct dc_clocks *new_clocks,
-			bool safe_to_lower)
-{
-	struct dm_pp_power_level_change_request level_change_req;
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
-
-	/* TODO: Investigate why this is needed to fix display corruption. */
-	if (!clk_dce->dfs_bypass_active)
-		new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
-	level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
-	/* get max clock state from PPLIB */
-	if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
-			|| level_change_req.power_level > dccg->cur_min_clks_state) {
-		if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
-			dccg->cur_min_clks_state = level_change_req.power_level;
-	}
-
-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
-		new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-		dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-	}
-}
-
-static bool dce_update_dfs_bypass(
-       struct dccg *dccg,
-       struct dc *dc,
-       struct dc_state *context,
-       int requested_clock_khz)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
-       struct resource_context *res_ctx = &context->res_ctx;
-       enum signal_type signal_type = SIGNAL_TYPE_NONE;
-       bool was_active = clk_dce->dfs_bypass_active;
-       int i;
-
-       /* Disable DFS bypass by default. */
-       clk_dce->dfs_bypass_active = false;
-
-       /* Check that DFS bypass is available. */
-       if (!clk_dce->dfs_bypass_enabled)
-               goto update;
-
-       /* Check if the requested display clock is below the threshold. */
-       if (requested_clock_khz >= 400000)
-               goto update;
-
-       /* DFS-bypass should only be enabled on single stream setups */
-       if (context->stream_count != 1)
-               goto update;
-
-       /* Check that the stream's signal type is an embedded panel */
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               if (res_ctx->pipe_ctx[i].stream) {
-                       struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
-                       signal_type = pipe_ctx->stream->sink->link->connector_signal;
-                       break;
-               }
-       }
-
-       if (signal_type == SIGNAL_TYPE_EDP ||
-               signal_type == SIGNAL_TYPE_LVDS)
-               clk_dce->dfs_bypass_active = true;
-
-update:
-       /* Update the clock state. We don't need to respect safe_to_lower
-        * because DFS bypass should always be greater than the current
-        * display clock frequency.
-        */
-       if (was_active != clk_dce->dfs_bypass_active) {
-               dccg->clks.dispclk_khz =
-                       dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz);
-               return true;
-       }
-
-       return false;
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static const struct display_clock_funcs dcn1_funcs = {
-	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
-	.set_dispclk = dce112_set_clock,
-	.update_clocks = dcn1_update_clocks
-};
-#endif
-
-static const struct display_clock_funcs dce120_funcs = {
-	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
-	.set_dispclk = dce112_set_clock,
-	.update_clocks = dce12_update_clocks
-};
-
-static const struct display_clock_funcs dce112_funcs = {
-	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
-	.set_dispclk = dce112_set_clock,
-	.update_clocks = dce_update_clocks
-};
-
-static const struct display_clock_funcs dce110_funcs = {
-	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
-	.set_dispclk = dce_psr_set_clock,
-	.update_clocks = dce_update_clocks,
-	.update_dfs_bypass = dce_update_dfs_bypass
-};
-
-static const struct display_clock_funcs dce_funcs = {
-	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
-	.set_dispclk = dce_set_clock,
-	.update_clocks = dce_update_clocks
-};
-
-static void dce_dccg_construct(
-	struct dce_dccg *clk_dce,
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask)
-{
-	struct dccg *base = &clk_dce->base;
-
-	base->ctx = ctx;
-	base->funcs = &dce_funcs;
-
-	clk_dce->regs = regs;
-	clk_dce->clk_shift = clk_shift;
-	clk_dce->clk_mask = clk_mask;
-
-	clk_dce->dfs_bypass_disp_clk = 0;
-
-	clk_dce->dprefclk_ss_percentage = 0;
-	clk_dce->dprefclk_ss_divider = 1000;
-	clk_dce->ss_on_dprefclk = false;
-
-	base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-	base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
-
-	dce_clock_read_integrated_info(clk_dce);
-	dce_clock_read_ss_info(clk_dce);
-}
-
-struct dccg *dce_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask)
-{
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	memcpy(clk_dce->max_clks_by_state,
-		dce80_max_clks_by_state,
-		sizeof(dce80_max_clks_by_state));
-
-	dce_dccg_construct(
-		clk_dce, ctx, regs, clk_shift, clk_mask);
-
-	return &clk_dce->base;
-}
-
-struct dccg *dce110_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask)
-{
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	memcpy(clk_dce->max_clks_by_state,
-		dce110_max_clks_by_state,
-		sizeof(dce110_max_clks_by_state));
-
-	dce_dccg_construct(
-		clk_dce, ctx, regs, clk_shift, clk_mask);
-
-	clk_dce->base.funcs = &dce110_funcs;
-
-	return &clk_dce->base;
-}
-
-struct dccg *dce112_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask)
-{
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	memcpy(clk_dce->max_clks_by_state,
-		dce112_max_clks_by_state,
-		sizeof(dce112_max_clks_by_state));
-
-	dce_dccg_construct(
-		clk_dce, ctx, regs, clk_shift, clk_mask);
-
-	clk_dce->base.funcs = &dce112_funcs;
-
-	return &clk_dce->base;
-}
-
-struct dccg *dce120_dccg_create(struct dc_context *ctx)
-{
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	memcpy(clk_dce->max_clks_by_state,
-		dce120_max_clks_by_state,
-		sizeof(dce120_max_clks_by_state));
-
-	dce_dccg_construct(
-		clk_dce, ctx, NULL, NULL, NULL);
-
-	clk_dce->dprefclk_khz = 600000;
-	clk_dce->base.funcs = &dce120_funcs;
-
-	return &clk_dce->base;
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx)
-{
-	struct dc_debug_options *debug = &ctx->dc->debug;
-	struct dc_bios *bp = ctx->dc_bios;
-	struct dc_firmware_info fw_info = { { 0 } };
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	clk_dce->base.ctx = ctx;
-	clk_dce->base.funcs = &dcn1_funcs;
-
-	clk_dce->dfs_bypass_disp_clk = 0;
-
-	clk_dce->dprefclk_ss_percentage = 0;
-	clk_dce->dprefclk_ss_divider = 1000;
-	clk_dce->ss_on_dprefclk = false;
-
-	clk_dce->dprefclk_khz = 600000;
-	if (bp->integrated_info)
-		clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
-	if (clk_dce->dentist_vco_freq_khz == 0) {
-		bp->funcs->get_firmware_info(bp, &fw_info);
-		clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
-		if (clk_dce->dentist_vco_freq_khz == 0)
-			clk_dce->dentist_vco_freq_khz = 3600000;
-	}
-
-	if (!debug->disable_dfs_bypass && bp->integrated_info)
-		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
-			clk_dce->dfs_bypass_enabled = true;
-
-	dce_clock_read_ss_info(clk_dce);
-
-	return &clk_dce->base;
-}
-#endif
-
-void dce_dccg_destroy(struct dccg **dccg)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
-
-	kfree(clk_dce);
-	*dccg = NULL;
-}
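
Note the spread-spectrum handling that moves over to dce_clk_mgr.c:
dccg_adjust_dp_ref_freq_for_ss() lowers the reported average DP reference
clock when down-spread SS is enabled. In plain arithmetic the adjustment is
ref * (1 - pct / divider / 200): dividing by 100 converts the percentage to a
fraction, and dividing by 2 averages a pure down-spread. A sketch using
doubles instead of the DC fixed31_32 helpers:

    /* Plain-arithmetic version of the down-spread adjustment; the
     * real code uses dc_fixpt_* fixed-point helpers. */
    #include <stdio.h>

    static int adjust_dp_ref_for_ss(int dp_ref_clk_khz,
                                    int ss_percentage, int ss_divider)
    {
        if (ss_divider == 0)
            return dp_ref_clk_khz;

        /* pct/divider is the spread in percent; /200 folds together
         * percent -> fraction (/100) and down-spread averaging (/2) */
        double frac = (double)ss_percentage / ss_divider / 200.0;

        return (int)(dp_ref_clk_khz * (1.0 - frac));
    }

    int main(void)
    {
        /* e.g. 0.5% down-spread stored as 50 with divider 100 */
        printf("%d kHz\n", adjust_dp_ref_for_ss(600000, 50, 100));
        /* -> 598500 kHz: 600 MHz reduced by 0.25% on average */
        return 0;
    }
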
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
deleted file mode 100644
index 34fdb38..0000000
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-
-#ifndef _DCE_CLOCKS_H_
-#define _DCE_CLOCKS_H_
-
-#include "display_clock.h"
-
-#define CLK_COMMON_REG_LIST_DCE_BASE() \
-	.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
-	.DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
-
-#define CLK_COMMON_REG_LIST_DCN_BASE() \
-	SR(DENTIST_DISPCLK_CNTL)
-
-#define CLK_SF(reg_name, field_name, post_fix)\
-	.field_name = reg_name ## __ ## field_name ## post_fix
-
-#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
-	CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
-	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
-
-#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
-	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
-	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
-
-#define CLK_REG_FIELD_LIST(type) \
-	type DPREFCLK_SRC_SEL; \
-	type DENTIST_DPREFCLK_WDIVIDER; \
-	type DENTIST_DISPCLK_WDIVIDER; \
-	type DENTIST_DISPCLK_CHG_DONE;
-
-struct dccg_shift {
-	CLK_REG_FIELD_LIST(uint8_t)
-};
-
-struct dccg_mask {
-	CLK_REG_FIELD_LIST(uint32_t)
-};
-
-struct dccg_registers {
-	uint32_t DPREFCLK_CNTL;
-	uint32_t DENTIST_DISPCLK_CNTL;
-};
-
-struct dce_dccg {
-	struct dccg base;
-	const struct dccg_registers *regs;
-	const struct dccg_shift *clk_shift;
-	const struct dccg_mask *clk_mask;
-
-	struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
-
-	int dentist_vco_freq_khz;
-
-	/* Cache the status of DFS-bypass feature*/
-	bool dfs_bypass_enabled;
-	/* True if the DFS-bypass feature is enabled and active. */
-	bool dfs_bypass_active;
-	/* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
-	 * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
-	int dfs_bypass_disp_clk;
-
-	/* Flag for Enabled SS on DPREFCLK */
-	bool ss_on_dprefclk;
-	/* DPREFCLK SS percentage (if down-spread enabled) */
-	int dprefclk_ss_percentage;
-	/* DPREFCLK SS percentage Divider (100 or 1000) */
-	int dprefclk_ss_divider;
-	int dprefclk_khz;
-};
-
-
-struct dccg *dce_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask);
-
-struct dccg *dce110_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask);
-
-struct dccg *dce112_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask);
-
-struct dccg *dce120_dccg_create(struct dc_context *ctx);
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx);
-#endif
-
-void dce_dccg_destroy(struct dccg **dccg);
-
-#endif /* _DCE_CLOCKS_H_ */
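
The CLK_SF/CLK_REG_FIELD_LIST macros (identical in this removed header and in
its dce_clk_mgr.h replacement) are an X-macro-style field list: one list of
field names is instantiated twice, once as uint8_t shifts and once as uint32_t
masks, and CLK_SF token-pastes the register and field names so the
initializers pick up the generated __SHIFT/_MASK constants. A reduced,
compilable sketch, with dummy constants standing in for the generated ASIC
headers:

    #include <stdint.h>
    #include <stdio.h>

    /* normally generated per-ASIC: */
    #define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
    #define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK   0x0000007f

    #define CLK_SF(reg_name, field_name, post_fix) \
        .field_name = reg_name ## __ ## field_name ## post_fix

    #define CLK_REG_FIELD_LIST(type) \
        type DENTIST_DISPCLK_WDIVIDER;

    struct clk_mgr_shift { CLK_REG_FIELD_LIST(uint8_t) };
    struct clk_mgr_mask  { CLK_REG_FIELD_LIST(uint32_t) };

    /* one field list, instantiated twice with different suffixes */
    static const struct clk_mgr_shift shift = {
        CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, __SHIFT)
    };
    static const struct clk_mgr_mask mask = {
        CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, _MASK)
    };

    /* what a REG_GET()-style accessor boils down to: mask, then shift */
    static uint32_t get_field(uint32_t reg, uint32_t m, uint8_t s)
    {
        return (reg & m) >> s;
    }

    int main(void)
    {
        uint32_t reg = 0x24; /* pretend register read */

        printf("WDIVIDER = %u\n",
               get_field(reg, mask.DENTIST_DISPCLK_WDIVIDER,
                         shift.DENTIST_DISPCLK_WDIVIDER));
        return 0;
    }
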
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 64dc753..c83a7f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -233,6 +233,16 @@ struct dce_hwseq_registers {
 	uint32_t DOMAIN5_PG_CONFIG;
 	uint32_t DOMAIN6_PG_CONFIG;
 	uint32_t DOMAIN7_PG_CONFIG;
+	uint32_t DOMAIN8_PG_CONFIG;
+	uint32_t DOMAIN9_PG_CONFIG;
+	uint32_t DOMAIN10_PG_CONFIG;
+	uint32_t DOMAIN11_PG_CONFIG;
+	uint32_t DOMAIN16_PG_CONFIG;
+	uint32_t DOMAIN17_PG_CONFIG;
+	uint32_t DOMAIN18_PG_CONFIG;
+	uint32_t DOMAIN19_PG_CONFIG;
+	uint32_t DOMAIN20_PG_CONFIG;
+	uint32_t DOMAIN21_PG_CONFIG;
 	uint32_t DOMAIN0_PG_STATUS;
 	uint32_t DOMAIN1_PG_STATUS;
 	uint32_t DOMAIN2_PG_STATUS;
@@ -241,6 +251,16 @@ struct dce_hwseq_registers {
 	uint32_t DOMAIN5_PG_STATUS;
 	uint32_t DOMAIN6_PG_STATUS;
 	uint32_t DOMAIN7_PG_STATUS;
+	uint32_t DOMAIN8_PG_STATUS;
+	uint32_t DOMAIN9_PG_STATUS;
+	uint32_t DOMAIN10_PG_STATUS;
+	uint32_t DOMAIN11_PG_STATUS;
+	uint32_t DOMAIN16_PG_STATUS;
+	uint32_t DOMAIN17_PG_STATUS;
+	uint32_t DOMAIN18_PG_STATUS;
+	uint32_t DOMAIN19_PG_STATUS;
+	uint32_t DOMAIN20_PG_STATUS;
+	uint32_t DOMAIN21_PG_STATUS;
 	uint32_t DIO_MEM_PWR_CTRL;
 	uint32_t DCCG_GATE_DISABLE_CNTL;
 	uint32_t DCCG_GATE_DISABLE_CNTL2;
@@ -262,6 +282,8 @@ struct dce_hwseq_registers {
 	uint32_t D2VGA_CONTROL;
 	uint32_t D3VGA_CONTROL;
 	uint32_t D4VGA_CONTROL;
+	uint32_t D5VGA_CONTROL;
+	uint32_t D6VGA_CONTROL;
 	uint32_t VGA_TEST_CONTROL;
 	/* MMHUB registers. read only. temporary hack */
 	uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
@@ -489,6 +511,26 @@ struct dce_hwseq_registers {
 	type DOMAIN6_POWER_GATE; \
 	type DOMAIN7_POWER_FORCEON; \
 	type DOMAIN7_POWER_GATE; \
+	type DOMAIN8_POWER_FORCEON; \
+	type DOMAIN8_POWER_GATE; \
+	type DOMAIN9_POWER_FORCEON; \
+	type DOMAIN9_POWER_GATE; \
+	type DOMAIN10_POWER_FORCEON; \
+	type DOMAIN10_POWER_GATE; \
+	type DOMAIN11_POWER_FORCEON; \
+	type DOMAIN11_POWER_GATE; \
+	type DOMAIN16_POWER_FORCEON; \
+	type DOMAIN16_POWER_GATE; \
+	type DOMAIN17_POWER_FORCEON; \
+	type DOMAIN17_POWER_GATE; \
+	type DOMAIN18_POWER_FORCEON; \
+	type DOMAIN18_POWER_GATE; \
+	type DOMAIN19_POWER_FORCEON; \
+	type DOMAIN19_POWER_GATE; \
+	type DOMAIN20_POWER_FORCEON; \
+	type DOMAIN20_POWER_GATE; \
+	type DOMAIN21_POWER_FORCEON; \
+	type DOMAIN21_POWER_GATE; \
 	type DOMAIN0_PGFSM_PWR_STATUS; \
 	type DOMAIN1_PGFSM_PWR_STATUS; \
 	type DOMAIN2_PGFSM_PWR_STATUS; \
@@ -497,6 +539,16 @@ struct dce_hwseq_registers {
 	type DOMAIN5_PGFSM_PWR_STATUS; \
 	type DOMAIN6_PGFSM_PWR_STATUS; \
 	type DOMAIN7_PGFSM_PWR_STATUS; \
+	type DOMAIN8_PGFSM_PWR_STATUS; \
+	type DOMAIN9_PGFSM_PWR_STATUS; \
+	type DOMAIN10_PGFSM_PWR_STATUS; \
+	type DOMAIN11_PGFSM_PWR_STATUS; \
+	type DOMAIN16_PGFSM_PWR_STATUS; \
+	type DOMAIN17_PGFSM_PWR_STATUS; \
+	type DOMAIN18_PGFSM_PWR_STATUS; \
+	type DOMAIN19_PGFSM_PWR_STATUS; \
+	type DOMAIN20_PGFSM_PWR_STATUS; \
+	type DOMAIN21_PGFSM_PWR_STATUS; \
 	type DCFCLK_GATE_DIS; \
 	type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
 	type VGA_TEST_ENABLE; \
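
The new DOMAIN8..21 config/status fields follow the same field-list pattern as
domains 0..7 (the gap at 12..15 presumably mirrors the hardware numbering).
Power-gating code of this shape typically writes DOMAINn_PG_CONFIG and then
polls DOMAINn_PG_STATUS until the PGFSM reports the requested state; a
generic, self-contained sketch of that config-then-poll idiom (the register
file and helpers below are made-up stand-ins, not the DC reg_helper API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[2];        /* [0] PG_CONFIG, [1] PG_STATUS */

    #define POWER_GATE_MASK   0x1u
    #define PWR_STATUS_MASK   0x2u
    #define PWR_STATUS_SHIFT  1

    static void write_pg_config(uint32_t val)
    {
        regs[0] = val;
        /* fake hardware: PGFSM reflects the request immediately */
        regs[1] = (val & POWER_GATE_MASK) << PWR_STATUS_SHIFT;
    }

    static bool domain_power_gate(bool gate)
    {
        int tries;

        write_pg_config(gate ? POWER_GATE_MASK : 0);

        /* poll until the PGFSM reaches the requested state */
        for (tries = 0; tries < 1000; tries++) {
            uint32_t st = (regs[1] & PWR_STATUS_MASK) >> PWR_STATUS_SHIFT;

            if (st == (gate ? 1u : 0u))
                return true;
        }
        return false; /* timed out */
    }

    int main(void)
    {
        printf("gated: %s\n", domain_power_gate(true) ? "yes" : "no");
        return 0;
    }
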
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 366bc8c..3e18ea8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -645,7 +645,7 @@ static bool dce110_link_encoder_validate_hdmi_output(
 		return false;
 
 	/* DCE11 HW does not support 420 */
-	if (!enc110->base.features.ycbcr420_supported &&
+	if (!enc110->base.features.hdmi_ycbcr420_supported &&
 			crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 		return false;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 74c05e8..bc50a8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -105,74 +105,18 @@ bool dce100_enable_display_power_gating(
 		return false;
 }
 
-static void dce100_pplib_apply_display_requirements(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
-	pp_display_cfg->avail_mclk_switch_time_us =
-						dce110_get_min_vblank_time_us(context);
-	/*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
-		/ MEMORY_TYPE_MULTIPLIER;*/
-
-	dce110_fill_display_configs(context, pp_display_cfg);
-
-	if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
-			struct dm_pp_display_configuration)) !=  0)
-		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
-	dc->prev_display_config = *pp_display_cfg;
-}
-
-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	uint32_t max_pix_clk = 0;
-	int i;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		if (pipe_ctx->stream == NULL)
-			continue;
-
-		/* do not check under lay */
-		if (pipe_ctx->top_pipe)
-			continue;
-
-		if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
-			max_pix_clk =
-				pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
-	}
-	return max_pix_clk;
-}
-
-void dce100_set_bandwidth(
+void dce100_prepare_bandwidth(
 		struct dc *dc,
-		struct dc_state *context,
-		bool decrease_allowed)
+		struct dc_state *context)
 {
-	struct dc_clocks req_clks;
-
-	req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
-	req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
-
 	dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
 
-	dc->res_pool->dccg->funcs->update_clocks(
-			dc->res_pool->dccg,
-			&req_clks,
-			decrease_allowed);
-
-	dce100_pplib_apply_display_requirements(dc, context);
+	dc->res_pool->clk_mgr->funcs->update_clocks(
+			dc->res_pool->clk_mgr,
+			context,
+			false);
 }
 
-
 /**************************************************************************/
 
 void dce100_hw_sequencer_construct(struct dc *dc)
@@ -180,8 +124,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
 	dce110_hw_sequencer_construct(dc);
 
 	dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
-	dc->hwss.set_bandwidth = dce100_set_bandwidth;
-	dc->hwss.pplib_apply_display_requirements =
-			dce100_pplib_apply_display_requirements;
+	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+	dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
 }
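
The old set_bandwidth(dc, context, decrease_allowed) entry point is split into
a prepare/optimize pair: prepare_bandwidth runs before mode programming and
may only raise clocks (safe watermarks, safe_to_lower = false), while
optimize_bandwidth runs afterwards and is allowed to lower them. dce100 points
both hooks at the same helper, so its clocks only ever ratchet upward here. A
stand-alone sketch of the two-phase contract (simplified stand-in types):

    #include <stdbool.h>
    #include <stdio.h>

    struct clk_state { int dispclk_khz; };

    static void update_clocks(struct clk_state *clks, int new_khz,
                              bool safe_to_lower)
    {
        /* same hysteresis as should_set_clock() in dce_clk_mgr.h */
        if ((safe_to_lower && new_khz < clks->dispclk_khz) ||
            new_khz > clks->dispclk_khz) {
            clks->dispclk_khz = new_khz;
            printf("dispclk -> %d kHz\n", new_khz);
        }
    }

    static void prepare_bandwidth(struct clk_state *c, int target)
    {
        update_clocks(c, target, false); /* may only raise */
    }

    static void optimize_bandwidth(struct clk_state *c, int target)
    {
        update_clocks(c, target, true);  /* lowering now allowed */
    }

    int main(void)
    {
        struct clk_state clks = { .dispclk_khz = 600000 };

        prepare_bandwidth(&clks, 400000);  /* no-op: can't lower yet */
        /* ... mode set / front-end programming happens here ... */
        optimize_bandwidth(&clks, 400000); /* dispclk -> 400000 kHz */
        return 0;
    }
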
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index c6ec0ed..acd4185 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -33,10 +33,9 @@ struct dc_state;
 
 void dce100_hw_sequencer_construct(struct dc *dc);
 
-void dce100_set_bandwidth(
+void dce100_prepare_bandwidth(
 		struct dc *dc,
-		struct dc_state *context,
-		bool decrease_allowed);
+		struct dc_state *context);
 
 bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
 					struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 14754a8..6ae51a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -36,11 +36,11 @@
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_mem_input.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
@@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
 		CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool)
 			dce_aud_destroy(&pool->base.audios[i]);
 	}
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	if (pool->base.abm != NULL)
 				dce_abm_destroy(&pool->base.abm);
@@ -767,7 +767,7 @@ bool dce100_validate_bandwidth(
 	if (at_least_one_pipe) {
 		/* TODO implement when needed but for now hardcode max value*/
 		context->bw.dce.dispclk_khz = 681000;
-		context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+		context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
 	} else {
 		context->bw.dce.dispclk_khz = 0;
 		context->bw.dce.yclk_khz = 0;
@@ -860,7 +860,6 @@ static bool construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -908,11 +907,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.dccg = dce_dccg_create(ctx,
+	pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -938,12 +937,6 @@ static bool construct(
 		goto res_create_fail;
 	}
 
-	/* get static clock information for PPLIB or firmware, save
-	 * max_clock_state
-	 */
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-					static_clk_info.max_clocks_state;
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
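
The hardcoded yclk ceiling above works out to 250000 * 4 = 1000000 kHz with
MEMORY_TYPE_MULTIPLIER_CZ (defined as 4 in dce_clk_mgr.h). A trivial check of
that arithmetic (the multiplier's meaning is ASIC-specific; the CZ suffix
presumably ties it to Carrizo-class parts):

    #include <stdio.h>

    #define MEMORY_TYPE_MULTIPLIER_CZ 4 /* as in dce_clk_mgr.h */

    int main(void)
    {
        /* -> 1000000 kHz */
        printf("yclk ceiling: %d kHz\n",
               250000 * MEMORY_TYPE_MULTIPLIER_CZ);
        return 0;
    }
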
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index b459867..9724a17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -548,14 +548,14 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
 
 	regamma_params->hw_points_num = hw_points;
 
-	i = 1;
-	for (k = 0; k < 16 && i < 16; k++) {
+	k = 0;
+	for (i = 1; i < 16; i++) {
 		if (seg_distr[k] != -1) {
 			regamma_params->arr_curve_points[k].segments_num = seg_distr[k];
 			regamma_params->arr_curve_points[i].offset =
 					regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
 		}
-		i++;
+		k++;
 	}
 
 	if (seg_distr[k] != -1)
@@ -1085,7 +1085,6 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
 		link->dc->hwss.edp_backlight_control(link, true);
-		stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL;
 	}
 }
 void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
@@ -1192,8 +1191,8 @@ static void build_audio_output(
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
 			pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
 		audio_output->pll_info.dp_dto_source_clock_in_khz =
-				state->dis_clk->funcs->get_dp_ref_clk_frequency(
-						state->dis_clk);
+				state->dccg->funcs->get_dp_ref_clk_frequency(
+						state->dccg);
 	}
 
 	audio_output->pll_info.feed_back_divider =
@@ -1547,6 +1546,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 	int i;
 	struct dc_link *edp_link_to_turnoff = NULL;
 	struct dc_link *edp_link = get_link_for_edp(dc);
+	struct dc_bios *bios = dc->ctx->dc_bios;
 	bool can_edp_fast_boot_optimize = false;
 	bool apply_edp_fast_boot_optimization = false;
 
@@ -1573,6 +1573,20 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 			if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
 				context->streams[i]->apply_edp_fast_boot_optimization = true;
 				apply_edp_fast_boot_optimization = true;
+
+				/* After S4/S5 resume, the VBIOS may have posted eDP
+				 * itself, so the previous dpms_off state no longer
+				 * makes sense.  Align the dpms_off state between HW
+				 * and SW by checking the VBIOS scratch register.
+				 */
+				if (bios->funcs->is_active_display) {
+					const struct connector_device_tag_info *device_tag = &(edp_link->device_tag);
+
+					if (bios->funcs->is_active_display(bios,
+							context->streams[i]->signal,
+							device_tag))
+						context->streams[i]->dpms_off = false;
+				}
 			}
 		}
 	}
@@ -1748,34 +1762,6 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
 			set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
 }
 
-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	uint32_t max_pix_clk = 0;
-	int i;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		if (pipe_ctx->stream == NULL)
-			continue;
-
-		/* do not check under lay */
-		if (pipe_ctx->top_pipe)
-			continue;
-
-		if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
-			max_pix_clk =
-				pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
-	}
-
-	return max_pix_clk;
-}
-
 /*
  *  Check if FBC can be enabled
  */
@@ -2385,191 +2371,33 @@ static void init_hw(struct dc *dc)
 
 }
 
-void dce110_fill_display_configs(
-	const struct dc_state *context,
-	struct dm_pp_display_configuration *pp_display_cfg)
-{
-	int j;
-	int num_cfgs = 0;
 
-	for (j = 0; j < context->stream_count; j++) {
-		int k;
-
-		const struct dc_stream_state *stream = context->streams[j];
-		struct dm_pp_single_disp_config *cfg =
-			&pp_display_cfg->disp_configs[num_cfgs];
-		const struct pipe_ctx *pipe_ctx = NULL;
-
-		for (k = 0; k < MAX_PIPES; k++)
-			if (stream == context->res_ctx.pipe_ctx[k].stream) {
-				pipe_ctx = &context->res_ctx.pipe_ctx[k];
-				break;
-			}
-
-		ASSERT(pipe_ctx != NULL);
-
-		/* only notify active stream */
-		if (stream->dpms_off)
-			continue;
-
-		num_cfgs++;
-		cfg->signal = pipe_ctx->stream->signal;
-		cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
-		cfg->src_height = stream->src.height;
-		cfg->src_width = stream->src.width;
-		cfg->ddi_channel_mapping =
-			stream->sink->link->ddi_channel_mapping.raw;
-		cfg->transmitter =
-			stream->sink->link->link_enc->transmitter;
-		cfg->link_settings.lane_count =
-			stream->sink->link->cur_link_settings.lane_count;
-		cfg->link_settings.link_rate =
-			stream->sink->link->cur_link_settings.link_rate;
-		cfg->link_settings.link_spread =
-			stream->sink->link->cur_link_settings.link_spread;
-		cfg->sym_clock = stream->phy_pix_clk;
-		/* Round v_refresh*/
-		cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
-		cfg->v_refresh /= stream->timing.h_total;
-		cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
-							/ stream->timing.v_total;
-	}
-
-	pp_display_cfg->display_count = num_cfgs;
-}
-
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
-{
-	uint8_t j;
-	uint32_t min_vertical_blank_time = -1;
-
-	for (j = 0; j < context->stream_count; j++) {
-		struct dc_stream_state *stream = context->streams[j];
-		uint32_t vertical_blank_in_pixels = 0;
-		uint32_t vertical_blank_time = 0;
-
-		vertical_blank_in_pixels = stream->timing.h_total *
-			(stream->timing.v_total
-			 - stream->timing.v_addressable);
-
-		vertical_blank_time = vertical_blank_in_pixels
-			* 1000 / stream->timing.pix_clk_khz;
-
-		if (min_vertical_blank_time > vertical_blank_time)
-			min_vertical_blank_time = vertical_blank_time;
-	}
-
-	return min_vertical_blank_time;
-}
-
-static int determine_sclk_from_bounding_box(
-		const struct dc *dc,
-		int required_sclk)
-{
-	int i;
-
-	/*
-	 * Some asics do not give us sclk levels, so we just report the actual
-	 * required sclk
-	 */
-	if (dc->sclk_lvls.num_levels == 0)
-		return required_sclk;
-
-	for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
-		if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
-			return dc->sclk_lvls.clocks_in_khz[i];
-	}
-	/*
-	 * even maximum level could not satisfy requirement, this
-	 * is unexpected at this stage, should have been caught at
-	 * validation time
-	 */
-	ASSERT(0);
-	return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
-}
-
-static void pplib_apply_display_requirements(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
-	pp_display_cfg->all_displays_in_sync =
-		context->bw.dce.all_displays_in_sync;
-	pp_display_cfg->nb_pstate_switch_disable =
-			context->bw.dce.nbp_state_change_enable == false;
-	pp_display_cfg->cpu_cc6_disable =
-			context->bw.dce.cpuc_state_change_enable == false;
-	pp_display_cfg->cpu_pstate_disable =
-			context->bw.dce.cpup_state_change_enable == false;
-	pp_display_cfg->cpu_pstate_separation_time =
-			context->bw.dce.blackout_recovery_time_us;
-
-	pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
-		/ MEMORY_TYPE_MULTIPLIER;
-
-	pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
-			dc,
-			context->bw.dce.sclk_khz);
-
-	pp_display_cfg->min_engine_clock_deep_sleep_khz
-			= context->bw.dce.sclk_deep_sleep_khz;
-
-	pp_display_cfg->avail_mclk_switch_time_us =
-						dce110_get_min_vblank_time_us(context);
-	/* TODO: dce11.2*/
-	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
-	pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
-
-	dce110_fill_display_configs(context, pp_display_cfg);
-
-	/* TODO: is this still applicable?*/
-	if (pp_display_cfg->display_count == 1) {
-		const struct dc_crtc_timing *timing =
-			&context->streams[0]->timing;
-
-		pp_display_cfg->crtc_index =
-			pp_display_cfg->disp_configs[0].pipe_idx;
-		pp_display_cfg->line_time_in_us = timing->h_total * 1000
-							/ timing->pix_clk_khz;
-	}
-
-	if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
-			struct dm_pp_display_configuration)) !=  0)
-		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
-	dc->prev_display_config = *pp_display_cfg;
-}
-
-static void dce110_set_bandwidth(
+void dce110_prepare_bandwidth(
 		struct dc *dc,
-		struct dc_state *context,
-		bool decrease_allowed)
+		struct dc_state *context)
 {
-	struct dc_clocks req_clks;
-	struct dccg *dccg = dc->res_pool->dccg;
+	struct clk_mgr *dccg = dc->res_pool->clk_mgr;
 
-	req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
-	req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
-
-	if (decrease_allowed)
-		dce110_set_displaymarks(dc, context);
-	else
-		dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
-	if (dccg->funcs->update_dfs_bypass)
-		dccg->funcs->update_dfs_bypass(
-			dccg,
-			dc,
-			context,
-			req_clks.dispclk_khz);
+	dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
 
 	dccg->funcs->update_clocks(
 			dccg,
-			&req_clks,
-			decrease_allowed);
-	pplib_apply_display_requirements(dc, context);
+			context,
+			false);
+}
+
+void dce110_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	struct clk_mgr *dccg = dc->res_pool->clk_mgr;
+
+	dce110_set_displaymarks(dc, context);
+
+	dccg->funcs->update_clocks(
+			dccg,
+			context,
+			true);
 }
 
 static void dce110_program_front_end_for_pipe(
@@ -2774,28 +2602,6 @@ static void dce110_wait_for_mpcc_disconnect(
 	/* do nothing*/
 }
 
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
-		enum dc_color_space colorspace,
-		uint16_t *matrix)
-{
-	int i;
-	struct out_csc_color_matrix tbl_entry;
-
-	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
-				== true) {
-			enum dc_color_space color_space =
-				pipe_ctx->stream->output_color_space;
-
-			//uint16_t matrix[12];
-			for (i = 0; i < 12; i++)
-				tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
-
-			tbl_entry.color_space = color_space;
-			//tbl_entry.regval = matrix;
-			pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry);
-	}
-}
-
 void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -2844,13 +2650,8 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
 				pipe_ctx->plane_res.xfm, attributes);
 }
 
-static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
-
-static void optimize_shared_resources(struct dc *dc) {}
-
 static const struct hw_sequencer_funcs dce110_funcs = {
 	.program_gamut_remap = program_gamut_remap,
-	.program_csc_matrix = program_csc_matrix,
 	.init_hw = init_hw,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
@@ -2873,7 +2674,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.enable_display_power_gating = dce110_enable_display_power_gating,
 	.disable_plane = dce110_power_down_fe,
 	.pipe_control_lock = dce_pipe_control_lock,
-	.set_bandwidth = dce110_set_bandwidth,
+	.prepare_bandwidth = dce110_prepare_bandwidth,
+	.optimize_bandwidth = dce110_optimize_bandwidth,
 	.set_drr = set_drr,
 	.get_position = get_position,
 	.set_static_screen_control = set_static_screen_control,
@@ -2882,9 +2684,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.setup_stereo = NULL,
 	.set_avmute = dce110_set_avmute,
 	.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
-	.ready_shared_resources = ready_shared_resources,
-	.optimize_shared_resources = optimize_shared_resources,
-	.pplib_apply_display_requirements = pplib_apply_display_requirements,
 	.edp_backlight_control = hwss_edp_backlight_control,
 	.edp_power_control = hwss_edp_power_control,
 	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
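[The old set_bandwidth(dc, context, decrease_allowed) hook above is split into a bracketing pair: prepare_bandwidth() programs safe watermarks and updates clocks with safe_to_lower == false before a new state is applied, and optimize_bandwidth() programs the real watermarks and allows clocks to drop afterwards. A minimal sketch of how a caller in the dc core would use the pair, assuming only the hwss function pointers introduced by this patch:

static void commit_state_sketch(struct dc *dc, struct dc_state *context)
{
	/* raise clocks / safe watermarks; update_clocks(..., false) path */
	dc->hwss.prepare_bandwidth(dc, context);

	/* ... apply the new context to the hardware ... */

	/* real watermarks; update_clocks(..., true) may now lower clocks */
	dc->hwss.optimize_bandwidth(dc, context);
}
]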
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index d6db3db..cd3e36d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -40,7 +40,6 @@ enum dc_status dce110_apply_ctx_to_hw(
 		struct dc_state *context);
 
 
-
 void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
 
 void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
@@ -64,11 +63,13 @@ void dce110_set_safe_displaymarks(
 		struct resource_context *res_ctx,
 		const struct resource_pool *pool);
 
-void dce110_fill_display_configs(
-	const struct dc_state *context,
-	struct dm_pp_display_configuration *pp_display_cfg);
+void dce110_prepare_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
 
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+void dce110_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7c9fd90..e33d117 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -31,6 +31,7 @@
 #include "resource.h"
 #include "dce110/dce110_resource.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "include/irq_service_interface.h"
 #include "dce/dce_audio.h"
 #include "dce110/dce110_timing_generator.h"
@@ -45,7 +46,6 @@
 #include "dce110/dce110_transform_v.h"
 #include "dce/dce_opp.h"
 #include "dce110/dce110_opp_v.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_hwseq.h"
 #include "dce110/dce110_hw_sequencer.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
 #define SRI(reg_name, block, id)\
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
 		CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	if (pool->base.irqs != NULL) {
 		dal_irq_service_destroy(&pool->base.irqs);
@@ -1173,12 +1173,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			&clks);
 
 	dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-		clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+		clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
 	dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-		clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+		clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 	dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-		clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+		clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 }
 
@@ -1201,7 +1201,6 @@ static bool construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -1257,11 +1256,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.dccg = dce110_dccg_create(ctx,
+	pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1287,13 +1286,6 @@ static bool construct(
 		goto res_create_fail;
 	}
 
-	/* get static clock information for PPLIB or firmware, save
-	 * max_clock_state
-	 */
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-				static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
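[For reference, the yclk terms touched by the MEMORY_TYPE_MULTIPLIER_CZ hunks are UMACLK scaled by the memory-type multiplier and converted to a fixed-point MHz value. A worked sketch, assuming the multiplier's value is 4 (its definition in dce_clk_mgr.h in this series) and a hypothetical reported UMACLK of 800 MHz:

	int umaclk_khz = 800000;	/* 800 MHz reported in kHz */

	/* yclk = UMACLK * multiplier, expressed in MHz as a fixed-point
	 * fraction: bw_frc_to_fixed(800000 * 4, 1000) == 3200.0 MHz
	 */
	struct bw_fixed low_yclk =
		bw_frc_to_fixed(umaclk_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
]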
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 3ce79c2..969d4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -35,6 +35,7 @@
 
 #include "irq/dce110/irq_service_dce110.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_mem_input.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_link_encoder.h"
@@ -42,7 +43,6 @@
 #include "dce/dce_audio.h"
 #include "dce/dce_opp.h"
 #include "dce/dce_ipp.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 
 #include "dce/dce_hwseq.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
 		CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -551,7 +551,8 @@ static struct transform *dce112_transform_create(
 static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_deep_color = COLOR_DEPTH_121212,
 		.max_hdmi_pixel_clock = 600000,
-		.ycbcr420_supported = true,
+		.hdmi_ycbcr420_supported = true,
+		.dp_ycbcr420_supported = false,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,
@@ -749,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	if (pool->base.irqs != NULL) {
 		dal_irq_service_destroy(&pool->base.irqs);
@@ -1015,12 +1016,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 				&clks);
 
 		dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-			clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+			clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
 		dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-			clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+			clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
 			1000);
 		dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-			clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+			clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
 			1000);
 
 		return;
@@ -1056,12 +1057,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 	 * YCLK = UMACLK*m_memoryTypeMultiplier
 	 */
 	dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-		mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+		mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
 	dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-		mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+		mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 	dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-		mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+		mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 
 	/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1131,7 +1132,6 @@ static bool construct(
 {
 	unsigned int i;
 	struct dc_context *ctx = dc->ctx;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -1199,11 +1199,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.dccg = dce112_dccg_create(ctx,
+	pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1229,13 +1229,6 @@ static bool construct(
 		goto res_create_fail;
 	}
 
-	/* get static clock information for PPLIB or firmware, save
-	 * max_clock_state
-	 */
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-				static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
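[The single ycbcr420_supported flag is split into per-connection variants so HDMI and DP capability can diverge; DP 4:2:0 stays disabled for these ASICs. A hypothetical validation helper using the new fields — the function name and shape are illustrative only:

static bool supports_ycbcr420(const struct encoder_feature_support *features,
			      enum signal_type signal)
{
	if (signal == SIGNAL_TYPE_HDMI_TYPE_A)
		return features->hdmi_ycbcr420_supported;

	/* dp_ycbcr420_supported is false on DCE11.2/DCE12 in this patch */
	return features->dp_ycbcr420_supported;
}
]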
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 79ab5f9..f126966 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -31,6 +31,7 @@
 #include "resource.h"
 #include "include/irq_service_interface.h"
 #include "dce120_resource.h"
+
 #include "dce112/dce112_resource.h"
 
 #include "dce110/dce110_resource.h"
@@ -39,7 +40,6 @@
 #include "irq/dce120/irq_service_dce120.h"
 #include "dce/dce_opp.h"
 #include "dce/dce_clock_source.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_mem_input.h"
 
@@ -47,6 +47,7 @@
 #include "dce120/dce120_hw_sequencer.h"
 #include "dce/dce_transform.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
@@ -573,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 }
 
 static void read_dce_straps(
@@ -606,7 +607,8 @@ static struct audio *create_audio(
 static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_deep_color = COLOR_DEPTH_121212,
 		.max_hdmi_pixel_clock = 600000,
-		.ycbcr420_supported = true,
+		.hdmi_ycbcr420_supported = true,
+		.dp_ycbcr420_supported = false,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,
@@ -834,12 +836,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 	 * YCLK = UMACLK*m_memoryTypeMultiplier
 	 */
 	dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-		mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+		mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
 	dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-		mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+		mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 	dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-		mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+		mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 
 	/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -973,8 +975,8 @@ static bool construct(
 		}
 	}
 
-	pool->base.dccg = dce120_dccg_create(ctx);
-	if (pool->base.dccg == NULL) {
+	pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto dccg_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index 6c6a1a1..a60a90e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -76,6 +76,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)
 
 	dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
 	dc->hwss.pipe_control_lock = dce_pipe_control_lock;
-	dc->hwss.set_bandwidth = dce100_set_bandwidth;
+	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+	dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index d68f951..6d40b3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -37,6 +37,7 @@
 #include "dce110/dce110_timing_generator.h"
 #include "dce110/dce110_resource.h"
 #include "dce80/dce80_timing_generator.h"
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_mem_input.h"
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
@@ -44,7 +45,6 @@
 #include "dce/dce_ipp.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
@@ -155,15 +155,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
 		CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -779,8 +779,8 @@ static void destruct(struct dce110_resource_pool *pool)
 		}
 	}
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	if (pool->base.irqs != NULL) {
 		dal_irq_service_destroy(&pool->base.irqs);
@@ -793,7 +793,7 @@ bool dce80_validate_bandwidth(
 {
 	/* TODO implement when needed but for now hardcode max value*/
 	context->bw.dce.dispclk_khz = 681000;
-	context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+	context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
 
 	return true;
 }
@@ -855,7 +855,6 @@ static bool dce80_construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -918,11 +917,11 @@ static bool dce80_construct(
 		}
 	}
 
-	pool->base.dccg = dce_dccg_create(ctx,
+	pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -948,10 +947,6 @@ static bool dce80_construct(
 		goto res_create_fail;
 	}
 
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-					static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
@@ -1065,7 +1060,6 @@ static bool dce81_construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -1128,11 +1122,11 @@ static bool dce81_construct(
 		}
 	}
 
-	pool->base.dccg = dce_dccg_create(ctx,
+	pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1158,10 +1152,6 @@ static bool dce81_construct(
 		goto res_create_fail;
 	}
 
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-					static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
@@ -1275,7 +1265,6 @@ static bool dce83_construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -1334,11 +1323,11 @@ static bool dce83_construct(
 		}
 	}
 
-	pool->base.dccg = dce_dccg_create(ctx,
+	pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1364,10 +1353,6 @@ static bool dce83_construct(
 		goto res_create_fail;
 	}
 
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-					static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 032f872..55f293c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,7 +24,7 @@
 
 DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
 		dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
-		dcn10_hubp.o dcn10_mpc.o \
+		dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \
 		dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
 		dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
new file mode 100644
index 0000000..20f531d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn10_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "core_types.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+	container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+	(clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+	clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+	clk_mgr->ctx->logger
+
+void dcn1_pplib_apply_display_requirements(
+	struct dc *dc,
+	struct dc_state *context)
+{
+	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+	pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+	pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
+	pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+	pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+	pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+	pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+	dce110_fill_display_configs(context, pp_display_cfg);
+
+	dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+	bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
+	int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+	bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
+
+	/* Increasing the clock: current dppclk is undivided, requested dppclk is divided by 2 */
+	if (dispclk_increase) {
+		/* already divided by 2, no need to reach the target clock in two steps */
+		if (cur_dpp_div)
+			return new_clocks->dispclk_khz;
+
+		/* requested disp clock is lower than the maximum supported
+		 * dpp clock, no need to reach the target clock in two steps.
+		 */
+		if (new_clocks->dispclk_khz <= disp_clk_threshold)
+			return new_clocks->dispclk_khz;
+
+		/* requested dpp clock does not need the divide-by-2, still within threshold */
+		if (!request_dpp_div)
+			return new_clocks->dispclk_khz;
+
+	} else {
+		/* Decreasing the clock: current dppclk is divided by 2,
+		 * requested dppclk is not divided by 2.
+		 */
+
+		/* current dpp clock is not divided by 2, no need to ramp */
+		if (!cur_dpp_div)
+			return new_clocks->dispclk_khz;
+
+		/* current disp clock is lower than the current maximum
+		 * dpp clock, no need to ramp
+		 */
+		if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
+			return new_clocks->dispclk_khz;
+
+		/* requested dpp clock needs to be divided by 2 */
+		if (request_dpp_div)
+			return new_clocks->dispclk_khz;
+	}
+
+	return disp_clk_threshold;
+}
+
+static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+	struct dc *dc = clk_mgr->ctx->dc;
+	int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
+	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+	int i;
+
+	/* set disp clk to dpp clk threshold */
+	dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
+
+	/* update the requested dpp clock divider option */
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (!pipe_ctx->plane_state)
+			continue;
+
+		pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+				pipe_ctx->plane_res.dpp,
+				request_dpp_div,
+				true);
+	}
+
+	/* if the target clock differs from the dppclk threshold, set the target clock */
+	if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+		dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);
+
+	clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+	clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+	clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
+
+static int get_active_display_cnt(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	int i, display_count;
+
+	display_count = 0;
+	for (i = 0; i < context->stream_count; i++) {
+		const struct dc_stream_state *stream = context->streams[i];
+
+		/*
+		 * Only count active streams and virtual streams. Virtual
+		 * streams must be counted to work around the headless
+		 * case: HPD does not fire while the system is in S0i2.
+		 */
+		if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+			display_count++;
+	}
+
+	return display_count;
+}
+
+static void notify_deep_sleep_dcfclk_to_smu(
+		struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
+{
+	int min_dcef_deep_sleep_clk_mhz; // minimum required DCEF deep-sleep clock in MHz
+	/*
+	 * If the function pointer is not set up, this message is
+	 * sent as part of pplib_apply_display_requirements()
+	 * instead, so just return.
+	 */
+	if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
+		return;
+
+	min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; // round up to the next MHz
+	pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
+}
+
+static void notify_hard_min_dcfclk_to_smu(
+		struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
+{
+	int min_dcf_clk_mhz; // minimum required DCF clock in MHz
+
+	/*
+	 * If the function pointer is not set up, this message is
+	 * sent as part of pplib_apply_display_requirements()
+	 * instead, so just return.
+	 */
+	if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
+		return;
+
+	min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
+
+	pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
+}
+
+static void notify_hard_min_fclk_to_smu(
+		struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz)
+{
+	int min_f_clk_mhz; // minimum required F clock in MHz
+
+	/*
+	 * If the function pointer is not set up, this message is
+	 * sent as part of pplib_apply_display_requirements()
+	 * instead, so just return.
+	 */
+	if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq)
+		return;
+
+	min_f_clk_mhz = min_f_clk_khz / 1000;
+
+	pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz);
+}
+
+static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dc *dc = clk_mgr->ctx->dc;
+	struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+	struct pp_smu_display_requirement_rv *smu_req_cur =
+			&dc->res_pool->pp_smu_req;
+	struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+	struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+	bool send_request_to_increase = false;
+	bool send_request_to_lower = false;
+	int display_count;
+
+	bool enter_display_off = false;
+
+	display_count = get_active_display_cnt(dc, context);
+
+	if (display_count == 0)
+		enter_display_off = true;
+
+	if (enter_display_off == safe_to_lower) {
+		/*
+		 * Notify the SMU of the number of active displays.
+		 * If the function pointer is not set up, this message is
+		 * sent as part of pplib_apply_display_requirements() instead.
+		 */
+		if (pp_smu->set_display_count)
+			pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
+		else
+			smu_req.display_count = display_count;
+
+	}
+
+	if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
+			|| new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
+			|| new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
+			|| new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
+		send_request_to_increase = true;
+
+	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
+		clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+		send_request_to_lower = true;
+	}
+
+	// F Clock
+	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
+		clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
+		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+		clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+		smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
+
+		notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz);
+
+		send_request_to_lower = true;
+	}
+
+	//DCF Clock
+	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
+		clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+		smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000;
+
+		send_request_to_lower = true;
+	}
+
+	if (should_set_clock(safe_to_lower,
+			new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
+		clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+		smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000;
+
+		send_request_to_lower = true;
+	}
+
+	/* make sure dcfclk is raised before dppclk so there is
+	 * enough voltage to run the dpp clock
+	 */
+	if (send_request_to_increase) {
+		/* use dcfclk to request voltage */
+		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+		notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+
+		if (pp_smu->set_display_requirement)
+			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+		notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+		dcn1_pplib_apply_display_requirements(dc, context);
+	}
+
+	/* dcn1 dppclk is tied to dispclk */
+	/* program dispclk even when it is unchanged (==) as a w/a for
+	 * sleep/resume clock ramping issues
+	 */
+	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
+			|| new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
+		dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
+		clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+		send_request_to_lower = true;
+	}
+
+	if (!send_request_to_increase && send_request_to_lower) {
+		/* use dcfclk to request voltage */
+		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+		notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+
+		if (pp_smu->set_display_requirement)
+			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+		notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+		dcn1_pplib_apply_display_requirements(dc, context);
+	}
+
+	*smu_req_cur = smu_req;
+}
+
+static const struct clk_mgr_funcs dcn1_funcs = {
+	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+	.update_clocks = dcn1_update_clocks
+};
+
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
+{
+	struct dc_debug_options *debug = &ctx->dc->debug;
+	struct dc_bios *bp = ctx->dc_bios;
+	struct dc_firmware_info fw_info = { { 0 } };
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	clk_mgr_dce->base.ctx = ctx;
+	clk_mgr_dce->base.funcs = &dcn1_funcs;
+
+	clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+	clk_mgr_dce->dprefclk_ss_percentage = 0;
+	clk_mgr_dce->dprefclk_ss_divider = 1000;
+	clk_mgr_dce->ss_on_dprefclk = false;
+
+	clk_mgr_dce->dprefclk_khz = 600000;
+	if (bp->integrated_info)
+		clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+	if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+		bp->funcs->get_firmware_info(bp, &fw_info);
+		clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+		if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+			clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+	}
+
+	if (!debug->disable_dfs_bypass && bp->integrated_info)
+		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+			clk_mgr_dce->dfs_bypass_enabled = true;
+
+	dce_clock_read_ss_info(clk_mgr_dce);
+
+	return &clk_mgr_dce->base;
+}
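[dcn1_update_clocks() above leans on should_set_clock() for its raise-immediately, lower-only-when-safe policy. As a sketch, the helper (defined in dce_clk_mgr.h in this series) amounts to:

static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	/* always honor an increase; honor a decrease only once safe_to_lower is set */
	return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
}
]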
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
new file mode 100644
index 0000000..9dbaf65
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN10_CLK_MGR_H__
+#define __DCN10_CLK_MGR_H__
+
+#include "../dce/dce_clk_mgr.h"
+
+void dcn1_pplib_apply_display_requirements(
+	struct dc *dc,
+	struct dc_state *context);
+
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
+
+#endif //__DCN10_CLK_MGR_H__
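[A hypothetical hookup of the new constructor in a dcn10 resource pool, mirroring the dce constructors changed above (the error-path label is illustrative):

	pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
	if (pool->base.clk_mgr == NULL) {
		dm_error("DC: failed to create display clock!\n");
		BREAK_TO_DEBUGGER();
		goto res_create_fail;
	}
]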
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 5d95a99..3eea440 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -71,39 +71,39 @@ void cm_helper_program_xfer_func(
 	unsigned int i = 0;
 
 	REG_SET_2(reg->start_cntl_b, 0,
-			exp_region_start, params->arr_points[0].custom_float_x,
+			exp_region_start, params->corner_points[0].blue.custom_float_x,
 			exp_resion_start_segment, 0);
 	REG_SET_2(reg->start_cntl_g, 0,
-			exp_region_start, params->arr_points[0].custom_float_x,
+			exp_region_start, params->corner_points[0].green.custom_float_x,
 			exp_resion_start_segment, 0);
 	REG_SET_2(reg->start_cntl_r, 0,
-			exp_region_start, params->arr_points[0].custom_float_x,
+			exp_region_start, params->corner_points[0].red.custom_float_x,
 			exp_resion_start_segment, 0);
 
 	REG_SET(reg->start_slope_cntl_b, 0,
-			field_region_linear_slope, params->arr_points[0].custom_float_slope);
+			field_region_linear_slope, params->corner_points[0].blue.custom_float_slope);
 	REG_SET(reg->start_slope_cntl_g, 0,
-			field_region_linear_slope, params->arr_points[0].custom_float_slope);
+			field_region_linear_slope, params->corner_points[0].green.custom_float_slope);
 	REG_SET(reg->start_slope_cntl_r, 0,
-			field_region_linear_slope, params->arr_points[0].custom_float_slope);
+			field_region_linear_slope, params->corner_points[0].red.custom_float_slope);
 
 	REG_SET(reg->start_end_cntl1_b, 0,
-			field_region_end, params->arr_points[1].custom_float_x);
+			field_region_end, params->corner_points[1].blue.custom_float_x);
 	REG_SET_2(reg->start_end_cntl2_b, 0,
-			field_region_end_slope, params->arr_points[1].custom_float_slope,
-			field_region_end_base, params->arr_points[1].custom_float_y);
+			field_region_end_slope, params->corner_points[1].blue.custom_float_slope,
+			field_region_end_base, params->corner_points[1].blue.custom_float_y);
 
 	REG_SET(reg->start_end_cntl1_g, 0,
-			field_region_end, params->arr_points[1].custom_float_x);
+			field_region_end, params->corner_points[1].green.custom_float_x);
 	REG_SET_2(reg->start_end_cntl2_g, 0,
-			field_region_end_slope, params->arr_points[1].custom_float_slope,
-		field_region_end_base, params->arr_points[1].custom_float_y);
+			field_region_end_slope, params->corner_points[1].green.custom_float_slope,
+		field_region_end_base, params->corner_points[1].green.custom_float_y);
 
 	REG_SET(reg->start_end_cntl1_r, 0,
-			field_region_end, params->arr_points[1].custom_float_x);
+			field_region_end, params->corner_points[1].red.custom_float_x);
 	REG_SET_2(reg->start_end_cntl2_r, 0,
-			field_region_end_slope, params->arr_points[1].custom_float_slope,
-		field_region_end_base, params->arr_points[1].custom_float_y);
+			field_region_end_slope, params->corner_points[1].red.custom_float_slope,
+		field_region_end_base, params->corner_points[1].red.custom_float_y);
 
 	for (reg_region_cur = reg->region_start;
 			reg_region_cur <= reg->region_end;
@@ -127,7 +127,7 @@ void cm_helper_program_xfer_func(
 
 bool cm_helper_convert_to_custom_float(
 		struct pwl_result_data *rgb_resulted,
-		struct curve_points *arr_points,
+		struct curve_points3 *corner_points,
 		uint32_t hw_points_num,
 		bool fixpoint)
 {
@@ -141,20 +141,53 @@ bool cm_helper_convert_to_custom_float(
 	fmt.mantissa_bits = 12;
 	fmt.sign = false;
 
-	if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
-					    &arr_points[0].custom_float_x)) {
+	/* corner_points[0] - beginning base, slope offset for R,G,B
+	 * corner_points[1] - end base, slope offset for R,G,B
+	 */
+	if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt,
+				&corner_points[0].red.custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt,
+				&corner_points[0].green.custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt,
+				&corner_points[0].blue.custom_float_x)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
 
-	if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
-					    &arr_points[0].custom_float_offset)) {
+	if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt,
+				&corner_points[0].red.custom_float_offset)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt,
+				&corner_points[0].green.custom_float_offset)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt,
+				&corner_points[0].blue.custom_float_offset)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
 
-	if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
-					    &arr_points[0].custom_float_slope)) {
+	if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt,
+				&corner_points[0].red.custom_float_slope)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt,
+				&corner_points[0].green.custom_float_slope)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt,
+				&corner_points[0].blue.custom_float_slope)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
@@ -162,22 +195,59 @@ bool cm_helper_convert_to_custom_float(
 	fmt.mantissa_bits = 10;
 	fmt.sign = false;
 
-	if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
-					    &arr_points[1].custom_float_x)) {
+	if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt,
+				&corner_points[1].red.custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt,
+				&corner_points[1].green.custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt,
+				&corner_points[1].blue.custom_float_x)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
 
-	if (fixpoint == true)
-		arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y);
-	else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
-		&arr_points[1].custom_float_y)) {
+	if (fixpoint == true) {
+		corner_points[1].red.custom_float_y =
+				dc_fixpt_clamp_u0d14(corner_points[1].red.y);
+		corner_points[1].green.custom_float_y =
+				dc_fixpt_clamp_u0d14(corner_points[1].green.y);
+		corner_points[1].blue.custom_float_y =
+				dc_fixpt_clamp_u0d14(corner_points[1].blue.y);
+	} else {
+		if (!convert_to_custom_float_format(corner_points[1].red.y,
+				&fmt, &corner_points[1].red.custom_float_y)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+		if (!convert_to_custom_float_format(corner_points[1].green.y,
+				&fmt, &corner_points[1].green.custom_float_y)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+		if (!convert_to_custom_float_format(corner_points[1].blue.y,
+				&fmt, &corner_points[1].blue.custom_float_y)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+	}
+
+	if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt,
+				&corner_points[1].red.custom_float_slope)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
-
-	if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
-					    &arr_points[1].custom_float_slope)) {
+	if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt,
+				&corner_points[1].green.custom_float_slope)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt,
+				&corner_points[1].blue.custom_float_slope)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
@@ -242,15 +312,10 @@ bool cm_helper_translate_curve_to_hw_format(
 				const struct dc_transfer_func *output_tf,
 				struct pwl_params *lut_params, bool fixpoint)
 {
-	struct curve_points *arr_points;
+	struct curve_points3 *corner_points;
 	struct pwl_result_data *rgb_resulted;
 	struct pwl_result_data *rgb;
 	struct pwl_result_data *rgb_plus_1;
-	struct fixed31_32 y_r;
-	struct fixed31_32 y_g;
-	struct fixed31_32 y_b;
-	struct fixed31_32 y1_min;
-	struct fixed31_32 y3_max;
 
 	int32_t region_start, region_end;
 	int32_t i;
@@ -261,14 +326,14 @@ bool cm_helper_translate_curve_to_hw_format(
 
 	PERF_TRACE();
 
-	arr_points = lut_params->arr_points;
+	corner_points = lut_params->corner_points;
 	rgb_resulted = lut_params->rgb_resulted;
 	hw_points = 0;
 
 	memset(lut_params, 0, sizeof(struct pwl_params));
 	memset(seg_distr, 0, sizeof(seg_distr));
 
-	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+	if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) {
 		/* 32 segments
 		 * segments are from 2^-25 to 2^7
 		 */
@@ -327,31 +392,37 @@ bool cm_helper_translate_curve_to_hw_format(
 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
-	arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+	// All 3 color channels share the same x
+	corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 					     dc_fixpt_from_int(region_start));
-	arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+	corner_points[0].green.x = corner_points[0].red.x;
+	corner_points[0].blue.x = corner_points[0].red.x;
+
+	corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 					     dc_fixpt_from_int(region_end));
+	corner_points[1].green.x = corner_points[1].red.x;
+	corner_points[1].blue.x = corner_points[1].red.x;
 
-	y_r = rgb_resulted[0].red;
-	y_g = rgb_resulted[0].green;
-	y_b = rgb_resulted[0].blue;
+	corner_points[0].red.y = rgb_resulted[0].red;
+	corner_points[0].green.y = rgb_resulted[0].green;
+	corner_points[0].blue.y = rgb_resulted[0].blue;
 
-	y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
-
-	arr_points[0].y = y1_min;
-	arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
-	y_r = rgb_resulted[hw_points - 1].red;
-	y_g = rgb_resulted[hw_points - 1].green;
-	y_b = rgb_resulted[hw_points - 1].blue;
+	corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y,
+			corner_points[0].red.x);
+	corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y,
+			corner_points[0].green.x);
+	corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y,
+			corner_points[0].blue.x);
 
 	/* see comment above, m_arrPoints[1].y should be the Y value for the
 	 * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
 	 */
-	y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
-	arr_points[1].y = y3_max;
-
-	arr_points[1].slope = dc_fixpt_zero;
+	corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+	corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+	corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+	corner_points[1].red.slope = dc_fixpt_zero;
+	corner_points[1].green.slope = dc_fixpt_zero;
+	corner_points[1].blue.slope = dc_fixpt_zero;
 
 	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
 		/* for PQ, we want to have a straight line from last HW X point,
@@ -360,9 +431,15 @@ bool cm_helper_translate_curve_to_hw_format(
 		const struct fixed31_32 end_value =
 				dc_fixpt_from_int(125);
 
-		arr_points[1].slope = dc_fixpt_div(
-			dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
-			dc_fixpt_sub(end_value, arr_points[1].x));
+		corner_points[1].red.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+			dc_fixpt_sub(end_value, corner_points[1].red.x));
+		corner_points[1].green.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+			dc_fixpt_sub(end_value, corner_points[1].green.x));
+		corner_points[1].blue.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+			dc_fixpt_sub(end_value, corner_points[1].blue.x));
 	}
 
 	lut_params->hw_points_num = hw_points;
@@ -411,7 +488,7 @@ bool cm_helper_translate_curve_to_hw_format(
 		++i;
 	}
 	cm_helper_convert_to_custom_float(rgb_resulted,
-						lut_params->arr_points,
+						lut_params->corner_points,
 						hw_points, fixpoint);
 
 	return true;
@@ -424,15 +501,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 				const struct dc_transfer_func *output_tf,
 				struct pwl_params *lut_params)
 {
-	struct curve_points *arr_points;
+	struct curve_points3 *corner_points;
 	struct pwl_result_data *rgb_resulted;
 	struct pwl_result_data *rgb;
 	struct pwl_result_data *rgb_plus_1;
-	struct fixed31_32 y_r;
-	struct fixed31_32 y_g;
-	struct fixed31_32 y_b;
-	struct fixed31_32 y1_min;
-	struct fixed31_32 y3_max;
 
 	int32_t region_start, region_end;
 	int32_t i;
@@ -443,7 +515,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
 	PERF_TRACE();
 
-	arr_points = lut_params->arr_points;
+	corner_points = lut_params->corner_points;
 	rgb_resulted = lut_params->rgb_resulted;
 	hw_points = 0;
 
@@ -489,31 +561,28 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
-	arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+	corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 					     dc_fixpt_from_int(region_start));
-	arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+	corner_points[0].green.x = corner_points[0].red.x;
+	corner_points[0].blue.x = corner_points[0].red.x;
+	corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 					     dc_fixpt_from_int(region_end));
+	corner_points[1].green.x = corner_points[1].red.x;
+	corner_points[1].blue.x = corner_points[1].red.x;
 
-	y_r = rgb_resulted[0].red;
-	y_g = rgb_resulted[0].green;
-	y_b = rgb_resulted[0].blue;
-
-	y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
-
-	arr_points[0].y = y1_min;
-	arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
-	y_r = rgb_resulted[hw_points - 1].red;
-	y_g = rgb_resulted[hw_points - 1].green;
-	y_b = rgb_resulted[hw_points - 1].blue;
+	corner_points[0].red.y = rgb_resulted[0].red;
+	corner_points[0].green.y = rgb_resulted[0].green;
+	corner_points[0].blue.y = rgb_resulted[0].blue;
 
 	/* see comment above, m_arrPoints[1].y should be the Y value for the
 	 * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
 	 */
-	y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
-	arr_points[1].y = y3_max;
-
-	arr_points[1].slope = dc_fixpt_zero;
+	corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+	corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+	corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+	corner_points[1].red.slope = dc_fixpt_zero;
+	corner_points[1].green.slope = dc_fixpt_zero;
+	corner_points[1].blue.slope = dc_fixpt_zero;
 
 	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
 		/* for PQ, we want to have a straight line from last HW X point,
@@ -522,9 +591,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 		const struct fixed31_32 end_value =
 				dc_fixpt_from_int(125);
 
-		arr_points[1].slope = dc_fixpt_div(
-			dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
-			dc_fixpt_sub(end_value, arr_points[1].x));
+		corner_points[1].red.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+			dc_fixpt_sub(end_value, corner_points[1].red.x));
+		corner_points[1].green.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+			dc_fixpt_sub(end_value, corner_points[1].green.x));
+		corner_points[1].blue.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+			dc_fixpt_sub(end_value, corner_points[1].blue.x));
 	}
 
 	lut_params->hw_points_num = hw_points;
@@ -564,7 +639,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 		++i;
 	}
 	cm_helper_convert_to_custom_float(rgb_resulted,
-						lut_params->arr_points,
+						lut_params->corner_points,
 						hw_points, false);
 
 	return true;
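[The corner_points accesses above imply one struct curve_points per color channel. A sketch of the layout this rewrite assumes (the actual definition lives in the shared hwss headers):

struct curve_points3 {
	struct curve_points red;
	struct curve_points green;
	struct curve_points blue;
};
]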
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 7a531b0..5ae4d69 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -98,7 +98,7 @@ void cm_helper_program_xfer_func(
 
 bool cm_helper_convert_to_custom_float(
 		struct pwl_result_data *rgb_resulted,
-		struct curve_points *arr_points,
+		struct curve_points3 *corner_points,
 		uint32_t hw_points_num,
 		bool fixpoint);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 193184a..87495de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,6 +45,7 @@
 #include "dcn10_hubbub.h"
 #include "dcn10_cm_common.h"
 #include "dc_link_dp.h"
+#include "dccg.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -786,7 +787,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 			&dc->current_state->res_ctx.pipe_ctx[i];
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
-			if (hubp != NULL) {
+			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
 					/* one pipe underflow, we will reset all the pipes*/
 					need_recover = true;
@@ -812,7 +813,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
-			if (hubp != NULL)
+			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
 				hubp->funcs->set_hubp_blank_en(hubp, true);
 		}
 	}
@@ -825,7 +826,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
-			if (hubp != NULL)
+			if (hubp != NULL && hubp->funcs->hubp_disable_control)
 				hubp->funcs->hubp_disable_control(hubp, true);
 		}
 	}
@@ -835,7 +836,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
-			if (hubp != NULL)
+			if (hubp != NULL && hubp->funcs->hubp_disable_control)
 				hubp->funcs->hubp_disable_control(hubp, true);
 		}
 	}
@@ -847,7 +848,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
-			if (hubp != NULL)
+			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
 				hubp->funcs->set_hubp_blank_en(hubp, true);
 		}
 	}
@@ -1126,7 +1127,7 @@ static void dcn10_init_hw(struct dc *dc)
 
 	enable_power_gating_plane(dc->hwseq, true);
 
-	memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
+	memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
 }
 
 static void reset_hw_ctx_wrap(
@@ -1603,7 +1604,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
 }
 
 
-static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
 {
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
 	struct vm_system_aperture_param apt = { {{ 0 } } };
@@ -1703,33 +1704,22 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
 }
 
-
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
-		enum dc_color_space colorspace,
-		uint16_t *matrix)
-{
-	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
-			if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
-				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
-	} else {
-		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
-			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
-	}
-}
-
 static void dcn10_program_output_csc(struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		enum dc_color_space colorspace,
 		uint16_t *matrix,
 		int opp_id)
 {
-	if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
-		program_csc_matrix(pipe_ctx,
-				colorspace,
-				matrix);
+	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
+		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
+			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
+	} else {
+		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
+			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
+	}
 }
 
-static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
 	if (pipe_ctx->plane_state->visible)
 		return true;
@@ -1738,7 +1728,7 @@ static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 	return false;
 }
 
-static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
 	if (pipe_ctx->plane_state->visible)
 		return true;
@@ -1747,7 +1737,7 @@ static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 	return false;
 }
 
-static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
 	if (pipe_ctx->plane_state->visible)
 		return true;
@@ -1943,10 +1933,6 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	struct mpc *mpc = dc->res_pool->mpc;
 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
-
-
-	/* TODO: proper fix once fpga works */
-
 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
 		dcn10_get_hdr_visual_confirm_color(
 				pipe_ctx, &blnd_cfg.black_color);
@@ -2026,8 +2012,6 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
 	bool per_pixel_alpha =
 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
 
-	/* TODO: proper fix once fpga works */
-
 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
 	/* scaler configuration */
@@ -2035,7 +2019,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
 }
 
-static void update_dchubp_dpp(
+void update_dchubp_dpp(
 	struct dc *dc,
 	struct pipe_ctx *pipe_ctx,
 	struct dc_state *context)
@@ -2052,16 +2036,22 @@ static void update_dchubp_dpp(
 	 */
 	if (plane_state->update_flags.bits.full_update) {
 		bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
-				dc->res_pool->dccg->clks.dispclk_khz / 2;
+				dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
 
 		dpp->funcs->dpp_dppclk_control(
 				dpp,
 				should_divided_by_2,
 				true);
 
-		dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
-						dc->res_pool->dccg->clks.dispclk_khz / 2 :
-							dc->res_pool->dccg->clks.dispclk_khz;
+		if (dc->res_pool->dccg)
+			dc->res_pool->dccg->funcs->update_dpp_dto(
+					dc->res_pool->dccg,
+					dpp->inst,
+					pipe_ctx->plane_res.bw.calc.dppclk_khz);
+		else
+			dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
+						dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
+							dc->res_pool->clk_mgr->clks.dispclk_khz;
 	}
 
 	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
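
With the clock manager split out, per-pipe DPPCLK programming prefers the new DCCG DTO path when a dccg object exists, and only falls back to deriving DPPCLK from DISPCLK in the clock manager otherwise. As a worked example: with dispclk at 600000 kHz and a computed per-pipe requirement of 250000 kHz, the requirement fits under dispclk/2, so the fallback path engages the divider and records dppclk as 300000 kHz, while the DCCG path programs the DTO for that pipe with the exact 250000 kHz request.
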
@@ -2182,7 +2172,7 @@ static void dcn10_blank_pixel_data(
 	}
 }
 
-static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
 {
 	struct fixed31_32 multiplier = dc_fixpt_from_fraction(
 			pipe_ctx->plane_state->sdr_white_level, 80);
@@ -2257,47 +2247,7 @@ static void program_all_pipe_in_tree(
 	}
 }
 
-static void dcn10_pplib_apply_display_requirements(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
-	pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
-	pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
-	pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
-	pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
-	pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
-	pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
-	dce110_fill_display_configs(context, pp_display_cfg);
-
-	if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
-			struct dm_pp_display_configuration)) !=  0)
-		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
-	dc->prev_display_config = *pp_display_cfg;
-}
-
-static void optimize_shared_resources(struct dc *dc)
-{
-	if (dc->current_state->stream_count == 0) {
-		/* S0i2 message */
-		dcn10_pplib_apply_display_requirements(dc, dc->current_state);
-	}
-
-	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
-		dcn_bw_notify_pplib_of_wm_ranges(dc);
-}
-
-static void ready_shared_resources(struct dc *dc, struct dc_state *context)
-{
-	/* S0i2 message */
-	if (dc->current_state->stream_count == 0 &&
-			context->stream_count != 0)
-		dcn10_pplib_apply_display_requirements(dc, context);
-}
-
-static struct pipe_ctx *find_top_pipe_for_stream(
+struct pipe_ctx *find_top_pipe_for_stream(
 		struct dc *dc,
 		struct dc_state *context,
 		const struct dc_stream_state *stream)
@@ -2398,10 +2348,9 @@ static void dcn10_apply_ctx_for_surface(
 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
 }
 
-static void dcn10_set_bandwidth(
+static void dcn10_prepare_bandwidth(
 		struct dc *dc,
-		struct dc_state *context,
-		bool safe_to_lower)
+		struct dc_state *context)
 {
 	if (dc->debug.sanity_checks)
 		dcn10_verify_allow_pstate_change_high(dc);
@@ -2410,12 +2359,10 @@ static void dcn10_set_bandwidth(
 		if (context->stream_count == 0)
 			context->bw.dcn.clk.phyclk_khz = 0;
 
-		dc->res_pool->dccg->funcs->update_clocks(
-				dc->res_pool->dccg,
-				&context->bw.dcn.clk,
-				safe_to_lower);
-
-		dcn10_pplib_apply_display_requirements(dc, context);
+		dc->res_pool->clk_mgr->funcs->update_clocks(
+				dc->res_pool->clk_mgr,
+				context,
+				false);
 	}
 
 	hubbub1_program_watermarks(dc->res_pool->hubbub,
@@ -2423,6 +2370,38 @@ static void dcn10_set_bandwidth(
 			dc->res_pool->ref_clock_inKhz / 1000,
 			true);
 
+	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+		dcn_bw_notify_pplib_of_wm_ranges(dc);
+
+	if (dc->debug.sanity_checks)
+		dcn10_verify_allow_pstate_change_high(dc);
+}
+
+static void dcn10_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	if (dc->debug.sanity_checks)
+		dcn10_verify_allow_pstate_change_high(dc);
+
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+		if (context->stream_count == 0)
+			context->bw.dcn.clk.phyclk_khz = 0;
+
+		dc->res_pool->clk_mgr->funcs->update_clocks(
+				dc->res_pool->clk_mgr,
+				context,
+				true);
+	}
+
+	hubbub1_program_watermarks(dc->res_pool->hubbub,
+			&context->bw.dcn.watermarks,
+			dc->res_pool->ref_clock_inKhz / 1000,
+			true);
+
+	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+		dcn_bw_notify_pplib_of_wm_ranges(dc);
+
 	if (dc->debug.sanity_checks)
 		dcn10_verify_allow_pstate_change_high(dc);
 }
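
The old set_bandwidth(dc, context, safe_to_lower) entry point is replaced by two explicit hooks: prepare_bandwidth raises clocks before a programming sequence (update_clocks with safe_to_lower = false) and optimize_bandwidth relaxes them once the new state is committed (safe_to_lower = true). A caller-side sketch, where the middle step is a hypothetical placeholder for the actual pipe programming:

    /* Raise clocks for the worst case, program the new state, then relax. */
    dc->hwss.prepare_bandwidth(dc, context);
    program_pipes(dc, context);        /* illustrative placeholder */
    dc->hwss.optimize_bandwidth(dc, context);
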
@@ -2694,7 +2673,6 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
 
 static const struct hw_sequencer_funcs dcn10_funcs = {
 	.program_gamut_remap = program_gamut_remap,
-	.program_csc_matrix = program_csc_matrix,
 	.init_hw = dcn10_init_hw,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
@@ -2721,7 +2699,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.disable_plane = dcn10_disable_plane,
 	.blank_pixel_data = dcn10_blank_pixel_data,
 	.pipe_control_lock = dcn10_pipe_control_lock,
-	.set_bandwidth = dcn10_set_bandwidth,
+	.prepare_bandwidth = dcn10_prepare_bandwidth,
+	.optimize_bandwidth = dcn10_optimize_bandwidth,
 	.reset_hw_ctx_wrap = reset_hw_ctx_wrap,
 	.enable_stream_timing = dcn10_enable_stream_timing,
 	.set_drr = set_drr,
@@ -2732,10 +2711,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.log_hw_state = dcn10_log_hw_state,
 	.get_hw_state = dcn10_get_hw_state,
 	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
-	.ready_shared_resources = ready_shared_resources,
-	.optimize_shared_resources = optimize_shared_resources,
-	.pplib_apply_display_requirements =
-			dcn10_pplib_apply_display_requirements,
 	.edp_backlight_control = hwss_edp_backlight_control,
 	.edp_power_control = hwss_edp_power_control,
 	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 84d461e..5e5610c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -51,4 +51,24 @@ void dcn10_get_hw_state(
 		char *pBuf, unsigned int bufSize,
 		unsigned int mask);
 
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
+
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+
+void update_dchubp_dpp(
+	struct dc *dc,
+	struct pipe_ctx *pipe_ctx,
+	struct dc_state *context);
+
+struct pipe_ctx *find_top_pipe_for_stream(
+		struct dc *dc,
+		struct dc_state *context,
+		const struct dc_stream_state *stream);
+
 #endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index ba6a8686..477ab92 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -589,7 +589,7 @@ static bool dcn10_link_encoder_validate_hdmi_output(
 		return false;
 
 	/* DCE11 HW does not support 420 */
-	if (!enc10->base.features.ycbcr420_supported &&
+	if (!enc10->base.features.hdmi_ycbcr420_supported &&
 			crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 		return false;
 
@@ -606,8 +606,10 @@ bool dcn10_link_encoder_validate_dp_output(
 	const struct dcn10_link_encoder *enc10,
 	const struct dc_crtc_timing *crtc_timing)
 {
-	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
-		return false;
+	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+		if (!enc10->base.features.dp_ycbcr420_supported)
+			return false;
+	}
 
 	return true;
 }
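
Splitting ycbcr420_supported into hdmi_ycbcr420_supported and dp_ycbcr420_supported lets an encoder advertise YCbCr 4:2:0 per protocol: DCN10 keeps it enabled for HDMI and disabled for DP, and validate_dp_output now consults the flag instead of rejecting 4:2:0 unconditionally. A feature table for a hypothetical encoder supporting 4:2:0 on both links might look like:

    static const struct encoder_feature_support enc_features = {
            .max_hdmi_deep_color = COLOR_DEPTH_121212,
            .max_hdmi_pixel_clock = 600000,
            .hdmi_ycbcr420_supported = true,
            .dp_ycbcr420_supported = true,  /* illustrative; DCN10 sets false */
    };
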
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 5462668..7d1f667 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -87,9 +87,8 @@ static void optc1_disable_stereo(struct timing_generator *optc)
 	REG_SET(OTG_STEREO_CONTROL, 0,
 		OTG_STEREO_EN, 0);
 
-	REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0,
+	REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0,
 		OTG_3D_STRUCTURE_EN, 0,
-		OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
 		OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
 }
 
@@ -274,10 +273,12 @@ void optc1_program_timing(
 	 * program the reg for interrupt position.
 	 */
 	vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
-	if (vertical_line_start < 0) {
-		ASSERT(0);
+	v_fp2 = 0;
+	if (vertical_line_start < 0) {
+		v_fp2 = -vertical_line_start;
+		vertical_line_start = 0;
+	}
-	}
+
 	REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
 			OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
 
@@ -296,9 +297,6 @@ void optc1_program_timing(
 		if (patched_crtc_timing.flags.INTERLACE == 1)
 			field_num = 1;
 	}
-	v_fp2 = 0;
-	if (optc->dlg_otg_param.vstartup_start > asic_blank_end)
-		v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end;
 
 	/* Interlace */
 	if (patched_crtc_timing.flags.INTERLACE == 1) {
@@ -1155,9 +1153,8 @@ static void optc1_enable_stereo(struct timing_generator *optc,
 				OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1);
 
 		if (flags->PROGRAM_STEREO)
-			REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL,
+			REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL,
 				OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED,
-				OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED,
 				OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
 
 	}
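
The optc1_program_timing change replaces an assert-and-clamp with a proper front-porch compensation: the old code computed v_fp2 from a boolean comparison (so it was always 0 or 1) and asserted when vstartup_start reached past the blank end, while the new code folds the overshoot into v_fp2. As a worked example, with asic_blank_end = 35 and vstartup_start = 40, vertical_line_start = 35 - 40 + 1 = -4, so v_fp2 becomes 4 and the interrupt line position is clamped to 0.
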
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index a71453a..47dbe4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -28,23 +28,23 @@
 
 #include "resource.h"
 #include "include/irq_service_interface.h"
-#include "dcn10/dcn10_resource.h"
+#include "dcn10_resource.h"
 
-#include "dcn10/dcn10_ipp.h"
-#include "dcn10/dcn10_mpc.h"
+#include "dcn10_ipp.h"
+#include "dcn10_mpc.h"
 #include "irq/dcn10/irq_service_dcn10.h"
-#include "dcn10/dcn10_dpp.h"
+#include "dcn10_dpp.h"
 #include "dcn10_optc.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn10_hw_sequencer.h"
 #include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_opp.h"
-#include "dcn10/dcn10_link_encoder.h"
-#include "dcn10/dcn10_stream_encoder.h"
-#include "dce/dce_clocks.h"
+#include "dcn10_opp.h"
+#include "dcn10_link_encoder.h"
+#include "dcn10_stream_encoder.h"
+#include "dcn10_clk_mgr.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
-#include "../virtual/virtual_stream_encoder.h"
+#include "virtual/virtual_stream_encoder.h"
 #include "dce110/dce110_resource.h"
 #include "dce112/dce112_resource.h"
 #include "dcn10_hubp.h"
@@ -438,6 +438,7 @@ static const struct dcn_optc_mask tg_mask = {
 
 
 static const struct bios_registers bios_regs = {
+		NBIO_SR(BIOS_SCRATCH_0),
 		NBIO_SR(BIOS_SCRATCH_3),
 		NBIO_SR(BIOS_SCRATCH_6)
 };
@@ -719,7 +720,8 @@ static struct timing_generator *dcn10_timing_generator_create(
 static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_deep_color = COLOR_DEPTH_121212,
 		.max_hdmi_pixel_clock = 600000,
-		.ycbcr420_supported = true,
+		.hdmi_ycbcr420_supported = true,
+		.dp_ycbcr420_supported = false,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,
@@ -949,8 +951,8 @@ static void destruct(struct dcn10_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	kfree(pool->base.pp_smu);
 }
@@ -1276,8 +1278,8 @@ static bool construct(
 		}
 	}
 
-	pool->base.dccg = dcn1_dccg_create(ctx);
-	if (pool->base.dccg == NULL) {
+	pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index f2ea845..beb08fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -55,10 +55,10 @@ struct pp_smu {
 
 struct pp_smu_wm_set_range {
 	unsigned int wm_inst;
-	uint32_t min_fill_clk_khz;
-	uint32_t max_fill_clk_khz;
-	uint32_t min_drain_clk_khz;
-	uint32_t max_drain_clk_khz;
+	uint32_t min_fill_clk_mhz;
+	uint32_t max_fill_clk_mhz;
+	uint32_t min_drain_clk_mhz;
+	uint32_t max_drain_clk_mhz;
 };
 
 #define MAX_WATERMARK_SETS 4
@@ -77,15 +77,15 @@ struct pp_smu_display_requirement_rv {
 	 */
 	unsigned int display_count;
 
-	/* PPSMC_MSG_SetHardMinFclkByFreq: khz
+	/* PPSMC_MSG_SetHardMinFclkByFreq: mhz
 	 *  FCLK will vary with DPM, but never below requested hard min
 	 */
-	unsigned int hard_min_fclk_khz;
+	unsigned int hard_min_fclk_mhz;
 
-	/* PPSMC_MSG_SetHardMinDcefclkByFreq: khz
+	/* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz
 	 *  fixed clock at requested freq, either from FCH bypass or DFS
 	 */
-	unsigned int hard_min_dcefclk_khz;
+	unsigned int hard_min_dcefclk_mhz;
 
 	/* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
 	 *  when DF is in cstate, dcf clock is further divided down
@@ -103,13 +103,19 @@ struct pp_smu_funcs_rv {
 	void (*set_display_count)(struct pp_smu *pp, int count);
 
 	/* which SMU message?  are reader and writer WM separate SMU msg? */
+	/*
+	 * PPSMC_MSG_SetDriverDramAddrHigh
+	 * PPSMC_MSG_SetDriverDramAddrLow
+	 * PPSMC_MSG_TransferTableDram2Smu
+	 */
 	void (*set_wm_ranges)(struct pp_smu *pp,
 			struct pp_smu_wm_range_sets *ranges);
 
 	/* PPSMC_MSG_SetHardMinDcfclkByFreq
 	 * fixed clock at requested freq, either from FCH bypass or DFS
 	 */
-	void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz);
+	void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz);
 
 	/* PPSMC_MSG_SetMinDeepSleepDcfclk
 	 * when DF is in cstate, dcf clock is further divided down
@@ -120,12 +126,12 @@ struct pp_smu_funcs_rv {
 	/* PPSMC_MSG_SetHardMinFclkByFreq
 	 * FCLK will vary with DPM, but never below requested hard min
 	 */
-	void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
+	void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz);
 
 	/* PPSMC_MSG_SetHardMinSocclkByFreq
 	 * Needed for DWB support
 	 */
-	void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz);
+	void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz);
 
 	/* PME w/a */
 	void (*set_pme_wa_enable)(struct pp_smu *pp);
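
The pp_smu interface now carries all fill/drain and hard-minimum clocks in MHz, matching the units the SMU firmware messages actually take, instead of kHz. Any caller still holding kHz values has to convert at the boundary; a caller-side sketch, with variable names assumed from the surrounding clock-manager code rather than taken from this patch:

    /* clk_mgr state is tracked in kHz; divide down at the SMU boundary. */
    pp_smu->set_hard_min_dcfclk_by_freq(pp, new_clocks->dcfclk_khz / 1000);
    pp_smu->set_hard_min_fclk_by_freq(pp, new_clocks->fclk_khz / 1000);
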
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index 2b83f92..1af8c77 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -208,22 +208,20 @@ struct dm_bl_data_point {
 		/* Brightness level as effective value in range 0-255,
 		 * corresponding to above percentage
 		 */
-		uint8_t signalLevel;
+		uint8_t signal_level;
 };
 
 /* Total size of the structure should not exceed 256 bytes */
 struct dm_acpi_atif_backlight_caps {
-
-
 	uint16_t size; /* Bytes 0-1 (2 bytes) */
 	uint16_t flags; /* Bytes 2-3 (2 bytes) */
-	uint8_t  errorCode; /* Byte 4 */
-	uint8_t  acLevelPercentage; /* Byte 5 */
-	uint8_t  dcLevelPercentage; /* Byte 6 */
-	uint8_t  minInputSignal; /* Byte 7 */
-	uint8_t  maxInputSignal; /* Byte 8 */
-	uint8_t  numOfDataPoints; /* Byte 9 */
-	struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/
+	uint8_t  error_code; /* Byte 4 */
+	uint8_t  ac_level_percentage; /* Byte 5 */
+	uint8_t  dc_level_percentage; /* Byte 6 */
+	uint8_t  min_input_signal; /* Byte 7 */
+	uint8_t  max_input_signal; /* Byte 8 */
+	uint8_t  num_data_points; /* Byte 9 */
+	struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/
 };
 
 enum dm_acpi_display_type {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index cbafce6..5dd0452 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -113,7 +113,8 @@ struct _vcs_dpi_soc_bounding_box_st {
 	int use_urgent_burst_bw;
 	double max_hscl_ratio;
 	double max_vscl_ratio;
-	struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+	unsigned int num_states;
+	struct _vcs_dpi_voltage_scaling_st clock_limits[8];
 };
 
 struct _vcs_dpi_ip_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
index 39ee8eba3..d1656c9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
@@ -126,7 +126,7 @@ static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw
 static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
 {
 	struct bw_fixed res;
-	div64_u64_rem(arg1.value, arg2.value, &res.value);
+	div64_u64_rem(arg1.value, arg2.value, (uint64_t *)&res.value);
 	return res;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c1976c1..e3ee96a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
 
 void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
 /********** DAL Core*********************/
-#include "display_clock.h"
+#include "hw/clk_mgr.h"
 #include "transform.h"
 #include "dpp.h"
 
@@ -169,6 +169,7 @@ struct resource_pool {
 	unsigned int audio_count;
 	struct audio_support audio_support;
 
+	struct clk_mgr *clk_mgr;
 	struct dccg *dccg;
 	struct irq_service *irqs;
 
@@ -287,7 +288,7 @@ struct dc_state {
 	struct dcn_bw_internal_vars dcn_bw_vars;
 #endif
 
-	struct dccg *dis_clk;
+	struct clk_mgr *dccg;
 
 	struct kref refcount;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index e688eb9..ece954a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -31,8 +31,8 @@
 #define __DCN_CALCS_H__
 
 #include "bw_fixed.h"
-#include "display_clock.h"
 #include "../dml/display_mode_lib.h"
+#include "hw/clk_mgr.h"
 
 struct dc;
 struct dc_state;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index a83a484..abc961c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -47,12 +47,18 @@ struct abm_funcs {
 	bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
 	bool (*set_abm_immediate_disable)(struct abm *abm);
 	bool (*init_backlight)(struct abm *abm);
-	bool (*set_backlight_level)(struct abm *abm,
-			unsigned int backlight_level,
+
+	/* backlight_pwm_u16_16 is an unsigned 32 bit value,
+	 * 16 bit integer + 16 bit fractional, where 1.0 is max backlight value.
+	 */
+	bool (*set_backlight_level_pwm)(struct abm *abm,
+			unsigned int backlight_pwm_u16_16,
 			unsigned int frame_ramp,
 			unsigned int controller_id,
 			bool use_smooth_brightness);
-	unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
+
+	unsigned int (*get_current_backlight)(struct abm *abm);
+	unsigned int (*get_target_backlight)(struct abm *abm);
 };
 
 #endif
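
set_backlight_level_pwm takes its target as unsigned 16.16 fixed point, where 0x00010000 represents full brightness, and the single get_current_backlight_8_bit readback is split into separate current and target queries. Converting a conventional 0-255 user brightness into this format could look like the following (purely illustrative, not code from this patch):

    /* Scale 0..255 into u16.16 so that 255 maps to 1.0 (0x00010000). */
    static inline unsigned int brightness_to_pwm_u16_16(unsigned int br)
    {
            return (br * 0x10000) / 255;
    }
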
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
similarity index 63%
rename from drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
rename to drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 689faa1..23a4b18 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -23,41 +23,25 @@
  *
  */
 
-#ifndef __DISPLAY_CLOCK_H__
-#define __DISPLAY_CLOCK_H__
+#ifndef __DAL_CLK_MGR_H__
+#define __DAL_CLK_MGR_H__
 
 #include "dm_services_types.h"
 #include "dc.h"
 
-/* Structure containing all state-dependent clocks
- * (dependent on "enum clocks_state") */
-struct state_dependent_clocks {
-	int display_clk_khz;
-	int pixel_clk_khz;
-};
-
-struct dccg {
+struct clk_mgr {
 	struct dc_context *ctx;
-	const struct display_clock_funcs *funcs;
+	const struct clk_mgr_funcs *funcs;
 
-	enum dm_pp_clocks_state max_clks_state;
-	enum dm_pp_clocks_state cur_min_clks_state;
 	struct dc_clocks clks;
 };
 
-struct display_clock_funcs {
-	void (*update_clocks)(struct dccg *dccg,
-			struct dc_clocks *new_clocks,
+struct clk_mgr_funcs {
+	void (*update_clocks)(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
 			bool safe_to_lower);
-	int (*set_dispclk)(struct dccg *dccg,
-		int requested_clock_khz);
 
-	int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
-
-	bool (*update_dfs_bypass)(struct dccg *dccg,
-		struct dc *dc,
-		struct dc_state *context,
-		int requested_clock_khz);
+	int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
 };
 
-#endif /* __DISPLAY_CLOCK_H__ */
+#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
new file mode 100644
index 0000000..95a56d0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCCG_H__
+#define __DAL_DCCG_H__
+
+#include "dc_types.h"
+
+struct dccg {
+	struct dc_context *ctx;
+	const struct dccg_funcs *funcs;
+
+	int ref_dppclk;
+};
+
+struct dccg_funcs {
+	void (*update_dpp_dto)(struct dccg *dccg,
+			int dpp_inst,
+			int req_dppclk);
+};
+
+#endif //__DAL_DCCG_H__
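
The new dccg object is deliberately small: it knows its DPP reference clock and exposes a single update_dpp_dto hook for programming per-pipe DTOs. A minimal provider might be shaped like this sketch, with the register programming stubbed out as an assumption:

    /* Sketch of a dccg implementation; names and register details are
     * illustrative only.
     */
    static void my_dccg_update_dpp_dto(struct dccg *dccg, int dpp_inst,
                                       int req_dppclk)
    {
            /* Program phase/modulo so pipe dpp_inst runs at req_dppclk,
             * derived from dccg->ref_dppclk. Hardware specifics omitted.
             */
    }

    static const struct dccg_funcs my_dccg_funcs = {
            .update_dpp_dto = my_dccg_update_dpp_dto,
    };
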
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index cf7433e..da85537 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -53,6 +53,12 @@ struct curve_points {
 	uint32_t custom_float_slope;
 };
 
+struct curve_points3 {
+	struct curve_points red;
+	struct curve_points green;
+	struct curve_points blue;
+};
+
 struct pwl_result_data {
 	struct fixed31_32 red;
 	struct fixed31_32 green;
@@ -71,9 +77,17 @@ struct pwl_result_data {
 	uint32_t delta_blue_reg;
 };
 
+/* arr_curve_points - regamma regions/segments specification
+ * arr_points - beginning and end point specified separately (only one on DCE)
+ * corner_points - beginning and end point for all 3 colors (DCN)
+ * rgb_resulted - final curve
+ */
 struct pwl_params {
 	struct gamma_curve arr_curve_points[34];
-	struct curve_points arr_points[2];
+	union {
+		struct curve_points arr_points[2];
+		struct curve_points3 corner_points[2];
+	};
 	struct pwl_result_data rgb_resulted[256 + 3];
 	uint32_t hw_points_num;
 };
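
Because DCE programs one begin/end pair for all channels while DCN programs red, green and blue corner points separately, pwl_params overlays the two layouts in a union: arr_points[2] and corner_points[2] occupy the same storage (curve_points3 is exactly three curve_points), and each generation reads the member matching its hardware. For instance, DCN-side code can reach the blue end point as in this sketch:

    /* DCN accessor sketch; index 1 is the end corner point. */
    static struct curve_points *pwl_blue_end_point(struct pwl_params *p)
    {
            return &p->corner_points[1].blue;
    }
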
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index e28e977..c20fdca 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -65,7 +65,8 @@ struct encoder_feature_support {
 
 	enum dc_color_depth max_hdmi_deep_color;
 	unsigned int max_hdmi_pixel_clock;
-	bool ycbcr420_supported;
+	bool hdmi_ycbcr420_supported;
+	bool dp_ycbcr420_supported;
 };
 
 union dpcd_psr_configuration {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index da89c2e..06df02d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -31,7 +31,7 @@
 #include "dml/display_mode_structs.h"
 
 struct dchub_init_data;
-struct cstate_pstate_watermarks_st {
+struct cstate_pstate_watermarks_st1 {
 	uint32_t cstate_exit_ns;
 	uint32_t cstate_enter_plus_exit_ns;
 	uint32_t pstate_change_ns;
@@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st {
 struct dcn_watermarks {
 	uint32_t pte_meta_urgent_ns;
 	uint32_t urgent_ns;
-	struct cstate_pstate_watermarks_st cstate_pstate;
+	struct cstate_pstate_watermarks_st1 cstate_pstate;
 };
 
 struct dcn_watermark_set {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 26f29d5..e9b702c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,8 +32,6 @@
 #include "inc/hw/link_encoder.h"
 #include "core_status.h"
 
-#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF
-
 enum pipe_gating_control {
 	PIPE_GATING_CONTROL_DISABLE = 0,
 	PIPE_GATING_CONTROL_ENABLE,
@@ -87,11 +85,6 @@ struct hw_sequencer_funcs {
 	void (*program_gamut_remap)(
 			struct pipe_ctx *pipe_ctx);
 
-	void (*program_csc_matrix)(
-			struct pipe_ctx *pipe_ctx,
-			enum dc_color_space colorspace,
-			uint16_t *matrix);
-
 	void (*program_output_csc)(struct dc *dc,
 			struct pipe_ctx *pipe_ctx,
 			enum dc_color_space colorspace,
@@ -177,10 +170,12 @@ struct hw_sequencer_funcs {
 			struct pipe_ctx *pipe_ctx,
 			bool blank);
 
-	void (*set_bandwidth)(
+	void (*prepare_bandwidth)(
 			struct dc *dc,
-			struct dc_state *context,
-			bool safe_to_lower);
+			struct dc_state *context);
+	void (*optimize_bandwidth)(
+			struct dc *dc,
+			struct dc_state *context);
 
 	void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
 			int vmin, int vmax);
@@ -210,11 +205,6 @@ struct hw_sequencer_funcs {
 			struct resource_pool *res_pool,
 			struct pipe_ctx *pipe_ctx);
 
-	void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
-	void (*optimize_shared_resources)(struct dc *dc);
-	void (*pplib_apply_display_requirements)(
-			struct dc *dc,
-			struct dc_state *context);
 	void (*edp_power_control)(
 			struct dc_link *link,
 			bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 33b99e3..0086a2f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -30,9 +30,6 @@
 #include "dal_asic_id.h"
 #include "dm_pp_smu.h"
 
-/* TODO unhardcode, 4 for CZ*/
-#define MEMORY_TYPE_MULTIPLIER 4
-
 enum dce_version resource_parse_asic_id(
 		struct hw_asic_id asic_id);
 
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index cdcefd0..7480f07 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -306,6 +306,18 @@ static struct fixed31_32 translate_from_linear_space(
 			a1);
 }
 
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+{
+	struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
+
+	return translate_from_linear_space(arg,
+			dc_fixpt_zero,
+			dc_fixpt_zero,
+			dc_fixpt_zero,
+			dc_fixpt_zero,
+			gamma);
+}
+
 static struct fixed31_32 translate_to_linear_space(
 	struct fixed31_32 arg,
 	struct fixed31_32 a0,
@@ -709,6 +721,169 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
 	}
 }
 
+static void hermite_spline_eetf(struct fixed31_32 input_x,
+				struct fixed31_32 max_display,
+				struct fixed31_32 min_display,
+				struct fixed31_32 max_content,
+				struct fixed31_32 *out_x)
+{
+	struct fixed31_32 min_lum_pq;
+	struct fixed31_32 max_lum_pq;
+	struct fixed31_32 max_content_pq;
+	struct fixed31_32 ks;
+	struct fixed31_32 E1;
+	struct fixed31_32 E2;
+	struct fixed31_32 E3;
+	struct fixed31_32 t;
+	struct fixed31_32 t2;
+	struct fixed31_32 t3;
+	struct fixed31_32 two;
+	struct fixed31_32 three;
+	struct fixed31_32 temp1;
+	struct fixed31_32 temp2;
+	struct fixed31_32 a = dc_fixpt_from_fraction(15, 10);
+	struct fixed31_32 b = dc_fixpt_from_fraction(5, 10);
+	struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small
+
+	if (dc_fixpt_eq(max_content, dc_fixpt_zero)) {
+		*out_x = dc_fixpt_zero;
+		return;
+	}
+
+	compute_pq(input_x, &E1);
+	compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq);
+	compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq);
+	compute_pq(dc_fixpt_one, &max_content_pq); // always 1? DAL2 code is weird
+	a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent
+	ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b
+
+	if (dc_fixpt_lt(E1, ks))
+		E2 = E1;
+	else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) {
+		if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks)))
+			// t = (E1 - ks) / (1 - ks)
+			t = dc_fixpt_div(dc_fixpt_sub(E1, ks),
+					dc_fixpt_sub(dc_fixpt_one, ks));
+		else
+			t = dc_fixpt_zero;
+
+		two = dc_fixpt_from_int(2);
+		three = dc_fixpt_from_int(3);
+
+		t2 = dc_fixpt_mul(t, t);
+		t3 = dc_fixpt_mul(t2, t);
+		temp1 = dc_fixpt_mul(two, t3);
+		temp2 = dc_fixpt_mul(three, t2);
+
+		// (2t^3 - 3t^2 + 1) * ks
+		E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one,
+				dc_fixpt_sub(temp1, temp2)));
+
+		// (-2t^3 + 3t^2) * max_lum_pq
+		E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq,
+				dc_fixpt_sub(temp2, temp1)));
+
+		temp1 = dc_fixpt_mul(two, t2);
+		temp2 = dc_fixpt_sub(dc_fixpt_one, ks);
+
+		// (t^3 - 2t^2 + t) * (1-ks)
+		E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2,
+				dc_fixpt_add(t, dc_fixpt_sub(t3, temp1))));
+	} else
+		E2 = dc_fixpt_one;
+
+	temp1 = dc_fixpt_sub(dc_fixpt_one, E2);
+	temp2 = dc_fixpt_mul(temp1, temp1);
+	temp2 = dc_fixpt_mul(temp2, temp2);
+	// temp2 = (1-E2)^4
+
+	E3 =  dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2));
+	compute_de_pq(E3, out_x);
+
+	*out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content));
+}
+
+static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
+		uint32_t hw_points_num,
+		const struct hw_x_point *coordinate_x,
+		const struct freesync_hdr_tf_params *fs_params)
+{
+	uint32_t i;
+	struct pwl_float_data_ex *rgb = rgb_regamma;
+	const struct hw_x_point *coord_x = coordinate_x;
+	struct fixed31_32 scaledX = dc_fixpt_zero;
+	struct fixed31_32 scaledX1 = dc_fixpt_zero;
+	struct fixed31_32 max_display;
+	struct fixed31_32 min_display;
+	struct fixed31_32 max_content;
+	struct fixed31_32 min_content;
+	struct fixed31_32 clip = dc_fixpt_one;
+	struct fixed31_32 output;
+	bool use_eetf = false;
+	bool is_clipped = false;
+	struct fixed31_32 sdr_white_level;
+
+	/* Reject invalid parameters before fs_params is dereferenced. */
+	if (fs_params == NULL || fs_params->max_content == 0 ||
+			fs_params->max_display == 0)
+		return false;
+
+	max_display = dc_fixpt_from_int(fs_params->max_display);
+	min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
+	max_content = dc_fixpt_from_int(fs_params->max_content);
+	min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
+	sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+
+	if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
+		min_display = dc_fixpt_from_fraction(1, 10);
+	if (fs_params->max_display < 100) // cap at 100 at the top
+		max_display = dc_fixpt_from_int(100);
+
+	if (fs_params->min_content < fs_params->min_display)
+		use_eetf = true;
+	else
+		min_content = min_display;
+
+	if (fs_params->max_content > fs_params->max_display)
+		use_eetf = true;
+	else
+		max_content = max_display;
+
+	rgb += 32; // skip the first 32 points: their x values are too small for fixed point
+	coord_x += 32;
+	for (i = 32; i <= hw_points_num; i++) {
+		if (!is_clipped) {
+			if (use_eetf) {
+				/* max content is equal to 1 */
+				scaledX1 = dc_fixpt_div(coord_x->x,
+						dc_fixpt_div(max_content, sdr_white_level));
+				hermite_spline_eetf(scaledX1, max_display, min_display,
+						max_content, &scaledX);
+			} else
+				scaledX = dc_fixpt_div(coord_x->x,
+						dc_fixpt_div(max_display, sdr_white_level));
+
+			if (dc_fixpt_lt(scaledX, clip)) {
+				if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
+					output = dc_fixpt_zero;
+				else
+					output = calculate_gamma22(scaledX);
+
+				rgb->r = output;
+				rgb->g = output;
+				rgb->b = output;
+			} else {
+				is_clipped = true;
+				rgb->r = clip;
+				rgb->g = clip;
+				rgb->b = clip;
+			}
+		} else {
+			rgb->r = clip;
+			rgb->g = clip;
+			rgb->b = clip;
+		}
+
+		++coord_x;
+		++rgb;
+	}
+
+	return true;
+}
+
 static void build_degamma(struct pwl_float_data_ex *curve,
 		uint32_t hw_points_num,
 		const struct hw_x_point *coordinate_x, bool is_2_4)
@@ -1356,7 +1531,8 @@ static bool map_regamma_hw_to_x_user(
 #define _EXTRA_POINTS 3
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
-		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed)
+		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+		const struct freesync_hdr_tf_params *fs_params)
 {
 	struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
 	struct dividers dividers;
@@ -1374,7 +1550,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 	/* we can use hardcoded curve for plain SRGB TF */
 	if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
 			output_tf->tf == TRANSFER_FUNCTION_SRGB &&
-			(!mapUserRamp && ramp->type == GAMMA_RGB_256))
+			(ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256)))
 		return true;
 
 	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
@@ -1424,6 +1600,12 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 				MAX_HW_POINTS,
 				coordinates_x,
 				output_tf->sdr_ref_white_level);
+	} else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
+			fs_params != NULL) {
+		build_freesync_hdr(rgb_regamma,
+				MAX_HW_POINTS,
+				coordinates_x,
+				fs_params);
 	} else {
 		tf_pts->end_exponent = 0;
 		tf_pts->x_point_at_y1_red = 1;
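
hermite_spline_eetf appears to implement a BT.2390-style tone-mapping EETF in PQ space. Assuming that reading of the code, with $p_{max} = \mathrm{PQ}(L_{disp}/L_{cont})$, $p_{min} = \mathrm{PQ}(L_{min}/L_{cont})$, $b = 0.5$, $a = (1+b)/\mathrm{PQ}(1) \approx 1.5$, knee $k_s = a\,p_{max} - b$ and $t = (E_1 - k_s)/(1 - k_s)$:

$$E_2 = \begin{cases}
E_1 & E_1 < k_s \\
(2t^3 - 3t^2 + 1)\,k_s + (t^3 - 2t^2 + t)(1 - k_s) + (-2t^3 + 3t^2)\,p_{max} & k_s \le E_1 \le 1 \\
1 & \text{otherwise}
\end{cases}$$

$$E_3 = E_2 + p_{min}\,(1 - E_2)^4$$

The output is $\mathrm{PQ}^{-1}(E_3)$ rescaled by $L_{cont}/L_{disp}$. build_freesync_hdr then feeds each scaled x coordinate through this EETF (or a plain scale when content fits the display) and applies the gamma-2.2 regamma, clipping once the curve reaches 1.0.
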
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 63ccb9c..a6e164d 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -73,12 +73,21 @@ struct regamma_lut {
 	};
 };
 
+struct freesync_hdr_tf_params {
+	unsigned int sdr_white_level;
+	unsigned int min_content; // luminance in 1/10000 nits
+	unsigned int max_content; // luminance in nits
+	unsigned int min_display; // luminance in 1/10000 nits
+	unsigned int max_display; // luminance in nits
+};
+
 void setup_x_points_distribution(void);
 void precompute_pq(void);
 void precompute_de_pq(void);
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
-		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed);
+		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+		const struct freesync_hdr_tf_params *fs_params);
 
 bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
 		const struct dc_gamma *ramp, bool mapUserRamp);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 4018c71..620a171 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,6 +37,8 @@
 #define RENDER_TIMES_MAX_COUNT 10
 /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
 #define BTR_EXIT_MARGIN 2000
+/* Threshold to exit fixed refresh rate */
+#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4
 /* Number of consecutive frames to check before entering/exiting fixed refresh*/
 #define FIXED_REFRESH_ENTER_FRAME_COUNT 5
 #define FIXED_REFRESH_EXIT_FRAME_COUNT 5
@@ -257,40 +259,14 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 		if (in_out_vrr->btr.btr_active) {
 			in_out_vrr->btr.frame_counter = 0;
 			in_out_vrr->btr.btr_active = false;
-
-		/* Exit Fixed Refresh mode */
-		} else if (in_out_vrr->fixed.fixed_active) {
-
-			in_out_vrr->fixed.frame_counter++;
-
-			if (in_out_vrr->fixed.frame_counter >
-					FIXED_REFRESH_EXIT_FRAME_COUNT) {
-				in_out_vrr->fixed.frame_counter = 0;
-				in_out_vrr->fixed.fixed_active = false;
-			}
 		}
 	} else if (last_render_time_in_us > max_render_time_in_us) {
 		/* Enter Below the Range */
-		if (!in_out_vrr->btr.btr_active &&
-				in_out_vrr->btr.btr_enabled) {
-			in_out_vrr->btr.btr_active = true;
-
-		/* Enter Fixed Refresh mode */
-		} else if (!in_out_vrr->fixed.fixed_active &&
-				!in_out_vrr->btr.btr_enabled) {
-			in_out_vrr->fixed.frame_counter++;
-
-			if (in_out_vrr->fixed.frame_counter >
-					FIXED_REFRESH_ENTER_FRAME_COUNT) {
-				in_out_vrr->fixed.frame_counter = 0;
-				in_out_vrr->fixed.fixed_active = true;
-			}
-		}
+		in_out_vrr->btr.btr_active = true;
 	}
 
 	/* BTR set to "not active" so disengage */
 	if (!in_out_vrr->btr.btr_active) {
-		in_out_vrr->btr.btr_active = false;
 		in_out_vrr->btr.inserted_duration_in_us = 0;
 		in_out_vrr->btr.frames_to_insert = 0;
 		in_out_vrr->btr.frame_counter = 0;
@@ -375,7 +351,12 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
 	bool update = false;
 	unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
 
-	if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+	/* Compute the exit refresh rate and exit frame duration */
+	unsigned int exit_refresh_rate_in_milli_hz = ((1000000000 / max_render_time_in_us)
+			+ (1000 * FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
+	unsigned int exit_frame_duration_in_us = 1000000000 / exit_refresh_rate_in_milli_hz;
+
+	if (last_render_time_in_us < exit_frame_duration_in_us) {
 		/* Exit Fixed Refresh mode */
 		if (in_out_vrr->fixed.fixed_active) {
 			in_out_vrr->fixed.frame_counter++;
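
The fixed-refresh exit test is now expressed as a refresh-rate margin rather than reusing the BTR time margin. Worked numbers, as a sanity check: for max_render_time_in_us = 25000 (a 40 Hz minimum), exit_refresh_rate_in_milli_hz = 1000000000/25000 + 1000*4 = 44000, and exit_frame_duration_in_us = 1000000000/44000 = 22727, so frames must consistently render faster than ~22.7 ms (44 Hz) before fixed refresh is exited, giving 4 Hz of hysteresis above the minimum.
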
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 64ecffd..58ac0b9 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -205,20 +205,6 @@ struct tile_config {
 /**
  * struct kfd2kgd_calls
  *
- * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
- * The buffer can be used for mqds, hpds, kernel queue, fence and runlists
- *
- * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
- *
- * @get_local_mem_info: Retrieves information about GPU local memory
- *
- * @get_gpu_clock_counter: Retrieves GPU clock counter
- *
- * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
- *
- * @alloc_pasid: Allocate a PASID
- * @free_pasid: Free a PASID
- *
  * @program_sh_mem_settings: A function that should initiate the memory
  * properties such as main aperture memory type (cache / non cached) and
  * secondary aperture base address, size and memory type.
@@ -255,64 +241,16 @@ struct tile_config {
  *
  * @get_tile_config: Returns GPU-specific tiling mode information
  *
- * @get_cu_info: Retrieves activated cu info
- *
- * @get_vram_usage: Returns current VRAM usage
- *
- * @create_process_vm: Create a VM address space for a given process and GPU
- *
- * @destroy_process_vm: Destroy a VM
- *
- * @get_process_page_dir: Get physical address of a VM page directory
- *
  * @set_vm_context_page_table_base: Program page table base for a VMID
  *
- * @alloc_memory_of_gpu: Allocate GPUVM memory
- *
- * @free_memory_of_gpu: Free GPUVM memory
- *
- * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
- * space. Allocates and updates page tables and page directories as
- * needed. This function may return before all page table updates have
- * completed. This allows multiple map operations (on multiple GPUs)
- * to happen concurrently. Use sync_memory to synchronize with all
- * pending updates.
- *
- * @unmap_memor_to_gpu: Unmap GPUVM memory from a specific VM address space
- *
- * @sync_memory: Wait for pending page table updates to complete
- *
- * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access
- * Pins the BO, maps it to kernel address space. Such BOs are never evicted.
- * The kernel virtual address remains valid until the BO is freed.
- *
- * @restore_process_bos: Restore all BOs that belong to the
- * process. This is intended for restoring memory mappings after a TTM
- * eviction.
- *
  * @invalidate_tlbs: Invalidate TLBs for a specific PASID
  *
  * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
  *
- * @submit_ib: Submits an IB to the engine specified by inserting the
- * IB to the corresponding ring (ring type). The IB is executed with the
- * specified VMID in a user mode context.
- *
- * @get_vm_fault_info: Return information about a recent VM fault on
- * GFXv7 and v8. If multiple VM faults occurred since the last call of
- * this function, it will return information about the first of those
- * faults. On GFXv9 VM fault information is fully contained in the IH
- * packet and this function is not needed.
- *
  * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
  * IH ring entry. This function allows the KFD ISR to get the VMID
  * from the fault status register as early as possible.
  *
- * @gpu_recover: let kgd reset gpu after kfd detect CPC hang
- *
- * @set_compute_idle: Indicates that compute is idle on a device. This
- * can be used to change power profiles depending on compute activity.
- *
  * @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
  *
  * This structure contains function pointers to services that the kgd driver
@@ -320,21 +258,6 @@ struct tile_config {
  *
  */
 struct kfd2kgd_calls {
-	int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
-					void **mem_obj, uint64_t *gpu_addr,
-					void **cpu_ptr, bool mqd_gfx9);
-
-	void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
-
-	void (*get_local_mem_info)(struct kgd_dev *kgd,
-			struct kfd_local_mem_info *mem_info);
-	uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
-
-	uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
-
-	int (*alloc_pasid)(unsigned int bits);
-	void (*free_pasid)(unsigned int pasid);
-
 	/* Register access functions */
 	void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
 			uint32_t sh_mem_config,	uint32_t sh_mem_ape1_base,
@@ -398,49 +321,11 @@ struct kfd2kgd_calls {
 				uint64_t va, uint32_t vmid);
 	int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
 
-	void (*get_cu_info)(struct kgd_dev *kgd,
-			struct kfd_cu_info *cu_info);
-	uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
-
-	int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid, void **vm,
-			void **process_info, struct dma_fence **ef);
-	int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
-			unsigned int pasid, void **vm, void **process_info,
-			struct dma_fence **ef);
-	void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
-	void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
-	uint64_t (*get_process_page_dir)(void *vm);
 	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
 			uint32_t vmid, uint64_t page_table_base);
-	int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
-			uint64_t size, void *vm,
-			struct kgd_mem **mem, uint64_t *offset,
-			uint32_t flags);
-	int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
-	int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
-			void *vm);
-	int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
-			void *vm);
-	int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
-	int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
-			void **kptr, uint64_t *size);
-	int (*restore_process_bos)(void *process_info, struct dma_fence **ef);
-
 	int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
 	int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
-
-	int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
-			uint32_t vmid, uint64_t gpu_addr,
-			uint32_t *ib_cmd, uint32_t ib_len);
-
-	int (*get_vm_fault_info)(struct kgd_dev *kgd,
-			struct kfd_vm_fault_info *info);
 	uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
-
-	void (*gpu_recover)(struct kgd_dev *kgd);
-
-	void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
-
 	uint64_t (*get_hive_id)(struct kgd_dev *kgd);
 
 };
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 5e19f59..d138ddae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 	    PP_CAP(PHM_PlatformCaps_TDRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 		mutex_lock(&adev->grbm_idx_mutex);
 		value = 0;
 		value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 					"Failed to enable DPM DIDT.", goto error);
 		}
 		mutex_unlock(&adev->grbm_idx_mutex);
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	}
 
 	return 0;
 error:
 	mutex_unlock(&adev->grbm_idx_mutex);
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return result;
 }
 
@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 	    PP_CAP(PHM_PlatformCaps_TDRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 		result = smu7_enable_didt(hwmgr, false);
 		PP_ASSERT_WITH_CODE((result == 0),
@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 			PP_ASSERT_WITH_CODE((0 == result),
 					"Failed to disable DPM DIDT.", goto error);
 		}
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	}
 
 	return 0;
 error:
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return result;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 2d88abf..6f26cb2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (count = 0; count < num_se; count++) {
@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (count = 0; count < num_se; count++) {
@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
 	if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t data;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
 		data = 0x00000000;
@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (count = 0; count < num_se; count++) {
@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
 
@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
 
@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t data;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
 		data = 0x00000000;
@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
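
powerplay no longer reaches through adev->gfx.rlc.funcs directly; the amdgpu_gfx_rlc_enter_safe_mode()/amdgpu_gfx_rlc_exit_safe_mode() helpers come from the RLC handling cleanup in this merge and give a single entry point that can centralize state tracking. A plausible shape for the wrapper (the body is an assumption, not taken from this diff):

    /* Sketch: skip when already in, or when the RLC is disabled,
     * otherwise dispatch to the per-ASIC hook.
     */
    void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
    {
            if (adev->gfx.rlc.in_safe_mode)
                    return;
            if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
                    return;
            adev->gfx.rlc.funcs->set_safe_mode(adev);
            adev->gfx.rlc.in_safe_mode = true;
    }
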
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 99861f3..f2daf00 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2755,7 +2755,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		for (i = 0; i < clocks.num_levels; i++)
 			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
-				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
+				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
 		break;
 
 	case PP_MCLK:
@@ -2772,7 +2772,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		for (i = 0; i < clocks.num_levels; i++)
 			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
-				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
+				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
 		break;
 
 	case PP_PCIE:
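
The sysfs printout compares the SMU-reported current clock against each DPM level; on Vega20 the SMU appears to report that value in 10 kHz units, so it is scaled by 10 before comparing with clocks_in_khz. For example, a 1350 MHz level is stored as 1350000 kHz and the SMU reports now = 135000; 135000 * 10 == 1350000 marks that level with the asterisk, where the old comparison could never match.
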
@@ -3454,109 +3454,64 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
 
 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
 	/* init/fini related */
-	.backend_init =
-		vega20_hwmgr_backend_init,
-	.backend_fini =
-		vega20_hwmgr_backend_fini,
-	.asic_setup =
-		vega20_setup_asic_task,
-	.power_off_asic =
-		vega20_power_off_asic,
-	.dynamic_state_management_enable =
-		vega20_enable_dpm_tasks,
-	.dynamic_state_management_disable =
-		vega20_disable_dpm_tasks,
+	.backend_init = vega20_hwmgr_backend_init,
+	.backend_fini = vega20_hwmgr_backend_fini,
+	.asic_setup = vega20_setup_asic_task,
+	.power_off_asic = vega20_power_off_asic,
+	.dynamic_state_management_enable = vega20_enable_dpm_tasks,
+	.dynamic_state_management_disable = vega20_disable_dpm_tasks,
 	/* power state related */
-	.apply_clocks_adjust_rules =
-		vega20_apply_clocks_adjust_rules,
-	.pre_display_config_changed =
-		vega20_pre_display_configuration_changed_task,
-	.display_config_changed =
-		vega20_display_configuration_changed_task,
+	.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
+	.pre_display_config_changed = vega20_pre_display_configuration_changed_task,
+	.display_config_changed = vega20_display_configuration_changed_task,
 	.check_smc_update_required_for_display_configuration =
 		vega20_check_smc_update_required_for_display_configuration,
 	.notify_smc_display_config_after_ps_adjustment =
 		vega20_notify_smc_display_config_after_ps_adjustment,
 	/* export to DAL */
-	.get_sclk =
-		vega20_dpm_get_sclk,
-	.get_mclk =
-		vega20_dpm_get_mclk,
-	.get_dal_power_level =
-		vega20_get_dal_power_level,
-	.get_clock_by_type_with_latency =
-		vega20_get_clock_by_type_with_latency,
-	.get_clock_by_type_with_voltage =
-		vega20_get_clock_by_type_with_voltage,
-	.set_watermarks_for_clocks_ranges =
-		vega20_set_watermarks_for_clocks_ranges,
-	.display_clock_voltage_request =
-		vega20_display_clock_voltage_request,
-	.get_performance_level =
-		vega20_get_performance_level,
+	.get_sclk = vega20_dpm_get_sclk,
+	.get_mclk = vega20_dpm_get_mclk,
+	.get_dal_power_level = vega20_get_dal_power_level,
+	.get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
+	.get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
+	.set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
+	.display_clock_voltage_request = vega20_display_clock_voltage_request,
+	.get_performance_level = vega20_get_performance_level,
 	/* UMD pstate, profile related */
-	.force_dpm_level =
-		vega20_dpm_force_dpm_level,
-	.get_power_profile_mode =
-		vega20_get_power_profile_mode,
-	.set_power_profile_mode =
-		vega20_set_power_profile_mode,
+	.force_dpm_level = vega20_dpm_force_dpm_level,
+	.get_power_profile_mode = vega20_get_power_profile_mode,
+	.set_power_profile_mode = vega20_set_power_profile_mode,
 	/* od related */
-	.set_power_limit =
-		vega20_set_power_limit,
-	.get_sclk_od =
-		vega20_get_sclk_od,
-	.set_sclk_od =
-		vega20_set_sclk_od,
-	.get_mclk_od =
-		vega20_get_mclk_od,
-	.set_mclk_od =
-		vega20_set_mclk_od,
-	.odn_edit_dpm_table =
-		vega20_odn_edit_dpm_table,
+	.set_power_limit = vega20_set_power_limit,
+	.get_sclk_od = vega20_get_sclk_od,
+	.set_sclk_od = vega20_set_sclk_od,
+	.get_mclk_od = vega20_get_mclk_od,
+	.set_mclk_od = vega20_set_mclk_od,
+	.odn_edit_dpm_table = vega20_odn_edit_dpm_table,
 	/* for sysfs to retrieve/set gfxclk/memclk */
-	.force_clock_level =
-		vega20_force_clock_level,
-	.print_clock_levels =
-		vega20_print_clock_levels,
-	.read_sensor =
-		vega20_read_sensor,
+	.force_clock_level = vega20_force_clock_level,
+	.print_clock_levels = vega20_print_clock_levels,
+	.read_sensor = vega20_read_sensor,
 	/* powergate related */
-	.powergate_uvd =
-		vega20_power_gate_uvd,
-	.powergate_vce =
-		vega20_power_gate_vce,
+	.powergate_uvd = vega20_power_gate_uvd,
+	.powergate_vce = vega20_power_gate_vce,
 	/* thermal related */
-	.start_thermal_controller =
-		vega20_start_thermal_controller,
-	.stop_thermal_controller =
-		vega20_thermal_stop_thermal_controller,
-	.get_thermal_temperature_range =
-		vega20_get_thermal_temperature_range,
-	.register_irq_handlers =
-		smu9_register_irq_handlers,
-	.disable_smc_firmware_ctf =
-		vega20_thermal_disable_alert,
+	.start_thermal_controller = vega20_start_thermal_controller,
+	.stop_thermal_controller = vega20_thermal_stop_thermal_controller,
+	.get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+	.register_irq_handlers = smu9_register_irq_handlers,
+	.disable_smc_firmware_ctf = vega20_thermal_disable_alert,
 	/* fan control related */
-	.get_fan_speed_percent =
-		vega20_fan_ctrl_get_fan_speed_percent,
-	.set_fan_speed_percent =
-		vega20_fan_ctrl_set_fan_speed_percent,
-	.get_fan_speed_info =
-		vega20_fan_ctrl_get_fan_speed_info,
-	.get_fan_speed_rpm =
-		vega20_fan_ctrl_get_fan_speed_rpm,
-	.set_fan_speed_rpm =
-		vega20_fan_ctrl_set_fan_speed_rpm,
-	.get_fan_control_mode =
-		vega20_get_fan_control_mode,
-	.set_fan_control_mode =
-		vega20_set_fan_control_mode,
+	.get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
+	.set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
+	.get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
+	.get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
+	.set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
+	.get_fan_control_mode = vega20_get_fan_control_mode,
+	.set_fan_control_mode = vega20_set_fan_control_mode,
 	/* smu memory related */
-	.notify_cac_buffer_info =
-		vega20_notify_cac_buffer_info,
-	.enable_mgpu_fan_boost =
-		vega20_enable_mgpu_fan_boost,
+	.notify_cac_buffer_info = vega20_notify_cac_buffer_info,
+	.enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
 };
 
 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index e5a60aa..07d180ce 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -28,7 +28,6 @@
 #include "hardwaremanager.h"
 #include "hwmgr_ppt.h"
 #include "ppatomctrl.h"
-#include "hwmgr_ppt.h"
 #include "power_state.h"
 #include "smu_helper.h"
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
index 65eb630..94bf7b6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
@@ -40,10 +40,6 @@
 #include "bif/bif_5_0_d.h"
 #include "bif/bif_5_0_sh_mask.h"
 
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 872d382..2b2c266 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -44,7 +44,6 @@
 
 #include "smu7_hwmgr.h"
 #include "hardwaremanager.h"
-#include "ppatomctrl.h"
 #include "atombios.h"
 #include "pppcielanes.h"
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index d0eb8ab..d111dd4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -29,7 +29,6 @@
 #include "rv_ppsmc.h"
 #include "smu10_driver_if.h"
 #include "smu10.h"
-#include "ppatomctrl.h"
 #include "pp_debug.h"
 
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 09b844e..e2787e1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -61,9 +62,13 @@ static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
 					mmSMU_MP1_SRBM2P_ARG_0);
 }
 
-static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
+/* Send a message to the SMC, and wait for its response. */
+static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+					    uint16_t msg, uint32_t parameter)
 {
 	int result = 0;
+	ktime_t t_start;
+	s64 elapsed_us;
 
 	if (hwmgr == NULL || hwmgr->device == NULL)
 		return -EINVAL;
@@ -74,28 +79,31 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
 		/* Read the last message to SMU, to report actual cause */
 		uint32_t val = cgs_read_register(hwmgr->device,
 						 mmSMU_MP1_SRBM2P_MSG_0);
-		pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
-		pr_err("SMU still servicing msg (0x%04x)\n", val);
+		pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n",
+			__func__, msg, val);
 		return result;
 	}
+	t_start = ktime_get();
+
+	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
 
 	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
 	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
 
-	return 0;
+	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+
+	elapsed_us = ktime_us_delta(ktime_get(), t_start);
+
+	WARN(result, "%s(0x%04x, %#x) timed out after %lld us\n",
+			__func__, msg, parameter, elapsed_us);
+
+	return result;
 }
 
-/* Send a message to the SMC, and wait for its response.*/
 static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
-	int result = 0;
-
-	result = smu8_send_msg_to_smc_async(hwmgr, msg);
-	if (result != 0)
-		return result;
-
-	return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
-					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+	return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, 0);
 }
 
 static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
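The rewrite above folds the old fire-and-forget send and the separate parameter variant into one synchronous helper: write SRBM2P_ARG_0, clear SRBM2P_RESP_0, write SRBM2P_MSG_0, then poll until the response field goes non-zero, warning with the measured latency on timeout. A hedged usage sketch; the message ID below is illustrative and not taken from this patch:

	/* Every message now blocks until the SMU acknowledges it. */
	ret = smu8_send_msg_to_smc_with_parameter(hwmgr,
						  PPSMC_MSG_SetSclkSoftMin,	/* illustrative ID */
						  clock_level);
	if (ret)
		return ret;	/* PHM_WAIT_FIELD_UNEQUAL() timed out */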
@@ -135,17 +143,6 @@ static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
-static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-					  uint16_t msg, uint32_t parameter)
-{
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
-
-	return smu8_send_msg_to_smc(hwmgr, msg);
-}
-
 static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
 				   uint32_t firmware)
 {
@@ -737,6 +734,10 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
 
 	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
 	hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+	pr_info("smu version %02d.%02d.%02d\n",
+		((hwmgr->smu_version >> 16) & 0xFF),
+		((hwmgr->smu_version >> 8) & 0xFF),
+		(hwmgr->smu_version & 0xFF));
 	adev->pm.fw_version = hwmgr->smu_version >> 8;
 
 	return smu8_request_smu_load_fw(hwmgr);
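The new banner decodes the packed 32-bit version word into major.minor.patch bytes. A worked example under the layout implied by the shifts:

	uint32_t v = 0x002A1F05;	/* hypothetical mmMP0PUB_IND_DATA readback */
	pr_info("smu version %02d.%02d.%02d\n",
		(v >> 16) & 0xFF,	/* 42 */
		(v >> 8) & 0xFF,	/* 31 */
		v & 0xFF);		/* 5 -> "smu version 42.31.05" */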
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 99d5e4f..a6edd5d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 9f71512..1e69300 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -40,7 +40,6 @@
 
 #include "smu7_hwmgr.h"
 #include "hardwaremanager.h"
-#include "ppatomctrl.h"
 #include "atombios.h"
 #include "pppcielanes.h"
 
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index e6c4cd3..bfc6504 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -104,8 +104,6 @@ struct ast_private {
 	int fb_mtrr;
 
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 	} ttm;
 
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index fe354eb..c168d62 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -36,63 +36,6 @@ ast_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct ast_private, ttm.bdev);
 }
 
-static int
-ast_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-ast_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int ast_ttm_global_init(struct ast_private *ast)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	global_ref = &ast->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &ast_ttm_mem_global_init;
-	global_ref->release = &ast_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	ast->ttm.bo_global_ref.mem_glob =
-		ast->ttm.mem_global_ref.object;
-	global_ref = &ast->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&ast->ttm.mem_global_ref);
-		return r;
-	}
-	return 0;
-}
-
-static void
-ast_ttm_global_release(struct ast_private *ast)
-{
-	if (ast->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&ast->ttm.mem_global_ref);
-	ast->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct ast_bo *bo;
@@ -232,12 +175,7 @@ int ast_mm_init(struct ast_private *ast)
 	struct drm_device *dev = ast->dev;
 	struct ttm_bo_device *bdev = &ast->ttm.bdev;
 
-	ret = ast_ttm_global_init(ast);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&ast->ttm.bdev,
-				 ast->ttm.bo_global_ref.ref.object,
 				 &ast_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void ast_mm_fini(struct ast_private *ast)
 
 	ttm_bo_device_release(&ast->ttm.bdev);
 
-	ast_ttm_global_release(ast);
-
 	arch_phys_wc_del(ast->fb_mtrr);
 	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
 				pci_resource_len(dev->pdev, 0));
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 577a8b9..fb38c8b 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -77,8 +77,6 @@ struct bochs_device {
 
 	/* ttm */
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 		bool initialized;
 	} ttm;
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index e6ccf7f..0980411 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -16,61 +16,6 @@ static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct bochs_device, ttm.bdev);
 }
 
-static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int bochs_ttm_global_init(struct bochs_device *bochs)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	global_ref = &bochs->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &bochs_ttm_mem_global_init;
-	global_ref->release = &bochs_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	bochs->ttm.bo_global_ref.mem_glob =
-		bochs->ttm.mem_global_ref.object;
-	global_ref = &bochs->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&bochs->ttm.mem_global_ref);
-		return r;
-	}
-
-	return 0;
-}
-
-static void bochs_ttm_global_release(struct bochs_device *bochs)
-{
-	if (bochs->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&bochs->ttm.mem_global_ref);
-	bochs->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct bochs_bo *bo;
@@ -208,12 +153,7 @@ int bochs_mm_init(struct bochs_device *bochs)
 	struct ttm_bo_device *bdev = &bochs->ttm.bdev;
 	int ret;
 
-	ret = bochs_ttm_global_init(bochs);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&bochs->ttm.bdev,
-				 bochs->ttm.bo_global_ref.ref.object,
 				 &bochs_bo_driver,
 				 bochs->dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
@@ -240,7 +180,6 @@ void bochs_mm_fini(struct bochs_device *bochs)
 		return;
 
 	ttm_bo_device_release(&bochs->ttm.bdev);
-	bochs_ttm_global_release(bochs);
 	bochs->ttm.initialized = false;
 }
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index a29f87e..f2b2e0d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -136,8 +136,6 @@ struct cirrus_device {
 	int fb_mtrr;
 
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 	} ttm;
 	bool mm_inited;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index f219532..e075810 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -36,63 +36,6 @@ cirrus_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct cirrus_device, ttm.bdev);
 }
 
-static int
-cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	global_ref = &cirrus->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &cirrus_ttm_mem_global_init;
-	global_ref->release = &cirrus_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	cirrus->ttm.bo_global_ref.mem_glob =
-		cirrus->ttm.mem_global_ref.object;
-	global_ref = &cirrus->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&cirrus->ttm.mem_global_ref);
-		return r;
-	}
-	return 0;
-}
-
-static void
-cirrus_ttm_global_release(struct cirrus_device *cirrus)
-{
-	if (cirrus->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&cirrus->ttm.mem_global_ref);
-	cirrus->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct cirrus_bo *bo;
@@ -232,12 +175,7 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 	struct drm_device *dev = cirrus->dev;
 	struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
 
-	ret = cirrus_ttm_global_init(cirrus);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&cirrus->ttm.bdev,
-				 cirrus->ttm.bo_global_ref.ref.object,
 				 &cirrus_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
@@ -273,8 +211,6 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
 
 	ttm_bo_device_release(&cirrus->ttm.bdev);
 
-	cirrus_ttm_global_release(cirrus);
-
 	arch_phys_wc_del(cirrus->fb_mtrr);
 	cirrus->fb_mtrr = 0;
 	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index aa1cef7..e2ffecd 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -973,14 +973,12 @@ static void drm_core_exit(void)
 	drm_sysfs_destroy();
 	idr_destroy(&drm_minors_idr);
 	drm_connector_ida_destroy();
-	drm_global_release();
 }
 
 static int __init drm_core_init(void)
 {
 	int ret;
 
-	drm_global_init();
 	drm_connector_ida_init();
 	idr_init(&drm_minors_idr);
 
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
deleted file mode 100644
index 5799e27..0000000
--- a/drivers/gpu/drm/drm_global.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <drm/drm_global.h>
-
-struct drm_global_item {
-	struct mutex mutex;
-	void *object;
-	int refcount;
-};
-
-static struct drm_global_item glob[DRM_GLOBAL_NUM];
-
-void drm_global_init(void)
-{
-	int i;
-
-	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
-		struct drm_global_item *item = &glob[i];
-		mutex_init(&item->mutex);
-		item->object = NULL;
-		item->refcount = 0;
-	}
-}
-
-void drm_global_release(void)
-{
-	int i;
-	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
-		struct drm_global_item *item = &glob[i];
-		BUG_ON(item->object != NULL);
-		BUG_ON(item->refcount != 0);
-	}
-}
-
-/**
- * drm_global_item_ref - Initialize and acquire reference to memory
- * object
- * @ref: Object for initialization
- *
- * This initializes a memory object, allocating memory and calling the
- * .init() hook. Further calls will increase the reference count for
- * that item.
- *
- * Returns:
- * Zero on success, non-zero otherwise.
- */
-int drm_global_item_ref(struct drm_global_reference *ref)
-{
-	int ret = 0;
-	struct drm_global_item *item = &glob[ref->global_type];
-
-	mutex_lock(&item->mutex);
-	if (item->refcount == 0) {
-		ref->object = kzalloc(ref->size, GFP_KERNEL);
-		if (unlikely(ref->object == NULL)) {
-			ret = -ENOMEM;
-			goto error_unlock;
-		}
-		ret = ref->init(ref);
-		if (unlikely(ret != 0))
-			goto error_free;
-
-		item->object = ref->object;
-	} else {
-		ref->object = item->object;
-	}
-
-	++item->refcount;
-	mutex_unlock(&item->mutex);
-	return 0;
-
-error_free:
-	kfree(ref->object);
-	ref->object = NULL;
-error_unlock:
-	mutex_unlock(&item->mutex);
-	return ret;
-}
-EXPORT_SYMBOL(drm_global_item_ref);
-
-/**
- * drm_global_item_unref - Drop reference to memory
- * object
- * @ref: Object being removed
- *
- * Drop a reference to the memory object and eventually call the
- * release() hook.  The allocated object should be dropped in the
- * release() hook or before calling this function
- *
- */
-
-void drm_global_item_unref(struct drm_global_reference *ref)
-{
-	struct drm_global_item *item = &glob[ref->global_type];
-
-	mutex_lock(&item->mutex);
-	BUG_ON(item->refcount == 0);
-	BUG_ON(ref->object != item->object);
-	if (--item->refcount == 0) {
-		ref->release(ref);
-		item->object = NULL;
-	}
-	mutex_unlock(&item->mutex);
-}
-EXPORT_SYMBOL(drm_global_item_unref);
-
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 9b47636..49a6763 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -105,8 +105,6 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 	change = dma_addr - gpu->hangcheck_dma_addr;
 	if (change < 0 || change > 16) {
 		gpu->hangcheck_dma_addr = dma_addr;
-		schedule_delayed_work(&sched_job->sched->work_tdr,
-				      sched_job->sched->timeout);
 		return;
 	}
 
@@ -127,6 +125,8 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
 {
 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
 
+	drm_sched_job_cleanup(sched_job);
+
 	etnaviv_submit_put(submit);
 }
 
@@ -159,6 +159,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 						submit->out_fence, 0,
 						INT_MAX, GFP_KERNEL);
 	if (submit->out_fence_id < 0) {
+		drm_sched_job_cleanup(&submit->sched_job);
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
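Both etnaviv changes follow the ownership rule of the new drm_sched_job_cleanup() helper (added in the scheduler hunks further below): once drm_sched_job_init() has succeeded, the finished fence belongs to the job and must be released exactly once, either in the driver's free_job callback or on an error path that abandons the submit. A condensed sketch of the pattern, with placeholder names:

	static void example_free_job(struct drm_sched_job *sched_job)
	{
		drm_sched_job_cleanup(sched_job);	/* drops the finished fence */
		/* ...then release driver-private submit state... */
	}

	/* Error path between drm_sched_job_init() and pushing the job: */
	if (ret) {
		drm_sched_job_cleanup(&submit->sched_job);
		goto out;	/* placeholder label */
	}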
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 45c25a4..3c168ae 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -49,8 +49,6 @@ struct hibmc_drm_private {
 	bool mode_config_initialized;
 
 	/* ttm */
-	struct drm_global_reference mem_global_ref;
-	struct ttm_bo_global_ref bo_global_ref;
 	struct ttm_bo_device bdev;
 	bool initialized;
 
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 2e3e0bd..dd38326 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -29,55 +29,6 @@ hibmc_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct hibmc_drm_private, bdev);
 }
 
-static int
-hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
-{
-	int ret;
-
-	hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
-	hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
-	hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
-	hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
-	ret = drm_global_item_ref(&hibmc->mem_global_ref);
-	if (ret) {
-		DRM_ERROR("could not get ref on ttm global: %d\n", ret);
-		return ret;
-	}
-
-	hibmc->bo_global_ref.mem_glob =
-		hibmc->mem_global_ref.object;
-	hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
-	hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
-	hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
-	hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
-	ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
-	if (ret) {
-		DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
-		drm_global_item_unref(&hibmc->mem_global_ref);
-		return ret;
-	}
-	return 0;
-}
-
-static void
-hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
-{
-	drm_global_item_unref(&hibmc->bo_global_ref.ref);
-	drm_global_item_unref(&hibmc->mem_global_ref);
-	hibmc->mem_global_ref.release = NULL;
-}
-
 static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
@@ -237,18 +188,12 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
 	struct drm_device *dev = hibmc->dev;
 	struct ttm_bo_device *bdev = &hibmc->bdev;
 
-	ret = hibmc_ttm_global_init(hibmc);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&hibmc->bdev,
-				 hibmc->bo_global_ref.ref.object,
 				 &hibmc_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
 				 true);
 	if (ret) {
-		hibmc_ttm_global_release(hibmc);
 		DRM_ERROR("error initializing bo driver: %d\n", ret);
 		return ret;
 	}
@@ -256,7 +201,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
 	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
 			     hibmc->fb_size >> PAGE_SHIFT);
 	if (ret) {
-		hibmc_ttm_global_release(hibmc);
 		DRM_ERROR("failed ttm VRAM init: %d\n", ret);
 		return ret;
 	}
@@ -271,7 +215,6 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
 		return;
 
 	ttm_bo_device_release(&hibmc->bdev);
-	hibmc_ttm_global_release(hibmc);
 	hibmc->mm_inited = false;
 }
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 04f1dfb..0aaedc5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -212,8 +212,6 @@ struct mga_device {
 	int fb_mtrr;
 
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 	} ttm;
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 05570f0..d96a9b3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -36,63 +36,6 @@ mgag200_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct mga_device, ttm.bdev);
 }
 
-static int
-mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int mgag200_ttm_global_init(struct mga_device *ast)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	global_ref = &ast->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &mgag200_ttm_mem_global_init;
-	global_ref->release = &mgag200_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	ast->ttm.bo_global_ref.mem_glob =
-		ast->ttm.mem_global_ref.object;
-	global_ref = &ast->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&ast->ttm.mem_global_ref);
-		return r;
-	}
-	return 0;
-}
-
-static void
-mgag200_ttm_global_release(struct mga_device *ast)
-{
-	if (ast->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&ast->ttm.mem_global_ref);
-	ast->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct mgag200_bo *bo;
@@ -232,12 +175,7 @@ int mgag200_mm_init(struct mga_device *mdev)
 	struct drm_device *dev = mdev->dev;
 	struct ttm_bo_device *bdev = &mdev->ttm.bdev;
 
-	ret = mgag200_ttm_global_init(mdev);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&mdev->ttm.bdev,
-				 mdev->ttm.bo_global_ref.ref.object,
 				 &mgag200_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void mgag200_mm_fini(struct mga_device *mdev)
 
 	ttm_bo_device_release(&mdev->ttm.bdev);
 
-	mgag200_ttm_global_release(mdev);
-
 	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
 				pci_resource_len(dev->pdev, 0));
 	arch_phys_wc_del(mdev->fb_mtrr);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 0b2191f..d20b9ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -146,8 +146,6 @@ struct nouveau_drm {
 
 	/* TTM interface support */
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 		atomic_t validate_sequence;
 		int (*move)(struct nouveau_channel *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 8edb9f2..1543c2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,66 +175,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 
 static int
-nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-int
-nouveau_ttm_global_init(struct nouveau_drm *drm)
-{
-	struct drm_global_reference *global_ref;
-	int ret;
-
-	global_ref = &drm->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &nouveau_ttm_mem_global_init;
-	global_ref->release = &nouveau_ttm_mem_global_release;
-
-	ret = drm_global_item_ref(global_ref);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed setting up TTM memory accounting\n");
-		drm->ttm.mem_global_ref.release = NULL;
-		return ret;
-	}
-
-	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
-	global_ref = &drm->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-
-	ret = drm_global_item_ref(global_ref);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed setting up TTM BO subsystem\n");
-		drm_global_item_unref(&drm->ttm.mem_global_ref);
-		drm->ttm.mem_global_ref.release = NULL;
-		return ret;
-	}
-
-	return 0;
-}
-
-void
-nouveau_ttm_global_release(struct nouveau_drm *drm)
-{
-	if (drm->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&drm->ttm.mem_global_ref);
-	drm->ttm.mem_global_ref.release = NULL;
-}
-
-static int
 nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 {
 	struct nvif_mmu *mmu = &drm->client.mmu;
@@ -296,12 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 		drm->agp.cma = pci->agp.cma;
 	}
 
-	ret = nouveau_ttm_global_init(drm);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&drm->ttm.bdev,
-				  drm->ttm.bo_global_ref.ref.object,
 				  &nouveau_bo_driver,
 				  dev->anon_inode->i_mapping,
 				  DRM_FILE_PAGE_OFFSET,
@@ -356,8 +291,6 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
 
 	ttm_bo_device_release(&drm->ttm.bdev);
 
-	nouveau_ttm_global_release(drm);
-
 	arch_phys_wc_del(drm->ttm.mtrr);
 	drm->ttm.mtrr = 0;
 	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 14d3fa8..13a0254 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -126,9 +126,6 @@ struct qxl_output {
 #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
 
 struct qxl_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
-	unsigned int mem_global_referenced:1;
 	struct ttm_bo_device		bdev;
 };
 
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 559a101..886f61e 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -46,62 +46,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
 	return qdev;
 }
 
-static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int qxl_ttm_global_init(struct qxl_device *qdev)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	qdev->mman.mem_global_referenced = false;
-	global_ref = &qdev->mman.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &qxl_ttm_mem_global_init;
-	global_ref->release = &qxl_ttm_mem_global_release;
-
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	qdev->mman.bo_global_ref.mem_glob =
-		qdev->mman.mem_global_ref.object;
-	global_ref = &qdev->mman.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&qdev->mman.mem_global_ref);
-		return r;
-	}
-
-	qdev->mman.mem_global_referenced = true;
-	return 0;
-}
-
-static void qxl_ttm_global_fini(struct qxl_device *qdev)
-{
-	if (qdev->mman.mem_global_referenced) {
-		drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
-		drm_global_item_unref(&qdev->mman.mem_global_ref);
-		qdev->mman.mem_global_referenced = false;
-	}
-}
-
 static struct vm_operations_struct qxl_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
@@ -372,12 +316,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
 	int r;
 	int num_io_pages; /* != rom->num_io_pages, we include surface0 */
 
-	r = qxl_ttm_global_init(qdev);
-	if (r)
-		return r;
 	/* No other users of the address space, so set it to 0 */
 	r = ttm_bo_device_init(&qdev->mman.bdev,
-			       qdev->mman.bo_global_ref.ref.object,
 			       &qxl_bo_driver,
 			       qdev->ddev.anon_inode->i_mapping,
 			       DRM_FILE_PAGE_OFFSET, 0);
@@ -413,7 +353,6 @@ void qxl_ttm_fini(struct qxl_device *qdev)
 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 	ttm_bo_device_release(&qdev->mman.bdev);
-	qxl_ttm_global_fini(qdev);
 	DRM_INFO("qxl: ttm finalized\n");
 }
 
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 21161aa..652126f 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -814,7 +814,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 					  ((idx_value >> 21) & 0xF));
 				return -EINVAL;
 			}
-			/* Pass through. */
+			/* Fall through. */
 		case 6:
 			track->cb[i].cpp = 4;
 			break;
@@ -965,7 +965,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 				return -EINVAL;
 			}
 			/* The same rules apply as for DXT3/5. */
-			/* Pass through. */
+			/* Fall through. */
 		case R300_TX_FORMAT_DXT3:
 		case R300_TX_FORMAT_DXT5:
 			track->textures[i].cpp = 1;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 45e1d4e..2318d9e3 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -109,6 +109,7 @@ void r420_pipes_init(struct radeon_device *rdev)
 	default:
 		/* force to 1 pipe */
 		num_pipes = 1;
+		/* fall through */
 	case 1:
 		tmp = (0 << 1);
 		break;
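The radeon comment changes above are about static checking, not style: GCC's -Wimplicit-fallthrough only recognizes a limited set of marker comments ("fall through" and close variants), so the old "Pass through." wording would still trigger the warning. A minimal illustration of the accepted form:

	switch (num_pipes) {
	default:
		num_pipes = 1;
		/* fall through */	/* marker recognized by -Wimplicit-fallthrough */
	case 1:
		tmp = (0 << 1);
		break;
	}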
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1a6f6ed..32808e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -448,10 +448,7 @@ struct radeon_surface_reg {
  * TTM.
  */
 struct radeon_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
 	struct ttm_bo_device		bdev;
-	bool				mem_global_referenced;
 	bool				initialized;
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 4278272..3dae2c4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -421,24 +421,14 @@ static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encode
 
 static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
-	struct radeon_crtc *radeon_crtc;
 	int restart;
 	unsigned int h_total, v_total, f_total;
 	int v_offset, h_offset;
 	u16 p1, p2, h_inc;
 	bool h_changed;
 	const struct radeon_tv_mode_constants *const_ptr;
-	struct radeon_pll *pll;
-
-	radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
-	if (radeon_crtc->crtc_id == 1)
-		pll = &rdev->clock.p2pll;
-	else
-		pll = &rdev->clock.p1pll;
 
 	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
 	if (!const_ptr)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 92f6d40..833e909 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -314,11 +314,9 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
 void radeon_bo_unref(struct radeon_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
-	struct radeon_device *rdev;
 
 	if ((*bo) == NULL)
 		return;
-	rdev = (*bo)->rdev;
 	tbo = &((*bo)->tbo);
 	ttm_bo_put(tbo);
 	*bo = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index cbb67e9..9920a6f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -60,65 +60,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
 	return rdev;
 }
 
-
-/*
- * Global memory.
- */
-static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int radeon_ttm_global_init(struct radeon_device *rdev)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	rdev->mman.mem_global_referenced = false;
-	global_ref = &rdev->mman.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &radeon_ttm_mem_global_init;
-	global_ref->release = &radeon_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	rdev->mman.bo_global_ref.mem_glob =
-		rdev->mman.mem_global_ref.object;
-	global_ref = &rdev->mman.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&rdev->mman.mem_global_ref);
-		return r;
-	}
-
-	rdev->mman.mem_global_referenced = true;
-	return 0;
-}
-
-static void radeon_ttm_global_fini(struct radeon_device *rdev)
-{
-	if (rdev->mman.mem_global_referenced) {
-		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
-		drm_global_item_unref(&rdev->mman.mem_global_ref);
-		rdev->mman.mem_global_referenced = false;
-	}
-}
-
 static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
 	return 0;
@@ -847,13 +788,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
 {
 	int r;
 
-	r = radeon_ttm_global_init(rdev);
-	if (r) {
-		return r;
-	}
 	/* No other users of the address space, so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
-			       rdev->mman.bo_global_ref.ref.object,
 			       &radeon_bo_driver,
 			       rdev->ddev->anon_inode->i_mapping,
 			       DRM_FILE_PAGE_OFFSET,
@@ -925,7 +861,6 @@ void radeon_ttm_fini(struct radeon_device *rdev)
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
 	ttm_bo_device_release(&rdev->mman.bdev);
 	radeon_gart_fini(rdev);
-	radeon_ttm_global_fini(rdev);
 	rdev->mman.initialized = false;
 	DRM_INFO("radeon: ttm finalized\n");
 }
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3e22a54..4463d38 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -130,7 +130,14 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
 	int i;
 
 	for (i = 0; i < entity->num_rq_list; ++i) {
-		num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
+		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+
+		if (!sched->ready) {
+			DRM_WARN("sched %s is not ready, skipping\n", sched->name);
+			continue;
+		}
+
+		num_jobs = atomic_read(&sched->num_jobs);
 		if (num_jobs < min_jobs) {
 			min_jobs = num_jobs;
 			rq = entity->rq_list[i];
@@ -204,7 +211,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 
 	drm_sched_fence_finished(job->s_fence);
 	WARN_ON(job->s_fence->parent);
-	dma_fence_put(&job->s_fence->finished);
 	job->sched->ops->free_job(job);
 }
 
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 44fe587..18ebbb0 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -196,6 +196,19 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
 }
 
+/**
+ * drm_sched_fault - immediately start timeout handler
+ *
+ * @sched: scheduler where the timeout handling should be started.
+ *
+ * Start timeout handling immediately when the driver detects a hardware fault.
+ */
+void drm_sched_fault(struct drm_gpu_scheduler *sched)
+{
+	mod_delayed_work(system_wq, &sched->work_tdr, 0);
+}
+EXPORT_SYMBOL(drm_sched_fault);
+
 /* job_finish is called after hw fence signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
@@ -220,7 +233,6 @@ static void drm_sched_job_finish(struct work_struct *work)
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 
-	dma_fence_put(&s_job->s_fence->finished);
 	sched->ops->free_job(s_job);
 }
 
@@ -283,6 +295,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 already_signaled:
 		;
 	}
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 
@@ -406,6 +419,9 @@ int drm_sched_job_init(struct drm_sched_job *job,
 	struct drm_gpu_scheduler *sched;
 
 	drm_sched_entity_select_rq(entity);
+	if (!entity->rq)
+		return -ENOENT;
+
 	sched = entity->rq->sched;
 
 	job->sched = sched;
@@ -424,6 +440,18 @@ int drm_sched_job_init(struct drm_sched_job *job,
 EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
+ * drm_sched_job_cleanup - clean up scheduler job resources
+ *
+ * @job: scheduler job to clean up
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+	dma_fence_put(&job->s_fence->finished);
+	job->s_fence = NULL;
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
+
+/**
  * drm_sched_ready - is the scheduler ready
  *
  * @sched: scheduler instance
@@ -619,6 +647,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		return PTR_ERR(sched->thread);
 	}
 
+	sched->ready = true;
 	return 0;
 }
 EXPORT_SYMBOL(drm_sched_init);
@@ -634,5 +663,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+
+	sched->ready = false;
 }
 EXPORT_SYMBOL(drm_sched_fini);
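A minimal sketch of how a driver might wire up the new drm_sched_fault() export, assuming a dedicated hardware-fault interrupt; the handler and its registration are illustrative, nothing in this patch names them:

	static irqreturn_t example_gpu_fault_irq(int irq, void *arg)
	{
		struct drm_gpu_scheduler *sched = arg;

		/* Skip the remaining TDR interval: run timeout handling now. */
		drm_sched_fault(sched);
		return IRQ_HANDLED;
	}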
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 83b4657..d87935b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,6 +45,14 @@
 
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
+/**
+ * ttm_global_mutex - protecting the global BO state
+ */
+DEFINE_MUTEX(ttm_global_mutex);
+struct ttm_bo_global ttm_bo_glob = {
+	.use_count = 0
+};
+
 static struct attribute ttm_bo_count = {
 	.name = "bo_count",
 	.mode = S_IRUGO
@@ -1519,35 +1527,45 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
 		container_of(kobj, struct ttm_bo_global, kobj);
 
 	__free_page(glob->dummy_read_page);
-	kfree(glob);
 }
 
-void ttm_bo_global_release(struct drm_global_reference *ref)
+static void ttm_bo_global_release(void)
 {
-	struct ttm_bo_global *glob = ref->object;
+	struct ttm_bo_global *glob = &ttm_bo_glob;
+
+	mutex_lock(&ttm_global_mutex);
+	if (--glob->use_count > 0)
+		goto out;
 
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
+	ttm_mem_global_release(&ttm_mem_glob);
+out:
+	mutex_unlock(&ttm_global_mutex);
 }
-EXPORT_SYMBOL(ttm_bo_global_release);
 
-int ttm_bo_global_init(struct drm_global_reference *ref)
+static int ttm_bo_global_init(void)
 {
-	struct ttm_bo_global_ref *bo_ref =
-		container_of(ref, struct ttm_bo_global_ref, ref);
-	struct ttm_bo_global *glob = ref->object;
-	int ret;
+	struct ttm_bo_global *glob = &ttm_bo_glob;
+	int ret = 0;
 	unsigned i;
 
-	mutex_init(&glob->device_list_mutex);
+	mutex_lock(&ttm_global_mutex);
+	if (++glob->use_count > 1)
+		goto out;
+
+	ret = ttm_mem_global_init(&ttm_mem_glob);
+	if (ret)
+		goto out;
+
 	spin_lock_init(&glob->lru_lock);
-	glob->mem_glob = bo_ref->mem_glob;
+	glob->mem_glob = &ttm_mem_glob;
 	glob->mem_glob->bo_glob = glob;
 	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
 	if (unlikely(glob->dummy_read_page == NULL)) {
 		ret = -ENOMEM;
-		goto out_no_drp;
+		goto out;
 	}
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
@@ -1559,13 +1577,10 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
 	if (unlikely(ret != 0))
 		kobject_put(&glob->kobj);
-	return ret;
-out_no_drp:
-	kfree(glob);
+out:
+	mutex_unlock(&ttm_global_mutex);
 	return ret;
 }
-EXPORT_SYMBOL(ttm_bo_global_init);
-
 
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
@@ -1587,9 +1602,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 		}
 	}
 
-	mutex_lock(&glob->device_list_mutex);
+	mutex_lock(&ttm_global_mutex);
 	list_del(&bdev->device_list);
-	mutex_unlock(&glob->device_list_mutex);
+	mutex_unlock(&ttm_global_mutex);
 
 	cancel_delayed_work_sync(&bdev->wq);
 
@@ -1604,18 +1619,25 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 
 	drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
+	if (!ret)
+		ttm_bo_global_release();
+
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
-		       struct ttm_bo_global *glob,
 		       struct ttm_bo_driver *driver,
 		       struct address_space *mapping,
 		       uint64_t file_page_offset,
 		       bool need_dma32)
 {
-	int ret = -EINVAL;
+	struct ttm_bo_global *glob = &ttm_bo_glob;
+	int ret;
+
+	ret = ttm_bo_global_init();
+	if (ret)
+		return ret;
 
 	bdev->driver = driver;
 
@@ -1636,12 +1658,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->dev_mapping = mapping;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
-	mutex_lock(&glob->device_list_mutex);
+	mutex_lock(&ttm_global_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
-	mutex_unlock(&glob->device_list_mutex);
+	mutex_unlock(&ttm_global_mutex);
 
 	return 0;
 out_no_sys:
+	ttm_bo_global_release();
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
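With the globals folded into TTM, every driver conversion in this series reduces to the same shape: the per-driver ttm_mem_global/ttm_bo_global boilerplate disappears and ttm_bo_device_init() performs the refcounted global setup internally under ttm_global_mutex. A sketch of the resulting driver init, with placeholder names:

	int example_mm_init(struct example_private *priv, struct drm_device *dev)
	{
		/* TTM now references the BO and memory globals itself. */
		return ttm_bo_device_init(&priv->ttm.bdev,
					  &example_bo_driver,
					  dev->anon_inode->i_mapping,
					  DRM_FILE_PAGE_OFFSET,
					  true /* need_dma32 */);
	}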
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e493edb..efa005a 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -187,14 +187,12 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	struct ttm_buffer_object *bo;
 	struct ttm_bo_global *glob;
 	struct ttm_bo_device *bdev;
-	struct ttm_bo_driver *driver;
 
 	if (list_empty(list))
 		return;
 
 	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
 	bdev = bo->bdev;
-	driver = bdev->driver;
 	glob = bo->bdev->glob;
 
 	spin_lock(&glob->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 450387c..f1567c3 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -41,6 +41,9 @@
 
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
+struct ttm_mem_global ttm_mem_glob;
+EXPORT_SYMBOL(ttm_mem_glob);
+
 struct ttm_mem_zone {
 	struct kobject kobj;
 	struct ttm_mem_global *glob;
@@ -216,14 +219,6 @@ static ssize_t ttm_mem_global_store(struct kobject *kobj,
 	return size;
 }
 
-static void ttm_mem_global_kobj_release(struct kobject *kobj)
-{
-	struct ttm_mem_global *glob =
-		container_of(kobj, struct ttm_mem_global, kobj);
-
-	kfree(glob);
-}
-
 static struct attribute *ttm_mem_global_attrs[] = {
 	&ttm_mem_global_lower_mem_limit,
 	NULL
@@ -235,7 +230,6 @@ static const struct sysfs_ops ttm_mem_global_ops = {
 };
 
 static struct kobj_type ttm_mem_glob_kobj_type = {
-	.release = &ttm_mem_global_kobj_release,
 	.sysfs_ops = &ttm_mem_global_ops,
 	.default_attrs = ttm_mem_global_attrs,
 };
@@ -464,7 +458,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 	ttm_mem_global_release(glob);
 	return ret;
 }
-EXPORT_SYMBOL(ttm_mem_global_init);
 
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
@@ -486,7 +479,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
 }
-EXPORT_SYMBOL(ttm_mem_global_release);
 
 static void ttm_check_swapping(struct ttm_mem_global *glob)
 {
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 9243dea..445b2ef 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -35,6 +35,8 @@ v3d_job_free(struct drm_sched_job *sched_job)
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
 
+	drm_sched_job_cleanup(sched_job);
+
 	v3d_exec_put(job->exec);
 }
 
@@ -167,9 +169,6 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
 	if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
 		job->timedout_ctca = ctca;
 		job->timedout_ctra = ctra;
-
-		schedule_delayed_work(&job->base.sched->work_tdr,
-				      job->base.sched->timeout);
 		return;
 	}
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 6474e83..2a8aaea 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -143,9 +143,6 @@ struct virtio_gpu_fbdev {
 };
 
 struct virtio_gpu_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
-	bool				mem_global_referenced;
 	struct ttm_bo_device		bdev;
 };
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index cd63dff..4bfbf25fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -50,62 +50,6 @@ virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
 	return vgdev;
 }
 
-static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	vgdev->mman.mem_global_referenced = false;
-	global_ref = &vgdev->mman.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &virtio_gpu_ttm_mem_global_init;
-	global_ref->release = &virtio_gpu_ttm_mem_global_release;
-
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	vgdev->mman.bo_global_ref.mem_glob =
-		vgdev->mman.mem_global_ref.object;
-	global_ref = &vgdev->mman.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&vgdev->mman.mem_global_ref);
-		return r;
-	}
-
-	vgdev->mman.mem_global_referenced = true;
-	return 0;
-}
-
-static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
-{
-	if (vgdev->mman.mem_global_referenced) {
-		drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
-		drm_global_item_unref(&vgdev->mman.mem_global_ref);
-		vgdev->mman.mem_global_referenced = false;
-	}
-}
-
 int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv;
@@ -382,12 +326,8 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
 {
 	int r;
 
-	r = virtio_gpu_ttm_global_init(vgdev);
-	if (r)
-		return r;
 	/* No other users of the address space, so set it to 0 */
 	r = ttm_bo_device_init(&vgdev->mman.bdev,
-			       vgdev->mman.bo_global_ref.ref.object,
 			       &virtio_gpu_bo_driver,
 			       vgdev->ddev->anon_inode->i_mapping,
 			       DRM_FILE_PAGE_OFFSET, 0);
@@ -406,13 +346,11 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
 err_mm_init:
 	ttm_bo_device_release(&vgdev->mman.bdev);
 err_dev_init:
-	virtio_gpu_ttm_global_fini(vgdev);
 	return r;
 }
 
 void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
 {
 	ttm_bo_device_release(&vgdev->mman.bdev);
-	virtio_gpu_ttm_global_fini(vgdev);
 	DRM_INFO("virtio_gpu: ttm finalized\n");
 }
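
The virtio conversion is the template every driver in this series follows:
delete the *_ttm_global_init()/_fini() pair, drop the ttm_bo_global argument
from ttm_bo_device_init(), and retire one error label. A minimal sketch of a
converted init path, where my_device, my_bo_driver and MY_FILE_PAGE_OFFSET
are placeholders for a driver's own definitions:

    int my_ttm_init(struct my_device *mdev)
    {
            /*
             * No per-driver global references any more: TTM acquires and
             * releases the shared ttm_mem_glob/ttm_bo_glob internally, so
             * a failure here needs no global unwinding.
             */
            return ttm_bo_device_init(&mdev->mman.bdev,
                                      &my_bo_driver,
                                      mdev->ddev->anon_inode->i_mapping,
                                      MY_FILE_PAGE_OFFSET, false);
    }
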
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 61a84b9..b9c0788 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -801,11 +801,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
 		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
 
-	ret = vmw_ttm_global_init(dev_priv);
-	if (unlikely(ret != 0))
-		goto out_err0;
-
-
 	vmw_master_init(&dev_priv->fbdev_master);
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
@@ -816,7 +811,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
 		DRM_ERROR("Failed mapping MMIO.\n");
-		goto out_err3;
+		goto out_err0;
 	}
 
 	/* Need mmio memory to check for fifo pitchlock cap. */
@@ -828,8 +823,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_err4;
 	}
 
-	dev_priv->tdev = ttm_object_device_init
-		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
+	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
+						&vmw_prime_dmabuf_ops);
 
 	if (unlikely(dev_priv->tdev == NULL)) {
 		DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -870,7 +865,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	}
 
 	ret = ttm_bo_device_init(&dev_priv->bdev,
-				 dev_priv->bo_global_ref.ref.object,
 				 &vmw_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 VMWGFX_FILE_PAGE_OFFSET,
@@ -992,8 +986,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_object_device_release(&dev_priv->tdev);
 out_err4:
 	memunmap(dev_priv->mmio_virt);
-out_err3:
-	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
@@ -1045,7 +1037,6 @@ static void vmw_driver_unload(struct drm_device *dev)
 	memunmap(dev_priv->mmio_virt);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-	vmw_ttm_global_release(dev_priv);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 59f6142..28df788 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -417,8 +417,6 @@ enum {
 
 struct vmw_private {
 	struct ttm_bo_device bdev;
-	struct ttm_bo_global_ref bo_global_ref;
-	struct drm_global_reference mem_global_ref;
 
 	struct vmw_fifo_state fifo;
 
@@ -842,8 +840,6 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
  * TTM glue - vmwgfx_ttm_glue.c
  */
 
-extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
-extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /**
@@ -1363,7 +1359,7 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
 
 static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
 {
-	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+	return &ttm_mem_glob;
 }
 
 static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 7b1e5a5..154eb09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -42,57 +42,3 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 	dev_priv = vmw_priv(file_priv->minor->dev);
 	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
 }
-
-static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	DRM_INFO("global init.\n");
-	return ttm_mem_global_init(ref->object);
-}
-
-static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-int vmw_ttm_global_init(struct vmw_private *dev_priv)
-{
-	struct drm_global_reference *global_ref;
-	int ret;
-
-	global_ref = &dev_priv->mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &vmw_ttm_mem_global_init;
-	global_ref->release = &vmw_ttm_mem_global_release;
-
-	ret = drm_global_item_ref(global_ref);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed setting up TTM memory accounting.\n");
-		return ret;
-	}
-
-	dev_priv->bo_global_ref.mem_glob =
-		dev_priv->mem_global_ref.object;
-	global_ref = &dev_priv->bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	ret = drm_global_item_ref(global_ref);
-
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed setting up TTM buffer objects.\n");
-		goto out_no_bo;
-	}
-
-	return 0;
-out_no_bo:
-	drm_global_item_unref(&dev_priv->mem_global_ref);
-	return ret;
-}
-
-void vmw_ttm_global_release(struct vmw_private *dev_priv)
-{
-	drm_global_item_unref(&dev_priv->bo_global_ref.ref);
-	drm_global_item_unref(&dev_priv->mem_global_ref);
-}
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
index 73395a75..fa933d4 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -99,8 +99,6 @@ struct vbox_private {
 	int fb_mtrr;
 
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 	} ttm;
 
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
index 5ecfa76..b36ec01 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -35,61 +35,6 @@ static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct vbox_private, ttm.bdev);
 }
 
-static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-/**
- * Adds the vbox memory manager object/structures to the global memory manager.
- */
-static int vbox_ttm_global_init(struct vbox_private *vbox)
-{
-	struct drm_global_reference *global_ref;
-	int ret;
-
-	global_ref = &vbox->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &vbox_ttm_mem_global_init;
-	global_ref->release = &vbox_ttm_mem_global_release;
-	ret = drm_global_item_ref(global_ref);
-	if (ret) {
-		DRM_ERROR("Failed setting up TTM memory subsystem.\n");
-		return ret;
-	}
-
-	vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
-	global_ref = &vbox->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-
-	ret = drm_global_item_ref(global_ref);
-	if (ret) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&vbox->ttm.mem_global_ref);
-		return ret;
-	}
-
-	return 0;
-}
-
-/**
- * Removes the vbox memory manager object from the global memory manager.
- */
-static void vbox_ttm_global_release(struct vbox_private *vbox)
-{
-	drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&vbox->ttm.mem_global_ref);
-}
-
 static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct vbox_bo *bo;
@@ -227,18 +172,13 @@ int vbox_mm_init(struct vbox_private *vbox)
 	struct drm_device *dev = &vbox->ddev;
 	struct ttm_bo_device *bdev = &vbox->ttm.bdev;
 
-	ret = vbox_ttm_global_init(vbox);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&vbox->ttm.bdev,
-				 vbox->ttm.bo_global_ref.ref.object,
 				 &vbox_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET, true);
 	if (ret) {
 		DRM_ERROR("Error initialising bo driver; %d\n", ret);
-		goto err_ttm_global_release;
+		return ret;
 	}
 
 	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
@@ -260,8 +200,6 @@ int vbox_mm_init(struct vbox_private *vbox)
 
 err_device_release:
 	ttm_bo_device_release(&vbox->ttm.bdev);
-err_ttm_global_release:
-	vbox_ttm_global_release(vbox);
 	return ret;
 }
 
@@ -275,7 +213,6 @@ void vbox_mm_fini(struct vbox_private *vbox)
 	arch_phys_wc_del(vbox->fb_mtrr);
 #endif
 	ttm_bo_device_release(&vbox->ttm.bdev);
-	vbox_ttm_global_release(vbox);
 }
 
 void vbox_ttm_placement(struct vbox_bo *bo, int domain)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 514beb2..bdb0d55 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -68,7 +68,6 @@
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
-#include <drm/drm_global.h>
 #include <drm/drm_hashtab.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_os_linux.h>
diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h
deleted file mode 100644
index 3a83060..0000000
--- a/include/drm/drm_global.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _DRM_GLOBAL_H_
-#define _DRM_GLOBAL_H_
-enum drm_global_types {
-	DRM_GLOBAL_TTM_MEM = 0,
-	DRM_GLOBAL_TTM_BO,
-	DRM_GLOBAL_TTM_OBJECT,
-	DRM_GLOBAL_NUM
-};
-
-struct drm_global_reference {
-	enum drm_global_types global_type;
-	size_t size;
-	void *object;
-	int (*init) (struct drm_global_reference *);
-	void (*release) (struct drm_global_reference *);
-};
-
-void drm_global_init(void);
-void drm_global_release(void);
-int drm_global_item_ref(struct drm_global_reference *ref);
-void drm_global_item_unref(struct drm_global_reference *ref);
-
-#endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d87b268..926379d 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -264,6 +264,7 @@ struct drm_sched_backend_ops {
  * @hang_limit: once the number of hangs caused by a job crosses this limit,
  *              the job is marked guilty and will not be scheduled further.
  * @num_jobs: the number of jobs in queue in the scheduler
+ * @ready: marks whether the underlying HW is ready to work
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -283,22 +284,26 @@ struct drm_gpu_scheduler {
 	spinlock_t			job_list_lock;
 	int				hang_limit;
 	atomic_t                        num_jobs;
+	bool				ready;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
 		   uint32_t hw_submission, unsigned hang_limit, long timeout,
 		   const char *name);
+
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
 			    struct drm_sched_job *job);
 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
 void drm_sched_job_kickout(struct drm_sched_job *s_job);
 
 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
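
drm_sched_fault() gives drivers a sanctioned way to trigger the scheduler's
timeout handling from an interrupt or error path, replacing the direct
schedule_delayed_work(&sched->work_tdr, ...) call removed from v3d above,
while the new ready flag records whether the ring ever came up so teardown
and submission paths can check it. A sketch of the intended call site,
assuming a hypothetical hang-detect interrupt handler:

    #include <linux/interrupt.h>
    #include <drm/gpu_scheduler.h>

    static irqreturn_t my_hang_irq(int irq, void *arg)
    {
            struct drm_gpu_scheduler *sched = arg;

            /* Kick the scheduler's own timeout work; never poke
             * sched->work_tdr directly from driver code. */
            drm_sched_fault(sched);
            return IRQ_HANDLED;
    }
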
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e4fee8e..1021106 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -31,7 +31,6 @@
 #define _TTM_BO_DRIVER_H_
 
 #include <drm/drm_mm.h>
-#include <drm/drm_global.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/workqueue.h>
 #include <linux/fs.h>
@@ -385,15 +384,6 @@ struct ttm_bo_driver {
 };
 
 /**
- * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
- */
-
-struct ttm_bo_global_ref {
-	struct drm_global_reference ref;
-	struct ttm_mem_global *mem_glob;
-};
-
-/**
  * struct ttm_bo_global - Buffer object driver global data.
  *
  * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
@@ -407,7 +397,7 @@ struct ttm_bo_global_ref {
  * @swap_lru: Lru list of buffer objects used for swapping.
  */
 
-struct ttm_bo_global {
+extern struct ttm_bo_global {
 
 	/**
 	 * Constant after init.
@@ -416,12 +406,12 @@ struct ttm_bo_global {
 	struct kobject kobj;
 	struct ttm_mem_global *mem_glob;
 	struct page *dummy_read_page;
-	struct mutex device_list_mutex;
 	spinlock_t lru_lock;
 
 	/**
-	 * Protected by device_list_mutex.
+	 * Protected by ttm_global_mutex.
 	 */
+	unsigned int use_count;
 	struct list_head device_list;
 
 	/**
@@ -433,7 +423,7 @@ struct ttm_bo_global {
 	 * Internal protection.
 	 */
 	atomic_t bo_count;
-};
+} ttm_bo_glob;
 
 
 #define TTM_NUM_MEM_TYPES 8
@@ -578,9 +568,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
 void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *mem);
 
-void ttm_bo_global_release(struct drm_global_reference *ref);
-int ttm_bo_global_init(struct drm_global_reference *ref);
-
 int ttm_bo_device_release(struct ttm_bo_device *bdev);
 
 /**
@@ -598,7 +585,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * Returns:
  * !0: Failure.
  */
-int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_driver *driver,
 		       struct address_space *mapping,
 		       uint64_t file_page_offset, bool need_dma32);
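
Note the idiom used for ttm_bo_glob here and for ttm_mem_glob in ttm_memory.h
below: the header defines the struct type and declares the single extern
instance in one statement, so exactly one translation unit provides the
storage. In miniature:

    /* header: type definition plus extern declaration of the one instance */
    extern struct my_global {
            unsigned int use_count;
    } my_glob;

    /* exactly one .c file supplies the storage */
    struct my_global my_glob;
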
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 737b5fe..3ff48a0 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -63,7 +63,7 @@
 
 #define TTM_MEM_MAX_ZONES 2
 struct ttm_mem_zone;
-struct ttm_mem_global {
+extern struct ttm_mem_global {
 	struct kobject kobj;
 	struct ttm_bo_global *bo_glob;
 	struct workqueue_struct *swap_queue;
@@ -78,7 +78,7 @@ struct ttm_mem_global {
 #else
 	struct ttm_mem_zone *zone_dma32;
 #endif
-};
+} ttm_mem_glob;
 
 extern int ttm_mem_global_init(struct ttm_mem_global *glob);
 extern void ttm_mem_global_release(struct ttm_mem_global *glob);
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 370e9a5..be84e43 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -326,6 +326,12 @@ struct drm_amdgpu_gem_userptr {
 /* GFX9 and later: */
 #define AMDGPU_TILING_SWIZZLE_MODE_SHIFT		0
 #define AMDGPU_TILING_SWIZZLE_MODE_MASK			0x1f
+#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT		5
+#define AMDGPU_TILING_DCC_OFFSET_256B_MASK		0xFFFFFF
+#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT		29
+#define AMDGPU_TILING_DCC_PITCH_MAX_MASK		0x3FFF
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT		43
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK		0x1
 
 /* Set/Get helpers for tiling flags. */
 #define AMDGPU_TILING_SET(field, value) \