Merge tag 'vmwgfx-next-2018-12-05' of git://people.freedesktop.org/~thomash/linux into drm-next

Pull request of 2018-12-05

Page flip with damage by Deepak and others.
Various vmwgfx minor fixes and cleanups.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181205103554.3675-1-thellstrom@vmware.com
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 4b4dc23..b422eb8 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -232,6 +232,18 @@
 .. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
    :export:
 
+Display Stream Compression Helper Functions Reference
+=====================================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+   :doc: dsc helpers
+
+.. kernel-doc:: include/drm/drm_dsc.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+   :export:
+
 Output Probing Helper Functions Reference
 =========================================
 
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 6c3e89e..75c882e 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -587,6 +587,13 @@
 .. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
    :doc: explicit fencing properties
 
+
+Variable Refresh Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :doc: Variable refresh properties
+
 Existing KMS Properties
 -----------------------
 
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e2336cd..ce8d1d3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -32,7 +32,7 @@
 drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
 drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
 		drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
 		drm_simple_kms_helper.o drm_modeset_helper.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 42f882c..c8ad6bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -81,6 +81,7 @@
 #include "amdgpu_job.h"
 #include "amdgpu_bo_list.h"
 #include "amdgpu_gem.h"
+#include "amdgpu_doorbell.h"
 
 #define MAX_GPU_INSTANCE		16
 
@@ -361,123 +362,6 @@ int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
 /*
- * GPU doorbell structures, functions & helpers
- */
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
-	AMDGPU_DOORBELL_KIQ                     = 0x000,
-	AMDGPU_DOORBELL_HIQ                     = 0x001,
-	AMDGPU_DOORBELL_DIQ                     = 0x002,
-	AMDGPU_DOORBELL_MEC_RING0               = 0x010,
-	AMDGPU_DOORBELL_MEC_RING1               = 0x011,
-	AMDGPU_DOORBELL_MEC_RING2               = 0x012,
-	AMDGPU_DOORBELL_MEC_RING3               = 0x013,
-	AMDGPU_DOORBELL_MEC_RING4               = 0x014,
-	AMDGPU_DOORBELL_MEC_RING5               = 0x015,
-	AMDGPU_DOORBELL_MEC_RING6               = 0x016,
-	AMDGPU_DOORBELL_MEC_RING7               = 0x017,
-	AMDGPU_DOORBELL_GFX_RING0               = 0x020,
-	AMDGPU_DOORBELL_sDMA_ENGINE0            = 0x1E0,
-	AMDGPU_DOORBELL_sDMA_ENGINE1            = 0x1E1,
-	AMDGPU_DOORBELL_IH                      = 0x1E8,
-	AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
-	AMDGPU_DOORBELL_INVALID                 = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
-
-struct amdgpu_doorbell {
-	/* doorbell mmio */
-	resource_size_t		base;
-	resource_size_t		size;
-	u32 __iomem		*ptr;
-	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
-};
-
-/*
- * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
- */
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
-	/*
-	 * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
-	 * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
-	 *  Compute related doorbells are allocated from 0x00 to 0x8a
-	 */
-
-
-	/* kernel scheduling */
-	AMDGPU_DOORBELL64_KIQ                     = 0x00,
-
-	/* HSA interface queue and debug queue */
-	AMDGPU_DOORBELL64_HIQ                     = 0x01,
-	AMDGPU_DOORBELL64_DIQ                     = 0x02,
-
-	/* Compute engines */
-	AMDGPU_DOORBELL64_MEC_RING0               = 0x03,
-	AMDGPU_DOORBELL64_MEC_RING1               = 0x04,
-	AMDGPU_DOORBELL64_MEC_RING2               = 0x05,
-	AMDGPU_DOORBELL64_MEC_RING3               = 0x06,
-	AMDGPU_DOORBELL64_MEC_RING4               = 0x07,
-	AMDGPU_DOORBELL64_MEC_RING5               = 0x08,
-	AMDGPU_DOORBELL64_MEC_RING6               = 0x09,
-	AMDGPU_DOORBELL64_MEC_RING7               = 0x0a,
-
-	/* User queue doorbell range (128 doorbells) */
-	AMDGPU_DOORBELL64_USERQUEUE_START         = 0x0b,
-	AMDGPU_DOORBELL64_USERQUEUE_END           = 0x8a,
-
-	/* Graphics engine */
-	AMDGPU_DOORBELL64_GFX_RING0               = 0x8b,
-
-	/*
-	 * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
-	 * Graphics voltage island aperture 1
-	 * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
-	 */
-
-	/* sDMA engines  reserved from 0xe0 -0xef  */
-	AMDGPU_DOORBELL64_sDMA_ENGINE0            = 0xE0,
-	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xE1,
-	AMDGPU_DOORBELL64_sDMA_ENGINE1            = 0xE8,
-	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1     = 0xE9,
-
-	/* For vega10 sriov, the sdma doorbell must be fixed as follow
-	 * to keep the same setting with host driver, or it will
-	 * happen conflicts
-	 */
-	AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0            = 0xF0,
-	AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xF1,
-	AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1            = 0xF2,
-	AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1     = 0xF3,
-
-	/* Interrupt handler */
-	AMDGPU_DOORBELL64_IH                      = 0xF4,  /* For legacy interrupt ring buffer */
-	AMDGPU_DOORBELL64_IH_RING1                = 0xF5,  /* For page migration request log */
-	AMDGPU_DOORBELL64_IH_RING2                = 0xF6,  /* For page migration translation/invalidation log */
-
-	/* VCN engine use 32 bits doorbell  */
-	AMDGPU_DOORBELL64_VCN0_1                  = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
-	AMDGPU_DOORBELL64_VCN2_3                  = 0xF9,
-	AMDGPU_DOORBELL64_VCN4_5                  = 0xFA,
-	AMDGPU_DOORBELL64_VCN6_7                  = 0xFB,
-
-	/* overlap the doorbell assignment with VCN as they are  mutually exclusive
-	 * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
-	 */
-	AMDGPU_DOORBELL64_UVD_RING0_1             = 0xF8,
-	AMDGPU_DOORBELL64_UVD_RING2_3             = 0xF9,
-	AMDGPU_DOORBELL64_UVD_RING4_5             = 0xFA,
-	AMDGPU_DOORBELL64_UVD_RING6_7             = 0xFB,
-
-	AMDGPU_DOORBELL64_VCE_RING0_1             = 0xFC,
-	AMDGPU_DOORBELL64_VCE_RING2_3             = 0xFD,
-	AMDGPU_DOORBELL64_VCE_RING4_5             = 0xFE,
-	AMDGPU_DOORBELL64_VCE_RING6_7             = 0xFF,
-
-	AMDGPU_DOORBELL64_MAX_ASSIGNMENT          = 0xFF,
-	AMDGPU_DOORBELL64_INVALID                 = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-/*
  * IRQS.
  */
 
@@ -654,6 +538,8 @@ struct amdgpu_asic_funcs {
 			       struct amdgpu_ring *ring);
 	/* check if the asic needs a full reset of if soft reset will work */
 	bool (*need_full_reset)(struct amdgpu_device *adev);
+	/* initialize doorbell layout for a specific asic */
+	void (*init_doorbell_index)(struct amdgpu_device *adev);
 };
 
 /*
@@ -1023,6 +909,8 @@ struct amdgpu_device {
 	unsigned long last_mm_index;
 	bool                            in_gpu_reset;
 	struct mutex  lock_reset;
+	struct amdgpu_doorbell_index doorbell_index;
+	int asic_reset_res;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1047,11 +935,6 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
 
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
-
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
@@ -1113,11 +996,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
 #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
 
-#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
-#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
-#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
-#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
-
 #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
 
@@ -1159,6 +1037,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
 #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
 
 /* Common functions */
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1219,12 +1098,6 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
-
-/*
- * functions used by amdgpu_xgmi.c
- */
-int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-
 /*
  * functions used by amdgpu_encoder.c
  */
@@ -1252,6 +1125,9 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 						u8 perf_req, bool advertise);
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+		struct amdgpu_dm_backlight_caps *caps);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 7f0afc5..47db659 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -41,28 +41,21 @@ struct amdgpu_atif_notification_cfg {
 };
 
 struct amdgpu_atif_notifications {
-	bool display_switch;
-	bool expansion_mode_change;
 	bool thermal_state;
 	bool forced_power_state;
 	bool system_power_state;
-	bool display_conf_change;
-	bool px_gfx_switch;
 	bool brightness_change;
 	bool dgpu_display_event;
+	bool gpu_package_power_limit;
 };
 
 struct amdgpu_atif_functions {
 	bool system_params;
 	bool sbios_requests;
-	bool select_active_disp;
-	bool lid_state;
-	bool get_tv_standard;
-	bool set_tv_standard;
-	bool get_panel_expansion_mode;
-	bool set_panel_expansion_mode;
 	bool temperature_change;
-	bool graphics_device_types;
+	bool query_backlight_transfer_characteristics;
+	bool ready_to_undock;
+	bool external_gpu_information;
 };
 
 struct amdgpu_atif {
@@ -72,6 +65,7 @@ struct amdgpu_atif {
 	struct amdgpu_atif_functions functions;
 	struct amdgpu_atif_notification_cfg notification_cfg;
 	struct amdgpu_encoder *encoder_for_bl;
+	struct amdgpu_dm_backlight_caps backlight_caps;
 };
 
 /* Call the ATIF method
@@ -137,15 +131,12 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
  */
 static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
 {
-	n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
-	n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
 	n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
 	n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
 	n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
-	n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
-	n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
 	n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
 	n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+	n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED;
 }
 
 /**
@@ -162,14 +153,11 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
 {
 	f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
 	f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
-	f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
-	f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
-	f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
-	f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
-	f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
-	f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
 	f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
-	f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+	f->query_backlight_transfer_characteristics =
+		mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED;
+	f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED;
+	f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED;
 }
 
 /**
@@ -311,6 +299,65 @@ static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
 }
 
 /**
+ * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
+ *
+ * @handle: acpi handle
+ *
+ * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
+ * to determine the acceptable range of backlight values
+ *
+ * backlight_caps.caps_valid will be set to true if the query is successful.
+ *
+ * The input signals are in the range 0-255.
+ *
+ * This function assumes the display with the backlight is the first LCD.
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
+{
+	union acpi_object *info;
+	struct atif_qbtc_output characteristics;
+	struct atif_qbtc_arguments arguments;
+	struct acpi_buffer params;
+	size_t size;
+	int err = 0;
+
+	arguments.size = sizeof(arguments);
+	arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
+
+	params.length = sizeof(arguments);
+	params.pointer = (void *)&arguments;
+
+	info = amdgpu_atif_call(atif,
+		ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
+		&params);
+	if (!info) {
+		err = -EIO;
+		goto out;
+	}
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 10) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&characteristics, 0, sizeof(characteristics));
+	size = min(sizeof(characteristics), size);
+	memcpy(&characteristics, info->buffer.pointer, size);
+
+	atif->backlight_caps.caps_valid = true;
+	atif->backlight_caps.min_input_signal =
+			characteristics.min_input_signal;
+	atif->backlight_caps.max_input_signal =
+			characteristics.max_input_signal;
+out:
+	kfree(info);
+	return err;
+}
+
+/**
  * amdgpu_atif_get_sbios_requests - get requested sbios event
  *
  * @handle: acpi handle
@@ -799,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 		}
 	}
 
+	if (atif->functions.query_backlight_transfer_characteristics) {
+		ret = amdgpu_atif_query_backlight_caps(atif);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
+					ret);
+			atif->backlight_caps.caps_valid = false;
+		}
+	} else {
+		atif->backlight_caps.caps_valid = false;
+	}
+
 out:
 	adev->acpi_nb.notifier_call = amdgpu_acpi_event;
 	register_acpi_notifier(&adev->acpi_nb);
@@ -806,6 +864,18 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 	return ret;
 }
 
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+		struct amdgpu_dm_backlight_caps *caps)
+{
+	if (!adev->atif) {
+		caps->caps_valid = false;
+		return;
+	}
+	caps->caps_valid = adev->atif->backlight_caps.caps_valid;
+	caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
+	caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
+}
+
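A plausible consumer of the exported caps is display code scaling a 0-255 user brightness level onto the panel's accepted input-signal range. A minimal standalone sketch of such a linear mapping, assuming hypothetical cap values (the driver's real conversion is not part of this diff):

/* Sketch: scale a 0-255 user brightness onto [min, max] input signal,
 * as reported by amdgpu_acpi_get_backlight_caps(). Hypothetical
 * illustration, not the driver's actual conversion. */
#include <stdbool.h>
#include <stdio.h>

struct backlight_caps {
	bool caps_valid;
	int min_input_signal;	/* 0-255 */
	int max_input_signal;	/* 0-255 */
};

static int brightness_to_signal(const struct backlight_caps *caps, int level)
{
	if (!caps->caps_valid)
		return level;	/* fall back to the raw value */
	return caps->min_input_signal +
	       level * (caps->max_input_signal - caps->min_input_signal) / 255;
}

int main(void)
{
	struct backlight_caps caps = { true, 12, 255 };	/* illustrative values */

	printf("level 128 -> signal %d\n", brightness_to_signal(&caps, 128));
	return 0;
}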
 /**
  * amdgpu_acpi_fini - tear down driver acpi support
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index bcf1666..d693b804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -73,9 +73,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
 		break;
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
@@ -179,25 +181,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 			 * process in case of 64-bit doorbells so we
 			 * can use each doorbell assignment twice.
 			 */
-			if (adev->asic_type == CHIP_VEGA10) {
-				gpu_resources.sdma_doorbell[0][i] =
-					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
-				gpu_resources.sdma_doorbell[0][i+1] =
-					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
-				gpu_resources.sdma_doorbell[1][i] =
-					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
-				gpu_resources.sdma_doorbell[1][i+1] =
-					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
-			} else {
-				gpu_resources.sdma_doorbell[0][i] =
-					AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
-				gpu_resources.sdma_doorbell[0][i+1] =
-					AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
-				gpu_resources.sdma_doorbell[1][i] =
-					AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
-				gpu_resources.sdma_doorbell[1][i+1] =
-					AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
-			}
+			gpu_resources.sdma_doorbell[0][i] =
+				adev->doorbell_index.sdma_engine0 + (i >> 1);
+			gpu_resources.sdma_doorbell[0][i+1] =
+				adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1);
+			gpu_resources.sdma_doorbell[1][i] =
+				adev->doorbell_index.sdma_engine1 + (i >> 1);
+			gpu_resources.sdma_doorbell[1][i+1] =
+				adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1);
 		}
 		/* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
 		 * SDMA, IH and VCN. So don't use them for the CP.
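Since 64-bit doorbells occupy a qword each, KFD can hand the same index out twice per engine: even queue slots use the first doorbell page, and odd slots use the same index 0x200 dwords later in the second page. A standalone sketch of that split, with an illustrative base index:

/* Sketch of the 64-bit doorbell split used above: even queue slots
 * take the first-page index, odd slots the second page (+0x200).
 * The base index is illustrative. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sdma_engine0 = 0xE0;	/* hypothetical base index */
	uint32_t doorbell[8];

	for (int i = 0; i < 8; i += 2) {
		doorbell[i]     = sdma_engine0 + (i >> 1);
		doorbell[i + 1] = sdma_engine0 + 0x200 + (i >> 1);
	}

	for (int i = 0; i < 8; i++)	/* prints 0xE0-0xE3 and 0x2E0-0x2E3 */
		printf("queue %d -> doorbell 0x%X\n", i, (unsigned)doorbell[i]);
	return 0;
}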
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df0a059..f3129b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -46,9 +46,9 @@
 /* Impose limit on how much memory KFD can use */
 static struct {
 	uint64_t max_system_mem_limit;
-	uint64_t max_userptr_mem_limit;
+	uint64_t max_ttm_mem_limit;
 	int64_t system_mem_used;
-	int64_t userptr_mem_used;
+	int64_t ttm_mem_used;
 	spinlock_t mem_limit_lock;
 } kfd_mem_limit;
 
@@ -90,8 +90,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
 }
 
 /* Set memory usage limits. Currently, limits are
- *  System (kernel) memory - 3/8th System RAM
- *  Userptr memory - 3/4th System RAM
+ *  System (TTM + userptr) memory - 3/4th System RAM
+ *  TTM memory - 3/8th System RAM
  */
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
 {
@@ -103,48 +103,54 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
 	mem *= si.mem_unit;
 
 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
-	kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
-	kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
-	pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
+	kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
+	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
 		(kfd_mem_limit.max_system_mem_limit >> 20),
-		(kfd_mem_limit.max_userptr_mem_limit >> 20));
+		(kfd_mem_limit.max_ttm_mem_limit >> 20));
 }
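For concreteness, a minimal standalone sketch of the shift-based limit math above, assuming a hypothetical 16 GiB system (3/4 is computed as 1/2 + 1/4, and 3/8 as 1/2 - 1/8):

/* Sketch of the new limit math on a hypothetical 16 GiB system;
 * standalone illustration, not part of the kernel patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mem = 16ULL << 30;	/* total system RAM in bytes */

	/* System (TTM + userptr) limit: 3/4 of RAM = 1/2 + 1/4 */
	uint64_t system_limit = (mem >> 1) + (mem >> 2);
	/* TTM limit: 3/8 of RAM = 1/2 - 1/8 */
	uint64_t ttm_limit = (mem >> 1) - (mem >> 3);

	printf("system limit %lluM, TTM limit %lluM\n",
	       (unsigned long long)(system_limit >> 20),
	       (unsigned long long)(ttm_limit >> 20));	/* 12288M, 6144M */
	return 0;
}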
 
 static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
-					      uint64_t size, u32 domain)
+		uint64_t size, u32 domain, bool sg)
 {
-	size_t acc_size;
+	size_t acc_size, system_mem_needed, ttm_mem_needed;
 	int ret = 0;
 
 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
 				       sizeof(struct amdgpu_bo));
 
 	spin_lock(&kfd_mem_limit.mem_limit_lock);
+
 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
-		if (kfd_mem_limit.system_mem_used + (acc_size + size) >
-			kfd_mem_limit.max_system_mem_limit) {
-			ret = -ENOMEM;
-			goto err_no_mem;
-		}
-		kfd_mem_limit.system_mem_used += (acc_size + size);
-	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
-		if ((kfd_mem_limit.system_mem_used + acc_size >
-			kfd_mem_limit.max_system_mem_limit) ||
-			(kfd_mem_limit.userptr_mem_used + (size + acc_size) >
-			kfd_mem_limit.max_userptr_mem_limit)) {
-			ret = -ENOMEM;
-			goto err_no_mem;
-		}
-		kfd_mem_limit.system_mem_used += acc_size;
-		kfd_mem_limit.userptr_mem_used += size;
+		/* TTM GTT memory */
+		system_mem_needed = acc_size + size;
+		ttm_mem_needed = acc_size + size;
+	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+		/* Userptr */
+		system_mem_needed = acc_size + size;
+		ttm_mem_needed = acc_size;
+	} else {
+		/* VRAM and SG */
+		system_mem_needed = acc_size;
+		ttm_mem_needed = acc_size;
 	}
-err_no_mem:
+
+	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
+		kfd_mem_limit.max_system_mem_limit) ||
+		(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+		kfd_mem_limit.max_ttm_mem_limit))
+		ret = -ENOMEM;
+	else {
+		kfd_mem_limit.system_mem_used += system_mem_needed;
+		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+	}
+
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 	return ret;
 }
 
 static void unreserve_system_mem_limit(struct amdgpu_device *adev,
-				       uint64_t size, u32 domain)
+		uint64_t size, u32 domain, bool sg)
 {
 	size_t acc_size;
 
@@ -154,14 +160,18 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
 	spin_lock(&kfd_mem_limit.mem_limit_lock);
 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 		kfd_mem_limit.system_mem_used -= (acc_size + size);
-	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
+	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+		kfd_mem_limit.system_mem_used -= (acc_size + size);
+		kfd_mem_limit.ttm_mem_used -= acc_size;
+	} else {
 		kfd_mem_limit.system_mem_used -= acc_size;
-		kfd_mem_limit.userptr_mem_used -= size;
+		kfd_mem_limit.ttm_mem_used -= acc_size;
 	}
 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
 		  "kfd system memory accounting unbalanced");
-	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
-		  "kfd userptr memory accounting unbalanced");
+	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
+		  "kfd TTM memory accounting unbalanced");
 
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 }
@@ -171,16 +181,22 @@ void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
 	spin_lock(&kfd_mem_limit.mem_limit_lock);
 
 	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
-		kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
-		kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
+		kfd_mem_limit.system_mem_used -=
+			(bo->tbo.acc_size + amdgpu_bo_size(bo));
+		kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size;
 	} else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
 		kfd_mem_limit.system_mem_used -=
 			(bo->tbo.acc_size + amdgpu_bo_size(bo));
+		kfd_mem_limit.ttm_mem_used -=
+			(bo->tbo.acc_size + amdgpu_bo_size(bo));
+	} else {
+		kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
+		kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size;
 	}
 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
 		  "kfd system memory accounting unbalanced");
-	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
-		  "kfd userptr memory accounting unbalanced");
+	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
+		  "kfd TTM memory accounting unbalanced");
 
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 }
@@ -395,23 +411,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 	return 0;
 }
 
-static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-			 struct dma_fence *f)
-{
-	int ret = amdgpu_sync_fence(adev, sync, f, false);
-
-	/* Sync objects can't handle multiple GPUs (contexts) updating
-	 * sync->last_vm_update. Fortunately we don't need it for
-	 * KFD's purposes, so we can just drop that fence.
-	 */
-	if (sync->last_vm_update) {
-		dma_fence_put(sync->last_vm_update);
-		sync->last_vm_update = NULL;
-	}
-
-	return ret;
-}
-
 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo *pd = vm->root.base.bo;
@@ -422,7 +421,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 	if (ret)
 		return ret;
 
-	return sync_vm_fence(adev, sync, vm->last_update);
+	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
 }
 
 /* add_bo_to_vm - Add a BO to a VM
@@ -826,7 +825,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 	/* Add the eviction fence back */
 	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
 
-	sync_vm_fence(adev, sync, bo_va->last_pt_update);
+	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
 
 	return 0;
 }
@@ -851,7 +850,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 		return ret;
 	}
 
-	return sync_vm_fence(adev, sync, bo_va->last_pt_update);
+	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
 }
 
 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -901,6 +900,26 @@ static int process_validate_vms(struct amdkfd_process_info *process_info)
 	return 0;
 }
 
+static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+				 struct amdgpu_sync *sync)
+{
+	struct amdgpu_vm *peer_vm;
+	int ret;
+
+	list_for_each_entry(peer_vm, &process_info->vm_list_head,
+			    vm_list_node) {
+		struct amdgpu_bo *pd = peer_vm->root.base.bo;
+
+		ret = amdgpu_sync_resv(NULL,
+					sync, pd->tbo.resv,
+					AMDGPU_FENCE_OWNER_UNDEFINED, false);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int process_update_pds(struct amdkfd_process_info *process_info,
 			      struct amdgpu_sync *sync)
 {
@@ -1199,7 +1218,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	byte_align = (adev->family == AMDGPU_FAMILY_VI &&
 			adev->asic_type != CHIP_FIJI &&
 			adev->asic_type != CHIP_POLARIS10 &&
-			adev->asic_type != CHIP_POLARIS11) ?
+			adev->asic_type != CHIP_POLARIS11 &&
+			adev->asic_type != CHIP_POLARIS12) ?
 			VI_BO_SIZE_ALIGN : 1;
 
 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
@@ -1215,10 +1235,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 
 	amdgpu_sync_create(&(*mem)->sync);
 
-	ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
+	ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size,
+						     alloc_domain, false);
 	if (ret) {
 		pr_debug("Insufficient system memory\n");
-		goto err_reserve_system_mem;
+		goto err_reserve_limit;
 	}
 
 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
@@ -1266,10 +1287,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 allocate_init_user_pages_failed:
 	amdgpu_bo_unref(&bo);
 	/* Don't unreserve system mem limit twice */
-	goto err_reserve_system_mem;
+	goto err_reserve_limit;
 err_bo_create:
-	unreserve_system_mem_limit(adev, size, alloc_domain);
-err_reserve_system_mem:
+	unreserve_system_mem_limit(adev, size, alloc_domain, false);
+err_reserve_limit:
 	mutex_destroy(&(*mem)->lock);
 	kfree(*mem);
 	return ret;
@@ -1405,7 +1426,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	 * the queues are still stopped and we can leave mapping for
 	 * the next restore worker
 	 */
-	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
+	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
 		is_invalid_userptr = true;
 
 	if (check_if_add_bo_to_vm(avm, mem)) {
@@ -2044,13 +2066,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	if (ret)
 		goto validate_map_fail;
 
-	/* Wait for PD/PTs validate to finish */
-	/* FIXME: I think this isn't needed */
-	list_for_each_entry(peer_vm, &process_info->vm_list_head,
-			    vm_list_node) {
-		struct amdgpu_bo *bo = peer_vm->root.base.bo;
-
-		ttm_bo_wait(&bo->tbo, false, false);
+	ret = process_sync_pds_resv(process_info, &sync_obj);
+	if (ret) {
+		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+		goto validate_map_fail;
 	}
 
 	/* Validate BOs and map them to GPUVM (update VM page tables). */
@@ -2066,7 +2085,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
 			goto validate_map_fail;
 		}
-
+		ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+		if (ret) {
+			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+			goto validate_map_fail;
+		}
 		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
 				    bo_list) {
 			ret = update_gpuvm_pte((struct amdgpu_device *)
@@ -2087,6 +2110,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 		goto validate_map_fail;
 	}
 
+	/* Wait for validate and PT updates to finish */
 	amdgpu_sync_wait(&sync_obj, false);
 
 	/* Release old eviction fence and create new one, because fence only
@@ -2105,10 +2129,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	process_info->eviction_fence = new_fence;
 	*ef = dma_fence_get(&new_fence->base);
 
-	/* Wait for validate to finish and attach new eviction fence */
-	list_for_each_entry(mem, &process_info->kfd_bo_list,
-		validate_list.head)
-		ttm_bo_wait(&mem->bo->tbo, false, false);
+	/* Attach new eviction fence to all BOs */
 	list_for_each_entry(mem, &process_info->kfd_bo_list,
 		validate_list.head)
 		amdgpu_bo_fence(mem->bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 0c590dd..5b55070 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -43,7 +43,7 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo
 	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
 				domain, bo,
 				NULL, &ptr);
-	if (!bo)
+	if (!*bo)
 		return -ENOMEM;
 
 	memset(ptr, 0, size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f9b5423..95f4c41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 	[AMDGPU_HW_IP_UVD_ENC]	=	1,
 	[AMDGPU_HW_IP_VCN_DEC]	=	1,
 	[AMDGPU_HW_IP_VCN_ENC]	=	1,
+	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
 };
 
 static int amdgput_ctx_total_num_entities(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 590588a..c75badf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -59,6 +59,8 @@
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_pm.h"
 
+#include "amdgpu_xgmi.h"
+
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -513,6 +515,8 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
  */
 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 {
+	amdgpu_asic_init_doorbell_index(adev);
+
 	/* No doorbell on SI hardware generation */
 	if (adev->asic_type < CHIP_BONAIRE) {
 		adev->doorbell.base = 0;
@@ -530,10 +534,19 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 
 	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
-					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
+					     adev->doorbell_index.max_assignment+1);
 	if (adev->doorbell.num_doorbells == 0)
 		return -EINVAL;
 
+	/* For Vega, reserve and map two pages on the doorbell BAR since the
+	 * SDMA paging queue doorbell uses the second page. The
+	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+	 * doorbells are in the first page, so with the paging queue enabled
+	 * the max num_doorbells must grow by one page (0x400 in dwords).
+	 */
+	if (adev->asic_type >= CHIP_VEGA10)
+		adev->doorbell.num_doorbells += 0x400;
+
 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
 				     adev->doorbell.num_doorbells *
 				     sizeof(u32));
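A standalone sketch of the resulting num_doorbells computation, assuming a hypothetical 2 MiB doorbell BAR and a qword-indexed Vega-style max_assignment of 0xFF:

/* Sketch of the doorbell count math above: clamp the BAR capacity to
 * the highest assigned index, then add one extra page (0x400 dwords)
 * on Vega-class parts for the SDMA paging queue. Values illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bar_size = 2 * 1024 * 1024;	/* hypothetical 2 MiB BAR */
	uint32_t max_assignment = 0xFF;		/* qword-indexed Vega layout */
	bool is_vega = true;

	uint32_t num = bar_size / sizeof(uint32_t);	/* dwords in the BAR */
	if (num > max_assignment + 1)
		num = max_assignment + 1;
	if (is_vega)
		num += 0x400;	/* second doorbell page for SDMA paging queue */

	printf("num_doorbells = 0x%X (%u bytes mapped)\n",
	       (unsigned)num, (unsigned)(num * sizeof(uint32_t)));
	return 0;
}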
@@ -2458,9 +2471,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-	/* doorbell bar mapping */
-	amdgpu_device_doorbell_init(adev);
-
 	/* io port mapping */
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
@@ -2479,6 +2489,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
+	/* doorbell bar mapping and doorbell index init */
+	amdgpu_device_doorbell_init(adev);
+
 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
 	/* this will fail for cards that aren't VGA class devices, just
 	 * ignore it */
@@ -3151,86 +3164,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 	return 0;
 }
 
-/**
- * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
- *
- * @adev: amdgpu device pointer
- *
- * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means succeeded otherwise failed
- */
-static int amdgpu_device_reset(struct amdgpu_device *adev)
-{
-	bool need_full_reset, vram_lost = 0;
-	int r;
-
-	need_full_reset = amdgpu_device_ip_need_full_reset(adev);
-
-	if (!need_full_reset) {
-		amdgpu_device_ip_pre_soft_reset(adev);
-		r = amdgpu_device_ip_soft_reset(adev);
-		amdgpu_device_ip_post_soft_reset(adev);
-		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
-			DRM_INFO("soft reset failed, will fallback to full reset!\n");
-			need_full_reset = true;
-		}
-	}
-
-	if (need_full_reset) {
-		r = amdgpu_device_ip_suspend(adev);
-
-retry:
-		r = amdgpu_asic_reset(adev);
-		/* post card */
-		amdgpu_atom_asic_init(adev->mode_info.atom_context);
-
-		if (!r) {
-			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
-			r = amdgpu_device_ip_resume_phase1(adev);
-			if (r)
-				goto out;
-
-			vram_lost = amdgpu_device_check_vram_lost(adev);
-			if (vram_lost) {
-				DRM_ERROR("VRAM is lost!\n");
-				atomic_inc(&adev->vram_lost_counter);
-			}
-
-			r = amdgpu_gtt_mgr_recover(
-				&adev->mman.bdev.man[TTM_PL_TT]);
-			if (r)
-				goto out;
-
-			r = amdgpu_device_fw_loading(adev);
-			if (r)
-				return r;
-
-			r = amdgpu_device_ip_resume_phase2(adev);
-			if (r)
-				goto out;
-
-			if (vram_lost)
-				amdgpu_device_fill_reset_magic(adev);
-		}
-	}
-
-out:
-	if (!r) {
-		amdgpu_irq_gpu_reset_resume_helper(adev);
-		r = amdgpu_ib_ring_tests(adev);
-		if (r) {
-			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
-			r = amdgpu_device_ip_suspend(adev);
-			need_full_reset = true;
-			goto retry;
-		}
-	}
-
-	if (!r)
-		r = amdgpu_device_recover_vram(adev);
-
-	return r;
-}
 
 /**
  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
@@ -3329,31 +3262,13 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		return false;
 }
 
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu device pointer
- * @job: which job trigger hang
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
-			      struct amdgpu_job *job)
+
+static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+					struct amdgpu_job *job,
+					bool *need_full_reset_arg)
 {
-	int i, r, resched;
-
-	dev_info(adev->dev, "GPU reset begin!\n");
-
-	mutex_lock(&adev->lock_reset);
-	atomic_inc(&adev->gpu_reset_counter);
-	adev->in_gpu_reset = 1;
-
-	/* Block kfd */
-	amdgpu_amdkfd_pre_reset(adev);
-
-	/* block TTM */
-	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+	int i, r = 0;
+	bool need_full_reset  = *need_full_reset_arg;
 
 	/* block all schedulers and reset given job's ring */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3373,10 +3288,123 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		amdgpu_fence_driver_force_completion(ring);
 	}
 
-	if (amdgpu_sriov_vf(adev))
-		r = amdgpu_device_reset_sriov(adev, job ? false : true);
-	else
-		r = amdgpu_device_reset(adev);
+
+
+	if (!amdgpu_sriov_vf(adev)) {
+
+		if (!need_full_reset)
+			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+		if (!need_full_reset) {
+			amdgpu_device_ip_pre_soft_reset(adev);
+			r = amdgpu_device_ip_soft_reset(adev);
+			amdgpu_device_ip_post_soft_reset(adev);
+			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+				DRM_INFO("soft reset failed, will fallback to full reset!\n");
+				need_full_reset = true;
+			}
+		}
+
+		if (need_full_reset)
+			r = amdgpu_device_ip_suspend(adev);
+
+		*need_full_reset_arg = need_full_reset;
+	}
+
+	return r;
+}
+
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+			       struct list_head *device_list_handle,
+			       bool *need_full_reset_arg)
+{
+	struct amdgpu_device *tmp_adev = NULL;
+	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+	int r = 0;
+
+	/*
+	 * ASIC reset has to be done on all XGMI hive nodes ASAP
+	 * to allow proper link negotiation in FW (within 1 sec)
+	 */
+	if (need_full_reset) {
+		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+			r = amdgpu_asic_reset(tmp_adev);
+			if (r)
+				DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+					 r, tmp_adev->ddev->unique);
+		}
+	}
+
+
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+		if (need_full_reset) {
+			/* post card */
+			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
+				DRM_WARN("asic atom init failed!");
+
+			if (!r) {
+				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+				r = amdgpu_device_ip_resume_phase1(tmp_adev);
+				if (r)
+					goto out;
+
+				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+				if (vram_lost) {
+					DRM_ERROR("VRAM is lost!\n");
+					atomic_inc(&tmp_adev->vram_lost_counter);
+				}
+
+				r = amdgpu_gtt_mgr_recover(
+					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
+				if (r)
+					goto out;
+
+				r = amdgpu_device_fw_loading(tmp_adev);
+				if (r)
+					return r;
+
+				r = amdgpu_device_ip_resume_phase2(tmp_adev);
+				if (r)
+					goto out;
+
+				if (vram_lost)
+					amdgpu_device_fill_reset_magic(tmp_adev);
+
+				/* Update PSP FW topology after reset */
+				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+			}
+		}
+
+
+out:
+		if (!r) {
+			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+			r = amdgpu_ib_ring_tests(tmp_adev);
+			if (r) {
+				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+				r = amdgpu_device_ip_suspend(tmp_adev);
+				need_full_reset = true;
+				r = -EAGAIN;
+				goto end;
+			}
+		}
+
+		if (!r)
+			r = amdgpu_device_recover_vram(tmp_adev);
+		else
+			tmp_adev->asic_reset_res = r;
+	}
+
+end:
+	*need_full_reset_arg = need_full_reset;
+	return r;
+}
+
+static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
+					  struct amdgpu_job *job)
+{
+	int i;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -3388,7 +3416,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		 * or all rings (in the case @job is NULL)
 		 * after above amdgpu_reset accomplished
 		 */
-		if ((!job || job->base.sched == &ring->sched) && !r)
+		if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
 			drm_sched_job_recovery(&ring->sched);
 
 		kthread_unpark(ring->sched.thread);
@@ -3398,21 +3426,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		drm_helper_resume_force_mode(adev->ddev);
 	}
 
-	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+	adev->asic_reset_res = 0;
+}
 
-	if (r) {
-		/* bad news, how to tell it to userspace ? */
-		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
-		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
-	} else {
-		dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
-	}
+static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
+{
+	mutex_lock(&adev->lock_reset);
+	atomic_inc(&adev->gpu_reset_counter);
+	adev->in_gpu_reset = 1;
+	/* Block kfd */
+	amdgpu_amdkfd_pre_reset(adev);
+}
 
+static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+{
 	/*unlock kfd */
 	amdgpu_amdkfd_post_reset(adev);
 	amdgpu_vf_error_trans_all(adev);
 	adev->in_gpu_reset = 0;
 	mutex_unlock(&adev->lock_reset);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job trigger hang
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize Asic
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+			      struct amdgpu_job *job)
+{
+	int r;
+	struct amdgpu_hive_info *hive = NULL;
+	bool need_full_reset = false;
+	struct amdgpu_device *tmp_adev = NULL;
+	struct list_head device_list, *device_list_handle =  NULL;
+
+	INIT_LIST_HEAD(&device_list);
+
+	dev_info(adev->dev, "GPU reset begin!\n");
+
+	/*
+	 * In the case of an XGMI hive, disallow concurrent resets triggered
+	 * by different nodes. There is also no point, since the node already
+	 * executing the reset will reset all the other nodes in the hive.
+	 */
+	hive = amdgpu_get_xgmi_hive(adev);
+	if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
+	    !mutex_trylock(&hive->hive_lock))
+		return 0;
+
+	/* Start with adev pre asic reset first for the soft reset check. */
+	amdgpu_device_lock_adev(adev);
+	r = amdgpu_device_pre_asic_reset(adev,
+					 job,
+					 &need_full_reset);
+	if (r) {
+		/*TODO Should we stop ?*/
+		DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+			  r, adev->ddev->unique);
+		adev->asic_reset_res = r;
+	}
+
+	/* Build list of devices to reset */
+	if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+		if (!hive) {
+			amdgpu_device_unlock_adev(adev);
+			return -ENODEV;
+		}
+
+		/*
+		 * In case we are in XGMI hive mode device reset is done for all the
+		 * nodes in the hive to retrain all XGMI links and hence the reset
+		 * sequence is executed in loop on all nodes.
+		 */
+		device_list_handle = &hive->device_list;
+	} else {
+		list_add_tail(&adev->gmc.xgmi.head, &device_list);
+		device_list_handle = &device_list;
+	}
+
+retry:	/* Pre asic reset for the remaining adevs in the XGMI hive. */
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+		if (tmp_adev == adev)
+			continue;
+
+		dev_info(tmp_adev->dev, "GPU reset begin for drm dev %s!\n", adev->ddev->unique);
+
+		amdgpu_device_lock_adev(tmp_adev);
+		r = amdgpu_device_pre_asic_reset(tmp_adev,
+						 NULL,
+						 &need_full_reset);
+		/* TODO: Should we stop? */
+		if (r) {
+			DRM_ERROR("GPU pre asic reset failed with error %d for drm dev %s",
+				  r, tmp_adev->ddev->unique);
+			tmp_adev->asic_reset_res = r;
+		}
+	}
+
+	/* Actual ASIC resets if needed.*/
+	/* TODO Implement XGMI hive reset logic for SRIOV */
+	if (amdgpu_sriov_vf(adev)) {
+		r = amdgpu_device_reset_sriov(adev, job ? false : true);
+		if (r)
+			adev->asic_reset_res = r;
+	} else {
+		r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+		if (r && r == -EAGAIN)
+			goto retry;
+	}
+
+	/* Post ASIC reset for all devs. */
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+		amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+
+		if (r) {
+			/* bad news, how to tell it to userspace ? */
+			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+		} else {
+			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+		}
+
+		amdgpu_device_unlock_adev(tmp_adev);
+	}
+
+	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+		mutex_unlock(&hive->hive_lock);
+
+	if (r)
+		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
 	return r;
 }
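The hive_lock trylock above implements a "first resetter wins" policy: a node that loses the race returns immediately, since the winner resets every node in the hive anyway. A minimal userspace analogue of that guard using pthreads, purely illustrative:

/* Sketch of the concurrent-reset guard: only the caller that wins
 * the trylock performs the hive-wide reset; losers bail out.
 * Userspace analogue, not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hive_lock = PTHREAD_MUTEX_INITIALIZER;

static void gpu_recover(int node)
{
	if (pthread_mutex_trylock(&hive_lock) != 0) {
		/* another node is already resetting the whole hive */
		printf("node %d: reset in progress, skipping\n", node);
		return;
	}
	printf("node %d: performing hive-wide reset\n", node);
	pthread_mutex_unlock(&hive_lock);
}

int main(void)
{
	pthread_mutex_lock(&hive_lock);	/* simulate a reset in flight */
	gpu_recover(1);			/* loses the race, bails out */
	pthread_mutex_unlock(&hive_lock);

	gpu_recover(0);			/* wins the lock and resets */
	return 0;
}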
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 686a26d..15ce7e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -631,6 +631,11 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
 			drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
 		if (!adev->mode_info.max_bpc_property)
 			return -ENOMEM;
+		adev->mode_info.abm_level_property =
+			drm_property_create_range(adev->ddev, 0,
+						"abm level", 0, 4);
+		if (!adev->mode_info.abm_level_property)
+			return -ENOMEM;
 	}
 
 	return 0;
@@ -857,7 +862,12 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
 	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
 	if (in_vbl && (*vpos >= vbl_start)) {
 		vtotal = mode->crtc_vtotal;
-		*vpos = *vpos - vtotal;
+
+		/* With variable refresh rate displays the vpos can exceed
+		 * the vtotal value. Clamp to 0 to return -vbl_end instead
+		 * of guessing the remaining number of lines until scanout.
+		 */
+		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
 	}
 
 	/* Correct for shifted end of vbl at vbl_end. */
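A standalone sketch of the corrected scanout position, showing both a fixed-refresh scanline inside the upper vblank and a VRR scanline stretched past vtotal (timing values are illustrative):

/* Sketch of the vblank correction above: inside the upper vblank,
 * report a negative distance to scanout; with VRR the scanline can
 * exceed vtotal, so clamp to 0 rather than guess. Timings illustrative. */
#include <stdio.h>

static int correct_vpos(int vpos, int vbl_start, int vtotal)
{
	if (vpos >= vbl_start)	/* inside "upper part" of the vblank */
		vpos = (vpos < vtotal) ? (vpos - vtotal) : 0;
	return vpos;
}

int main(void)
{
	int vbl_start = 1080, vtotal = 1125;

	printf("%d\n", correct_vpos(1100, vbl_start, vtotal));	/* -25 */
	printf("%d\n", correct_vpos(1500, vbl_start, vtotal));	/* 0: VRR stretch */
	return 0;
}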
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
new file mode 100644
index 0000000..be620b2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+struct amdgpu_doorbell {
+	/* doorbell mmio */
+	resource_size_t		base;
+	resource_size_t		size;
+	u32 __iomem		*ptr;
+	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
+};
+
+/* Reserved doorbells for amdgpu (including multimedia).
+ * KFD can use all the rest in the 2M doorbell bar.
+ * For ASICs before vega10 the doorbell is 32-bit, so the
+ * index/offset is in dwords. For vega10 and later the doorbell
+ * can be 64-bit, so the index is defined in qwords.
+ */
+struct amdgpu_doorbell_index {
+	uint32_t kiq;
+	uint32_t mec_ring0;
+	uint32_t mec_ring1;
+	uint32_t mec_ring2;
+	uint32_t mec_ring3;
+	uint32_t mec_ring4;
+	uint32_t mec_ring5;
+	uint32_t mec_ring6;
+	uint32_t mec_ring7;
+	uint32_t userqueue_start;
+	uint32_t userqueue_end;
+	uint32_t gfx_ring0;
+	uint32_t sdma_engine0;
+	uint32_t sdma_engine1;
+	uint32_t sdma_engine2;
+	uint32_t sdma_engine3;
+	uint32_t sdma_engine4;
+	uint32_t sdma_engine5;
+	uint32_t sdma_engine6;
+	uint32_t sdma_engine7;
+	uint32_t ih;
+	union {
+		struct {
+			uint32_t vcn_ring0_1;
+			uint32_t vcn_ring2_3;
+			uint32_t vcn_ring4_5;
+			uint32_t vcn_ring6_7;
+		} vcn;
+		struct {
+			uint32_t uvd_ring0_1;
+			uint32_t uvd_ring2_3;
+			uint32_t uvd_ring4_5;
+			uint32_t uvd_ring6_7;
+			uint32_t vce_ring0_1;
+			uint32_t vce_ring2_3;
+			uint32_t vce_ring4_5;
+			uint32_t vce_ring6_7;
+		} uvd_vce;
+	};
+	uint32_t max_assignment;
+};
+
+typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
+{
+	AMDGPU_DOORBELL_KIQ                     = 0x000,
+	AMDGPU_DOORBELL_HIQ                     = 0x001,
+	AMDGPU_DOORBELL_DIQ                     = 0x002,
+	AMDGPU_DOORBELL_MEC_RING0               = 0x010,
+	AMDGPU_DOORBELL_MEC_RING1               = 0x011,
+	AMDGPU_DOORBELL_MEC_RING2               = 0x012,
+	AMDGPU_DOORBELL_MEC_RING3               = 0x013,
+	AMDGPU_DOORBELL_MEC_RING4               = 0x014,
+	AMDGPU_DOORBELL_MEC_RING5               = 0x015,
+	AMDGPU_DOORBELL_MEC_RING6               = 0x016,
+	AMDGPU_DOORBELL_MEC_RING7               = 0x017,
+	AMDGPU_DOORBELL_GFX_RING0               = 0x020,
+	AMDGPU_DOORBELL_sDMA_ENGINE0            = 0x1E0,
+	AMDGPU_DOORBELL_sDMA_ENGINE1            = 0x1E1,
+	AMDGPU_DOORBELL_IH                      = 0x1E8,
+	AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
+	AMDGPU_DOORBELL_INVALID                 = 0xFFFF
+} AMDGPU_DOORBELL_ASSIGNMENT;
+
+typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
+{
+	/* Compute + GFX: 0~255 */
+	AMDGPU_VEGA20_DOORBELL_KIQ                     = 0x000,
+	AMDGPU_VEGA20_DOORBELL_HIQ                     = 0x001,
+	AMDGPU_VEGA20_DOORBELL_DIQ                     = 0x002,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING0               = 0x003,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING1               = 0x004,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING2               = 0x005,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING3               = 0x006,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING4               = 0x007,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING5               = 0x008,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING6               = 0x009,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING7               = 0x00A,
+	AMDGPU_VEGA20_DOORBELL_USERQUEUE_START	       = 0x00B,
+	AMDGPU_VEGA20_DOORBELL_USERQUEUE_END	       = 0x08A,
+	AMDGPU_VEGA20_DOORBELL_GFX_RING0               = 0x08B,
+	/* SDMA:256~335*/
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0            = 0x100,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1            = 0x10A,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2            = 0x114,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3            = 0x11E,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4            = 0x128,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5            = 0x132,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6            = 0x13C,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7            = 0x146,
+	/* IH: 376~391 */
+	AMDGPU_VEGA20_DOORBELL_IH                      = 0x178,
+	/* MMSCH: 392~407
+	 * The UVD/VCE doorbell assignments overlap with VCN as they are
+	 * mutually exclusive; the VCE engine's doorbell is 32-bit and two
+	 * VCE rings share one qword.
+	 */
+	AMDGPU_VEGA20_DOORBELL64_VCN0_1                  = 0x188, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+	AMDGPU_VEGA20_DOORBELL64_VCN2_3                  = 0x189,
+	AMDGPU_VEGA20_DOORBELL64_VCN4_5                  = 0x18A,
+	AMDGPU_VEGA20_DOORBELL64_VCN6_7                  = 0x18B,
+
+	AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1             = 0x188,
+	AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3             = 0x189,
+	AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5             = 0x18A,
+	AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7             = 0x18B,
+
+	AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1             = 0x18C,
+	AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3             = 0x18D,
+	AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5             = 0x18E,
+	AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7             = 0x18F,
+	AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT            = 0x18F,
+	AMDGPU_VEGA20_DOORBELL_INVALID                   = 0xFFFF
+} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
+
+/*
+ * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
+ */
+typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+{
+	/*
+	 * All compute related doorbells (kiq, hiq, diq, traditional compute
+	 * queues and user queues) should be located in a contiguous range so
+	 * that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover it.
+	 * Compute related doorbells are allocated from 0x00 to 0x8a.
+	 */
+
+
+	/* kernel scheduling */
+	AMDGPU_DOORBELL64_KIQ                     = 0x00,
+
+	/* HSA interface queue and debug queue */
+	AMDGPU_DOORBELL64_HIQ                     = 0x01,
+	AMDGPU_DOORBELL64_DIQ                     = 0x02,
+
+	/* Compute engines */
+	AMDGPU_DOORBELL64_MEC_RING0               = 0x03,
+	AMDGPU_DOORBELL64_MEC_RING1               = 0x04,
+	AMDGPU_DOORBELL64_MEC_RING2               = 0x05,
+	AMDGPU_DOORBELL64_MEC_RING3               = 0x06,
+	AMDGPU_DOORBELL64_MEC_RING4               = 0x07,
+	AMDGPU_DOORBELL64_MEC_RING5               = 0x08,
+	AMDGPU_DOORBELL64_MEC_RING6               = 0x09,
+	AMDGPU_DOORBELL64_MEC_RING7               = 0x0a,
+
+	/* User queue doorbell range (128 doorbells) */
+	AMDGPU_DOORBELL64_USERQUEUE_START         = 0x0b,
+	AMDGPU_DOORBELL64_USERQUEUE_END           = 0x8a,
+
+	/* Graphics engine */
+	AMDGPU_DOORBELL64_GFX_RING0               = 0x8b,
+
+	/*
+	 * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
+	 * Graphics voltage island aperture 1
+	 * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+	 */
+
+	/* For vega10 sriov, the sdma doorbell must be fixed as follows
+	 * to keep the same setting as the host driver, or conflicts
+	 * will occur.
+	 */
+	AMDGPU_DOORBELL64_sDMA_ENGINE0            = 0xF0,
+	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xF1,
+	AMDGPU_DOORBELL64_sDMA_ENGINE1            = 0xF2,
+	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1     = 0xF3,
+
+	/* Interrupt handler */
+	AMDGPU_DOORBELL64_IH                      = 0xF4,  /* For legacy interrupt ring buffer */
+	AMDGPU_DOORBELL64_IH_RING1                = 0xF5,  /* For page migration request log */
+	AMDGPU_DOORBELL64_IH_RING2                = 0xF6,  /* For page migration translation/invalidation log */
+
+	/* VCN engine uses 32-bit doorbells */
+	AMDGPU_DOORBELL64_VCN0_1                  = 0xF8, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+	AMDGPU_DOORBELL64_VCN2_3                  = 0xF9,
+	AMDGPU_DOORBELL64_VCN4_5                  = 0xFA,
+	AMDGPU_DOORBELL64_VCN6_7                  = 0xFB,
+
+	/* The UVD/VCE doorbell assignments overlap with VCN as they are
+	 * mutually exclusive; the VCE engine's doorbell is 32-bit and two
+	 * VCE rings share one qword.
+	 */
+	AMDGPU_DOORBELL64_UVD_RING0_1             = 0xF8,
+	AMDGPU_DOORBELL64_UVD_RING2_3             = 0xF9,
+	AMDGPU_DOORBELL64_UVD_RING4_5             = 0xFA,
+	AMDGPU_DOORBELL64_UVD_RING6_7             = 0xFB,
+
+	AMDGPU_DOORBELL64_VCE_RING0_1             = 0xFC,
+	AMDGPU_DOORBELL64_VCE_RING2_3             = 0xFD,
+	AMDGPU_DOORBELL64_VCE_RING4_5             = 0xFE,
+	AMDGPU_DOORBELL64_VCE_RING6_7             = 0xFF,
+
+	AMDGPU_DOORBELL64_MAX_ASSIGNMENT          = 0xFF,
+	AMDGPU_DOORBELL64_INVALID                 = 0xFFFF
+} AMDGPU_DOORBELL64_ASSIGNMENT;
+
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+
+#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
+#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+
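Since the 64-bit doorbell indices in this header are qword-based, a WDOORBELL64 write lands at byte offset index * 8 into the mapped BAR. A standalone sketch of that addressing, with plain memory standing in for the ioremap'd doorbell aperture:

/* Sketch of 64-bit doorbell addressing: the qword index selects
 * entry [index], i.e. byte offset index * 8 into the mapped doorbell
 * BAR. Plain memory stands in for the MMIO mapping. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bar[0x200] = { 0 };	/* stand-in for adev->doorbell.ptr */
	uint32_t index = 0x8B;		/* e.g. the GFX_RING0 qword index */

	bar[index] = 0x1234;		/* WDOORBELL64(index, v) analogue */
	printf("wrote at byte offset 0x%X\n", (unsigned)(index * 8));
	return 0;
}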
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8de55f7..90f474f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -454,9 +454,10 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
 
 /**
  * DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Pramater Cache per Shader Engine in Byte. The default is 0 (depending on gfx).
+ * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
+ * The default is 0 (depending on gfx).
  */
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
+MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
 module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
 
 /**
@@ -1220,9 +1221,6 @@ static struct drm_driver kms_driver = {
 	.patchlevel = KMS_DRIVER_PATCHLEVEL,
 };
 
-static struct drm_driver *driver;
-static struct pci_driver *pdriver;
-
 static struct pci_driver amdgpu_kms_pci_driver = {
 	.name = DRIVER_NAME,
 	.id_table = pciidlist,
@@ -1252,16 +1250,14 @@ static int __init amdgpu_init(void)
 		goto error_fence;
 
 	DRM_INFO("amdgpu kernel modesetting enabled.\n");
-	driver = &kms_driver;
-	pdriver = &amdgpu_kms_pci_driver;
-	driver->num_ioctls = amdgpu_max_kms_ioctl;
+	kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
 
 	/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
 	amdgpu_amdkfd_init();
 
 	/* let modprobe override vga console setting */
-	return pci_register_driver(pdriver);
+	return pci_register_driver(&amdgpu_kms_pci_driver);
 
 error_fence:
 	amdgpu_sync_fini();
@@ -1273,7 +1269,7 @@ static int __init amdgpu_init(void)
 static void __exit amdgpu_exit(void)
 {
 	amdgpu_amdkfd_fini();
-	pci_unregister_driver(pdriver);
+	pci_unregister_driver(&amdgpu_kms_pci_driver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
 	amdgpu_fence_slab_fini();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 6a70c0b..97a60da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -250,7 +250,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 	ring->adev = NULL;
 	ring->ring_obj = NULL;
 	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+	ring->doorbell_index = adev->doorbell_index.kiq;
 
 	r = amdgpu_gfx_kiq_acquire(adev, ring);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 9b3164c..bc62bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	if (!info->return_size || !info->return_pointer)
 		return -EINVAL;
 
-	/* Ensure IB tests are run on ring */
-	flush_delayed_work(&adev->late_init_work);
-
 	switch (info->query) {
 	case AMDGPU_INFO_ACCEL_WORKING:
 		ui32 = adev->accel_working;
@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	struct amdgpu_fpriv *fpriv;
 	int r, pasid;
 
+	/* Ensure IB tests are run on ring */
+	flush_delayed_work(&adev->late_init_work);
+
 	file_priv->driver_priv = NULL;
 
 	r = pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 0dc2c5c..aadd0fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,7 +38,6 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_plane_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/hrtimer.h>
@@ -294,13 +293,6 @@ struct amdgpu_display_funcs {
 			      uint16_t connector_object_id,
 			      struct amdgpu_hpd *hpd,
 			      struct amdgpu_router *router);
-	/* it is used to enter or exit into free sync mode */
-	int (*notify_freesync)(struct drm_device *dev, void *data,
-			       struct drm_file *filp);
-	/* it is used to allow enablement of freesync mode */
-	int (*set_freesync_property)(struct drm_connector *connector,
-				     struct drm_property *property,
-				     uint64_t val);
 
 
 };
@@ -340,6 +332,8 @@ struct amdgpu_mode_info {
 	struct drm_property *dither_property;
 	/* maximum number of bits per channel for monitor color */
 	struct drm_property *max_bpc_property;
+	/* Adaptive Backlight Modulation (power feature) */
+	struct drm_property *abm_level_property;
 	/* hardcoded DFP edid from BIOS */
 	struct edid *bios_hardcoded_edid;
 	int bios_hardcoded_edid_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7235cd0..1f61ed9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -33,6 +33,8 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/nospec.h>
+#include "hwmgr.h"
+#define WIDTH_4K 3840
 
 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
 
@@ -1642,6 +1644,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
 		return 0;
 
+	/* Skip fan attributes on APU */
+	if ((adev->flags & AMD_IS_APU) &&
+	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+		return 0;
+
 	/* Skip limit attributes if DPM is not enabled */
 	if (!adev->pm.dpm_enabled &&
 	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
@@ -1956,6 +1971,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
 		mutex_unlock(&adev->pm.mutex);
 	}
+	/* enable/disable Low Memory PState for UVD (4k videos) */
+	if (adev->asic_type == CHIP_STONEY &&
+		adev->uvd.decode_image_width >= WIDTH_4K) {
+		struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+		if (hwmgr && hwmgr->hwmgr_func &&
+		    hwmgr->hwmgr_func->update_nbdpm_pstate)
+			hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+							       !enable,
+							       true);
+	}
 }
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 69896f4..4e5d13e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -692,6 +692,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
 	buf_sizes[0x1] = dpb_size;
 	buf_sizes[0x2] = image_size;
 	buf_sizes[0x4] = min_ctx_size;
+	/* store image width to adjust nb memory pstate */
+	adev->uvd.decode_image_width = width;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index a3ab1a4..5eb6328 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -65,6 +65,8 @@ struct amdgpu_uvd {
 	struct drm_sched_entity entity;
 	struct delayed_work	idle_work;
 	unsigned		harvest_config;
+	/* store image width to adjust nb memory pstate */
+	unsigned		decode_image_width;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index cfee747..462a04e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -334,7 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 
 	if (adev->fw_vram_usage.va != NULL) {
 		adev->virt.fw_reserve.p_pf2vf =
-			(struct amdgim_pf2vf_info_header *)(
+			(struct amd_sriov_msg_pf2vf_info_header *)(
 			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
 		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
 		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 0728fbc..722deef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -63,8 +63,8 @@ struct amdgpu_virt_ops {
  * Firmware Reserve Frame buffer
  */
 struct amdgpu_virt_fw_reserve {
-	struct amdgim_pf2vf_info_header *p_pf2vf;
-	struct amdgim_vf2pf_info_header *p_vf2pf;
+	struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
+	struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
 	unsigned int checksum_key;
 };
 /*
@@ -85,15 +85,17 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
 };
 
-struct amdgim_pf2vf_info_header {
+struct amd_sriov_msg_pf2vf_info_header {
 	/* the total structure size in byte. */
 	uint32_t size;
 	/* version of this structure, written by the GIM */
 	uint32_t version;
+	/* reserved */
+	uint32_t reserved[2];
 } __aligned(4);
 struct  amdgim_pf2vf_info_v1 {
 	/* header contains size and version */
-	struct amdgim_pf2vf_info_header header;
+	struct amd_sriov_msg_pf2vf_info_header header;
 	/* max_width * max_height */
 	unsigned int uvd_enc_max_pixels_count;
 	/* 16x16 pixels/sec, codec independent */
@@ -112,7 +114,7 @@ struct  amdgim_pf2vf_info_v1 {
 
 struct  amdgim_pf2vf_info_v2 {
 	/* header contains size and version */
-	struct amdgim_pf2vf_info_header header;
+	struct amd_sriov_msg_pf2vf_info_header header;
 	/* use private key from mailbox 2 to create checksum */
 	uint32_t checksum;
 	/* The features flags of the GIM driver supports. */
@@ -137,20 +139,22 @@ struct  amdgim_pf2vf_info_v2 {
 	uint64_t vcefw_kboffset;
 	/* VCE FW size in KB */
 	uint32_t vcefw_ksize;
-	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
 } __aligned(4);
 
 
-struct amdgim_vf2pf_info_header {
+struct amd_sriov_msg_vf2pf_info_header {
 	/* the total structure size in byte. */
 	uint32_t size;
 	/*version of this structure, written by the guest */
 	uint32_t version;
+	/* reserved */
+	uint32_t reserved[2];
 } __aligned(4);
 
 struct amdgim_vf2pf_info_v1 {
 	/* header contains size and version */
-	struct amdgim_vf2pf_info_header header;
+	struct amd_sriov_msg_vf2pf_info_header header;
 	/* driver version */
 	char driver_version[64];
 	/* driver certification, 1=WHQL, 0=None */
@@ -180,7 +184,7 @@ struct amdgim_vf2pf_info_v1 {
 
 struct amdgim_vf2pf_info_v2 {
 	/* header contains size and version */
-	struct amdgim_vf2pf_info_header header;
+	struct amd_sriov_msg_vf2pf_info_header header;
 	uint32_t checksum;
 	/* driver version */
 	uint8_t driver_version[64];
@@ -206,7 +210,7 @@ struct amdgim_vf2pf_info_v2 {
 	uint32_t uvd_enc_usage;
 	/* guest uvd engine usage percentage. 0xffff means N/A. */
 	uint32_t uvd_enc_health;
-	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
 } __aligned(4);
 
 #define AMDGPU_FW_VRAM_VF2PF_VER 2
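
Since both headers now share the same size/version/reserved layout, a
consumer can gate v2-only field access on what the producer actually wrote
instead of assuming the latest struct. A hedged sketch, reusing the
fw_reserve pointer updated in amdgpu_virt.c above:

    struct amd_sriov_msg_pf2vf_info_header *hdr = adev->virt.fw_reserve.p_pf2vf;

    if (hdr && hdr->version == 2 &&
        hdr->size >= sizeof(struct amdgim_pf2vf_info_v2)) {
    	struct amdgim_pf2vf_info_v2 *v2 =
    		(struct amdgim_pf2vf_info_v2 *)hdr;

    	/* v2-only fields such as v2->checksum are safe to read here */
    }
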
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 909216a..fb37e69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -23,7 +23,7 @@
  */
 #include <linux/list.h>
 #include "amdgpu.h"
-#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
 
 
 static DEFINE_MUTEX(xgmi_mutex);
@@ -31,15 +31,16 @@ static DEFINE_MUTEX(xgmi_mutex);
 #define AMDGPU_MAX_XGMI_HIVE			8
 #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4
 
-struct amdgpu_hive_info {
-	uint64_t		hive_id;
-	struct list_head	device_list;
-};
-
 static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
 static unsigned hive_count = 0;
 
-static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+
+void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
+{
+	return &hive->device_list;
+}
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 {
 	int i;
 	struct amdgpu_hive_info *tmp;
@@ -58,15 +59,38 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 	tmp = &xgmi_hives[hive_count++];
 	tmp->hive_id = adev->gmc.xgmi.hive_id;
 	INIT_LIST_HEAD(&tmp->device_list);
+	mutex_init(&tmp->hive_lock);
+
 	return tmp;
 }
 
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
+{
+	int ret = -EINVAL;
+
+	/* Each PSP needs to set the latest topology */
+	ret = psp_xgmi_set_topology_info(&adev->psp,
+					 hive->number_devices,
+					 &hive->topology_info);
+	if (ret)
+		dev_err(adev->dev,
+			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
+			adev->gmc.xgmi.node_id,
+			adev->gmc.xgmi.hive_id, ret);
+	else
+		dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
+			 adev->gmc.xgmi.physical_node_id,
+			 adev->gmc.xgmi.hive_id);
+
+	return ret;
+}
+
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 {
-	struct psp_xgmi_topology_info *tmp_topology;
+	struct psp_xgmi_topology_info *hive_topology;
 	struct amdgpu_hive_info *hive;
 	struct amdgpu_xgmi	*entry;
-	struct amdgpu_device 	*tmp_adev;
+	struct amdgpu_device *tmp_adev = NULL;
 
 	int count = 0, ret = -EINVAL;
 
@@ -76,21 +100,21 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 	adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
 	adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
 
-	tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL);
-	if (!tmp_topology)
-		return -ENOMEM;
 	mutex_lock(&xgmi_mutex);
 	hive = amdgpu_get_xgmi_hive(adev);
 	if (!hive)
 		goto exit;
 
+	hive_topology = &hive->topology_info;
+
 	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
 	list_for_each_entry(entry, &hive->device_list, head)
-		tmp_topology->nodes[count++].node_id = entry->node_id;
+		hive_topology->nodes[count++].node_id = entry->node_id;
+	hive->number_devices = count;
 
 	/* Each psp need to get the latest topology */
 	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
-		ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, tmp_topology);
+		ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
 		if (ret) {
 			dev_err(tmp_adev->dev,
 				"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
@@ -101,25 +125,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 		}
 	}
 
-	/* Each psp need to set the latest topology */
 	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
-		ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
-		if (ret) {
-			dev_err(tmp_adev->dev,
-				"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
-				tmp_adev->gmc.xgmi.node_id,
-				tmp_adev->gmc.xgmi.hive_id, ret);
-			/* To do : continue with some  node failed or disable the  whole  hive */
+		ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+		if (ret)
 			break;
-		}
 	}
-	if (!ret)
-		dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
-			adev->gmc.xgmi.physical_node_id,
-			adev->gmc.xgmi.hive_id);
 
 exit:
 	mutex_unlock(&xgmi_mutex);
-	kfree(tmp_topology);
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
new file mode 100644
index 0000000..6335bfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_XGMI_H__
+#define __AMDGPU_XGMI_H__
+
+#include "amdgpu_psp.h"
+
+struct amdgpu_hive_info {
+	uint64_t		hive_id;
+	struct list_head	device_list;
+	struct psp_xgmi_topology_info	topology_info;
+	int number_devices;
+	struct mutex hive_lock;
+};
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
+int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+
+#endif
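
With the hive struct and helpers now exported, a prospective caller outside
amdgpu_xgmi.c (a future reset path, say) might look like the sketch below.
The hive_lock discipline is an assumption: this series only initializes the
mutex without adding users.

    struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

    if (hive) {
    	mutex_lock(&hive->hive_lock);	/* assumed locking discipline */
    	amdgpu_xgmi_update_topology(hive, adev);
    	mutex_unlock(&hive->hive_lock);
    }
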
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index f41f5f5..71c50d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1755,6 +1755,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 	.flush_hdp = &cik_flush_hdp,
 	.invalidate_hdp = &cik_invalidate_hdp,
 	.need_full_reset = &cik_need_full_reset,
+	.init_doorbell_index = &legacy_doorbell_index_init,
 };
 
 static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index e49c6f1..54c625a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -30,4 +30,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
 		     u32 me, u32 pipe, u32 queue, u32 vmid);
 int cik_set_ip_blocks(struct amdgpu_device *adev);
 
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index f467b9b..3a9fb60 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4363,7 +4363,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
 	ring->ring_obj = NULL;
 	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index cb066a8..1454fc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -44,7 +44,6 @@
 #include "gca/gfx_8_0_d.h"
 #include "gca/gfx_8_0_enum.h"
 #include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
 
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
@@ -1891,7 +1890,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
 	ring->ring_obj = NULL;
 	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
 				+ (ring_id * GFX8_MEC_HPD_SIZE);
 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -2002,7 +2001,7 @@ static int gfx_v8_0_sw_init(void *handle)
 		/* no gfx doorbells on iceland */
 		if (adev->asic_type != CHIP_TOPAZ) {
 			ring->use_doorbell = true;
-			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
+			ring->doorbell_index = adev->doorbell_index.gfx_ring0;
 		}
 
 		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
@@ -4216,7 +4215,7 @@ static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu
 
 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
 					DOORBELL_RANGE_LOWER,
-					AMDGPU_DOORBELL_GFX_RING0);
+					adev->doorbell_index.gfx_ring0);
 	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
 
 	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
@@ -4645,8 +4644,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
 {
 	if (adev->asic_type > CHIP_TONGA) {
-		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2);
-		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2);
+		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
+		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
 	}
 	/* enable doorbells */
 	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c27caa1..af8ccb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1566,7 +1566,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
 	ring->ring_obj = NULL;
 	ring->use_doorbell = true;
-	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
+	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
 				+ (ring_id * GFX9_MEC_HPD_SIZE);
 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -1655,7 +1655,7 @@ static int gfx_v9_0_sw_init(void *handle)
 		else
 			sprintf(ring->name, "gfx_%d", i);
 		ring->use_doorbell = true;
-		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
+		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
 		r = amdgpu_ring_init(adev, ring, 1024,
 				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
@@ -2981,9 +2981,9 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
 	/* enable the doorbell if requested */
 	if (ring->use_doorbell) {
 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
-					(AMDGPU_DOORBELL64_KIQ *2) << 2);
+					(adev->doorbell_index.kiq * 2) << 2);
 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-					(AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
+					(adev->doorbell_index.userqueue_end * 2) << 2);
 	}
 
 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 811231e..3a4e5d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -338,9 +338,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 		struct amdgpu_vmhub *hub = &adev->vmhub[i];
 		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 
-		if (i == AMDGPU_GFXHUB && !adev->in_gpu_reset &&
-		    adev->gfx.kiq.ring.sched.ready &&
-		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
+		/* This is necessary for a HW workaround under SRIOV as well
+		 * as GFXOFF under bare metal
+		 */
+		if (adev->gfx.kiq.ring.sched.ready &&
+		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
+		    !adev->in_gpu_reset) {
 			uint32_t req = hub->vm_inv_eng0_req + eng;
 			uint32_t ack = hub->vm_inv_eng0_ack + eng;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 64e875d..6a0fcd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -37,7 +37,6 @@
 #include "gmc/gmc_8_2_sh_mask.h"
 #include "oss/oss_3_0_d.h"
 #include "oss/oss_3_0_sh_mask.h"
-#include "gca/gfx_8_0_sh_mask.h"
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
 #include "smu/smu_7_1_3_d.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 295c220..d78b430 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -240,12 +240,9 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
 			       enum psp_ring_type ring_type)
 {
 	int ret = 0;
-	struct psp_ring *ring;
 	unsigned int psp_ring_reg = 0;
 	struct amdgpu_device *adev = psp->adev;
 
-	ring = &psp->km_ring;
-
 	/* Write the ring destroy command to C2PMSG_64 */
 	psp_ring_reg = 3 << 16;
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 9cea0bb..7efb823 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -356,12 +356,9 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
 			      enum psp_ring_type ring_type)
 {
 	int ret = 0;
-	struct psp_ring *ring;
 	unsigned int psp_ring_reg = 0;
 	struct amdgpu_device *adev = psp->adev;
 
-	ring = &psp->km_ring;
-
 	/* Write the ring destroy command to C2PMSG_64 */
 	psp_ring_reg = 3 << 16;
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
@@ -593,7 +590,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
 	}
 
 	/*send the mode 1 reset command*/
-	WREG32(offset, 0x70000);
+	WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
 
 	mdelay(1000);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b6a25f9..1bccc5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1146,7 +1146,7 @@ static int sdma_v3_0_sw_init(void *handle)
 		if (!amdgpu_sriov_vf(adev)) {
 			ring->use_doorbell = true;
 			ring->doorbell_index = (i == 0) ?
-				AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+				adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1;
 		} else {
 			ring->use_pollmem = true;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index f4490cd..4b6d3e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -925,11 +925,9 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
 					OFFSET, ring->doorbell_index);
 	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
 	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
-	/* TODO: enable doorbell support */
-	/*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
-					      ring->doorbell_index);*/
 
-	sdma_v4_0_ring_set_wptr(ring);
+	/* the paging queue doorbell range is set up in sdma_v4_0_gfx_resume */
+	sdma_v4_0_page_ring_set_wptr(ring);
 
 	/* set minor_ptr_update to 0 after wptr programed */
 	WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
@@ -1449,23 +1447,46 @@ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 	sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
 }
 
+static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+	uint fw_version = adev->sdma.instance[0].fw_version;
+
+	switch (adev->asic_type) {
+	case CHIP_VEGA10:
+		return fw_version >= 430;
+	case CHIP_VEGA12:
+		/*return fw_version >= 31;*/
+		return false;
+	case CHIP_VEGA20:
+		/*return fw_version >= 115;*/
+		return false;
+	default:
+		return false;
+	}
+}
+
 static int sdma_v4_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
 
-	if (adev->asic_type == CHIP_RAVEN) {
+	if (adev->asic_type == CHIP_RAVEN)
 		adev->sdma.num_instances = 1;
-		adev->sdma.has_page_queue = false;
-	} else {
+	else
 		adev->sdma.num_instances = 2;
-		/* TODO: Page queue breaks driver reload under SRIOV */
-		if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
-			adev->sdma.has_page_queue = false;
-		else if (adev->asic_type != CHIP_VEGA20 &&
-				adev->asic_type != CHIP_VEGA12)
-			adev->sdma.has_page_queue = true;
+
+	r = sdma_v4_0_init_microcode(adev);
+	if (r) {
+		DRM_ERROR("Failed to load sdma firmware!\n");
+		return r;
 	}
 
+	/* TODO: Page queue breaks driver reload under SRIOV */
+	if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
+		adev->sdma.has_page_queue = false;
+	else if (sdma_v4_0_fw_support_paging_queue(adev))
+		adev->sdma.has_page_queue = true;
+
 	sdma_v4_0_set_ring_funcs(adev);
 	sdma_v4_0_set_buffer_funcs(adev);
 	sdma_v4_0_set_vm_pte_funcs(adev);
@@ -1474,7 +1495,6 @@ static int sdma_v4_0_early_init(void *handle)
 	return 0;
 }
 
-
 static int sdma_v4_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
@@ -1493,12 +1513,6 @@ static int sdma_v4_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	r = sdma_v4_0_init_microcode(adev);
-	if (r) {
-		DRM_ERROR("Failed to load sdma firmware!\n");
-		return r;
-	}
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
 		ring->ring_obj = NULL;
@@ -1507,15 +1521,10 @@ static int sdma_v4_0_sw_init(void *handle)
 		DRM_INFO("use_doorbell being set to: [%s]\n",
 				ring->use_doorbell?"true":"false");
 
-		if (adev->asic_type == CHIP_VEGA10)
-			ring->doorbell_index = (i == 0) ?
-				(AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
-				: (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
-		else
-			ring->doorbell_index = (i == 0) ?
-				(AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
-				: (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
-
+		/* doorbell size is 2 dwords, get DWORD offset */
+		ring->doorbell_index = (i == 0) ?
+			(adev->doorbell_index.sdma_engine0 << 1)
+			: (adev->doorbell_index.sdma_engine1 << 1);
 
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
@@ -1529,7 +1538,15 @@ static int sdma_v4_0_sw_init(void *handle)
 		if (adev->sdma.has_page_queue) {
 			ring = &adev->sdma.instance[i].page;
 			ring->ring_obj = NULL;
-			ring->use_doorbell = false;
+			ring->use_doorbell = true;
+
+			/* The paging queue uses the same doorbell index/routing as
+			 * the gfx queue, with a 0x400 dword (4 KB) offset that puts
+			 * it on the second doorbell page.
+			 */
+			ring->doorbell_index = (i == 0) ?
+				(adev->doorbell_index.sdma_engine0 << 1)
+				: (adev->doorbell_index.sdma_engine1 << 1);
+			ring->doorbell_index += 0x400;
 
 			sprintf(ring->name, "page%d", i);
 			r = amdgpu_ring_init(adev, ring, 1024,
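
The paging queue doorbell arithmetic is worth a worked example. Taking the
Vega10 value AMDGPU_DOORBELL64_sDMA_ENGINE1 == 0xF2 from earlier in this
series:

    u32 gfx_db  = adev->doorbell_index.sdma_engine1 << 1;	/* 0xF2 << 1 == 0x1E4 */
    u32 page_db = gfx_db + 0x400;	/* 0x5E4: 0x400 dwords (4 KB) later, second page */
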
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4cc0dcb..83624e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -613,6 +613,24 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
 	.flush_hdp = &soc15_flush_hdp,
 	.invalidate_hdp = &soc15_invalidate_hdp,
 	.need_full_reset = &soc15_need_full_reset,
+	.init_doorbell_index = &vega10_doorbell_index_init,
+};
+
+static const struct amdgpu_asic_funcs vega20_asic_funcs =
+{
+	.read_disabled_bios = &soc15_read_disabled_bios,
+	.read_bios_from_rom = &soc15_read_bios_from_rom,
+	.read_register = &soc15_read_register,
+	.reset = &soc15_asic_reset,
+	.set_vga_state = &soc15_vga_set_state,
+	.get_xclk = &soc15_get_xclk,
+	.set_uvd_clocks = &soc15_set_uvd_clocks,
+	.set_vce_clocks = &soc15_set_vce_clocks,
+	.get_config_memsize = &soc15_get_config_memsize,
+	.flush_hdp = &soc15_flush_hdp,
+	.invalidate_hdp = &soc15_invalidate_hdp,
+	.need_full_reset = &soc15_need_full_reset,
+	.init_doorbell_index = &vega20_doorbell_index_init,
 };
 
 static int soc15_common_early_init(void *handle)
@@ -632,11 +650,11 @@ static int soc15_common_early_init(void *handle)
 	adev->se_cac_rreg = &soc15_se_cac_rreg;
 	adev->se_cac_wreg = &soc15_se_cac_wreg;
 
-	adev->asic_funcs = &soc15_asic_funcs;
 
 	adev->external_rev_id = 0xFF;
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
+		adev->asic_funcs = &soc15_asic_funcs;
 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_MGLS |
 			AMD_CG_SUPPORT_GFX_RLC_LS |
@@ -660,6 +678,7 @@ static int soc15_common_early_init(void *handle)
 		adev->external_rev_id = 0x1;
 		break;
 	case CHIP_VEGA12:
+		adev->asic_funcs = &soc15_asic_funcs;
 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_MGLS |
 			AMD_CG_SUPPORT_GFX_CGCG |
@@ -682,6 +701,7 @@ static int soc15_common_early_init(void *handle)
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
 	case CHIP_VEGA20:
+		adev->asic_funcs = &vega20_asic_funcs;
 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_MGLS |
 			AMD_CG_SUPPORT_GFX_CGCG |
@@ -704,6 +724,7 @@ static int soc15_common_early_init(void *handle)
 		adev->external_rev_id = adev->rev_id + 0x28;
 		break;
 	case CHIP_RAVEN:
+		adev->asic_funcs = &soc15_asic_funcs;
 		if (adev->rev_id >= 0x8)
 			adev->external_rev_id = adev->rev_id + 0x81;
 		else if (adev->pdev->device == 0x15d8)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index f8ad780..a66c8bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -58,4 +58,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
 int vega10_reg_base_init(struct amdgpu_device *adev);
 int vega20_reg_base_init(struct amdgpu_device *adev);
 
+void vega10_doorbell_index_init(struct amdgpu_device *adev);
+void vega20_doorbell_index_init(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 3abffd0..dcdbb4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -322,7 +322,7 @@ static int tonga_ih_sw_init(void *handle)
 		return r;
 
 	adev->irq.ih.use_doorbell = true;
-	adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH;
+	adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
 
 	r = amdgpu_irq_init(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 90bbcee..d69c8f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 1c5e127..ee8cd06 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index f184842..d4f4a66 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -400,16 +400,16 @@ static int uvd_v6_0_sw_init(void *handle)
 		DRM_INFO("UVD ENC is disabled\n");
 	}
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst->ring_enc[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 8a45959..089645e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -430,10 +430,6 @@ static int uvd_v7_0_sw_init(void *handle)
 		DRM_INFO("PSP loading UVD firmware\n");
 	}
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 		if (adev->uvd.harvest_config & (1 << j))
 			continue;
@@ -455,9 +451,9 @@ static int uvd_v7_0_sw_init(void *handle)
 				 * sriov, so set unused location for other unused rings.
 				 */
 				if (i == 0)
-					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
 				else
-					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
 			}
 			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
 			if (r)
@@ -465,6 +461,10 @@ static int uvd_v7_0_sw_init(void *handle)
 		}
 	}
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3e84840..2668eff 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,7 +37,6 @@
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
 #include "gca/gfx_8_0_sh_mask.h"
 #include "ivsrcid/ivsrcid_vislands30.h"
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 0054ba1..9fb34b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -466,9 +466,9 @@ static int vce_v4_0_sw_init(void *handle)
 			 * so set unused location for other unused rings.
 			 */
 			if (i == 0)
-				ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
+				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
 			else
-				ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
+				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
 		}
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a0fda6f..d84b687 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -385,7 +385,7 @@ static int vega10_ih_sw_init(void *handle)
 		return r;
 
 	adev->irq.ih.use_doorbell = true;
-	adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
 
 	r = amdgpu_irq_init(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index c5c9b2b..422674b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -56,4 +56,32 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+void vega10_doorbell_index_init(struct amdgpu_device *adev)
+{
+	adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ;
+	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL64_MEC_RING0;
+	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL64_MEC_RING1;
+	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL64_MEC_RING2;
+	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL64_MEC_RING3;
+	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL64_MEC_RING4;
+	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL64_MEC_RING5;
+	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL64_MEC_RING6;
+	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL64_MEC_RING7;
+	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START;
+	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END;
+	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0;
+	adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0;
+	adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1;
+	adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH;
+	adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1;
+	adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3;
+	adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_DOORBELL64_UVD_RING4_5;
+	adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_DOORBELL64_UVD_RING6_7;
+	adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_DOORBELL64_VCE_RING0_1;
+	adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
+	adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
+	adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+	/* In units of dword doorbells */
+	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
+}
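
These per-ASIC initializers are reached through the new .init_doorbell_index
hook added to amdgpu_asic_funcs in the cik.c, vi.c and soc15.c hunks; the
call site itself is outside this diff, but would presumably be a one-liner
during early device init:

    /* hypothetical call site -- the real hookup is not shown in this diff */
    adev->asic_funcs->init_doorbell_index(adev);
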
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index d13fc4f..edce413 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -54,4 +54,37 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+void vega20_doorbell_index_init(struct amdgpu_device *adev)
+{
+	adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ;
+	adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0;
+	adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1;
+	adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2;
+	adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3;
+	adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4;
+	adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5;
+	adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6;
+	adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7;
+	adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START;
+	adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END;
+	adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
+	adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
+	adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1;
+	adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2;
+	adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3;
+	adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4;
+	adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5;
+	adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6;
+	adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7;
+	adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
+	adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
+	adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3;
+	adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5;
+	adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7;
+	adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1;
+	adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
+	adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
+	adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+	adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
+}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 07880d3..ff2906c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -955,6 +955,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
 	.flush_hdp = &vi_flush_hdp,
 	.invalidate_hdp = &vi_invalidate_hdp,
 	.need_full_reset = &vi_need_full_reset,
+	.init_doorbell_index = &legacy_doorbell_index_init,
 };
 
 #define CZ_REV_BRISTOL(rev)	 \
@@ -1712,3 +1713,21 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 
 	return 0;
 }
+
+void legacy_doorbell_index_init(struct amdgpu_device *adev)
+{
+	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
+	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
+	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
+	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
+	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
+	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
+	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
+	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
+	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
+	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
+	adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0;
+	adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1;
+	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
+	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 0429fe3..8de0772 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -30,4 +30,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
 		    u32 me, u32 pipe, u32 queue, u32 vmid);
 int vi_set_ip_blocks(struct amdgpu_device *adev);
 
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 3783d12..c02adbb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -133,6 +133,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
 #define fiji_cache_info  carrizo_cache_info
 #define polaris10_cache_info carrizo_cache_info
 #define polaris11_cache_info carrizo_cache_info
+#define polaris12_cache_info carrizo_cache_info
 /* TODO - check & update Vega10 cache details */
 #define vega10_cache_info carrizo_cache_info
 #define raven_cache_info carrizo_cache_info
@@ -647,7 +648,12 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
 		pcache_info = polaris11_cache_info;
 		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
 		break;
+	case CHIP_POLARIS12:
+		pcache_info = polaris12_cache_info;
+		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
+		break;
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 		pcache_info = vega10_cache_info;
 		num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index c004647..9ed14a1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -205,6 +205,22 @@ static const struct kfd_device_info polaris11_device_info = {
 	.num_sdma_queues_per_engine = 2,
 };
 
+static const struct kfd_device_info polaris12_device_info = {
+	.asic_family = CHIP_POLARIS12,
+	.max_pasid_bits = 16,
+	.max_no_of_hqd  = 24,
+	.doorbell_size  = 4,
+	.ih_ring_entry_size = 4 * sizeof(uint32_t),
+	.event_interrupt_class = &event_interrupt_class_cik,
+	.num_of_watch_points = 4,
+	.mqd_size_aligned = MQD_SIZE_ALIGNED,
+	.supports_cwsr = true,
+	.needs_iommu_device = false,
+	.needs_pci_atomics = true,
+	.num_sdma_engines = 2,
+	.num_sdma_queues_per_engine = 2,
+};
+
 static const struct kfd_device_info vega10_device_info = {
 	.asic_family = CHIP_VEGA10,
 	.max_pasid_bits = 16,
@@ -237,6 +253,22 @@ static const struct kfd_device_info vega10_vf_device_info = {
 	.num_sdma_queues_per_engine = 2,
 };
 
+static const struct kfd_device_info vega12_device_info = {
+	.asic_family = CHIP_VEGA12,
+	.max_pasid_bits = 16,
+	.max_no_of_hqd  = 24,
+	.doorbell_size  = 8,
+	.ih_ring_entry_size = 8 * sizeof(uint32_t),
+	.event_interrupt_class = &event_interrupt_class_v9,
+	.num_of_watch_points = 4,
+	.mqd_size_aligned = MQD_SIZE_ALIGNED,
+	.supports_cwsr = true,
+	.needs_iommu_device = false,
+	.needs_pci_atomics = false,
+	.num_sdma_engines = 2,
+	.num_sdma_queues_per_engine = 2,
+};
+
 static const struct kfd_device_info vega20_device_info = {
 	.asic_family = CHIP_VEGA20,
 	.max_pasid_bits = 16,
@@ -331,6 +363,14 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
 	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
 	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
+	{ 0x6980, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6981, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6985, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6986, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6987, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6995, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6997, &polaris12_device_info },	/* Polaris12 */
+	{ 0x699F, &polaris12_device_info },	/* Polaris12 */
 	{ 0x6860, &vega10_device_info },	/* Vega10 */
 	{ 0x6861, &vega10_device_info },	/* Vega10 */
 	{ 0x6862, &vega10_device_info },	/* Vega10 */
@@ -340,6 +380,11 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x6868, &vega10_device_info },	/* Vega10 */
 	{ 0x686C, &vega10_vf_device_info },	/* Vega10  vf*/
 	{ 0x687F, &vega10_device_info },	/* Vega10 */
+	{ 0x69A0, &vega12_device_info },	/* Vega12 */
+	{ 0x69A1, &vega12_device_info },	/* Vega12 */
+	{ 0x69A2, &vega12_device_info },	/* Vega12 */
+	{ 0x69A3, &vega12_device_info },	/* Vega12 */
+	{ 0x69AF, &vega12_device_info },	/* Vega12 */
 	{ 0x66a0, &vega20_device_info },	/* Vega20 */
 	{ 0x66a1, &vega20_device_info },	/* Vega20 */
 	{ 0x66a2, &vega20_device_info },	/* Vega20 */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index fb9d66e..8372556 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1547,7 +1547,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
 			  u32 *ctl_stack_used_size,
 			  u32 *save_area_used_size)
 {
-	struct mqd_manager *mqd;
+	struct mqd_manager *mqd_mgr;
 	int r;
 
 	dqm_lock(dqm);
@@ -1558,19 +1558,19 @@ static int get_wave_state(struct device_queue_manager *dqm,
 		goto dqm_unlock;
 	}
 
-	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
-	if (!mqd) {
+	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+	if (!mqd_mgr) {
 		r = -ENOMEM;
 		goto dqm_unlock;
 	}
 
-	if (!mqd->get_wave_state) {
+	if (!mqd_mgr->get_wave_state) {
 		r = -EINVAL;
 		goto dqm_unlock;
 	}
 
-	r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size,
-				save_area_used_size);
+	r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
+			ctl_stack_used_size, save_area_used_size);
 
 dqm_unlock:
 	dqm_unlock(dqm);
@@ -1741,10 +1741,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
 		break;
 
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		device_queue_manager_init_v9(&dqm->asic_ops);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index fd60a11..c3a5dcf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -24,7 +24,6 @@
 #include "kfd_device_queue_manager.h"
 #include "gca/gfx_8_0_enum.h"
 #include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
 #include "oss/oss_3_0_sh_mask.h"
 
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 3d66cec..213ea54 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -397,9 +397,11 @@ int kfd_init_apertures(struct kfd_process *process)
 			case CHIP_FIJI:
 			case CHIP_POLARIS10:
 			case CHIP_POLARIS11:
+			case CHIP_POLARIS12:
 				kfd_init_apertures_vi(pdd, id);
 				break;
 			case CHIP_VEGA10:
+			case CHIP_VEGA12:
 			case CHIP_VEGA20:
 			case CHIP_RAVEN:
 				kfd_init_apertures_v9(pdd, id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index f836897..a85904a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -23,7 +23,7 @@
 #include "kfd_priv.h"
 #include "kfd_events.h"
 #include "soc15_int.h"
-
+#include "kfd_device_queue_manager.h"
 
 static bool event_interrupt_isr_v9(struct kfd_dev *dev,
 					const uint32_t *ih_ring_entry,
@@ -39,20 +39,39 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
 	    vmid > dev->vm_info.last_vmid_kfd)
 		return 0;
 
-	/* If there is no valid PASID, it's likely a firmware bug */
-	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
-	if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
-		return 0;
-
 	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
 	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
 
-	pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
-		 client_id, source_id, pasid);
+	/* This is a known issue for gfx9: under non-HWS mode, the pasid is
+	 * not set in the interrupt payload, so we need to find it out on
+	 * our own.
+	 */
+	if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+		const uint32_t pasid_mask = 0xffff;
+
+		*patched_flag = true;
+		memcpy(patched_ihre, ih_ring_entry,
+				dev->device_info->ih_ring_entry_size);
+
+		pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
+				dev->kgd, vmid);
+
+		/* Patch the pasid field */
+		patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
+					& ~pasid_mask) | pasid);
+	}
+
+	pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+		 client_id, source_id, vmid, pasid);
 	pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
 		 data[0], data[1], data[2], data[3],
 		 data[4], data[5], data[6], data[7]);
 
+	/* If there is no valid PASID, it's likely a bug */
+	if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+		return 0;
+
 	/* Interrupt types we care about: various signals and faults.
 	 * They will be forwarded to a work queue (see below).
 	 */
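
The patching above rewrites only the low 16 bits of the fourth IH ring
dword and preserves the rest. Spelled out with an illustrative value:

    const uint32_t pasid_mask = 0xffff;
    uint32_t dw3 = le32_to_cpu(patched_ihre[3]);	/* e.g. 0x00070000, pasid bits clear */

    dw3 = (dw3 & ~pasid_mask) | pasid;	/* keep high 16 bits, insert looked-up pasid */
    patched_ihre[3] = cpu_to_le32(dw3);
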
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 6c31f73..f159688 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -313,6 +313,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		kernel_queue_init_vi(&kq->ops_asic_specific);
 		break;
 
@@ -322,6 +323,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 		break;
 
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		kernel_queue_init_v9(&kq->ops_asic_specific);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 6910028..aed9b9b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -38,8 +38,10 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		return mqd_manager_init_vi_tonga(type, dev);
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		return mqd_manager_init_v9(type, dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index c6080ed3..045a229 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -226,9 +226,11 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		pm->pmf = &kfd_vi_pm_funcs;
 		break;
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		pm->pmf = &kfd_v9_pm_funcs;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c73b4ff..aa793fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1272,12 +1272,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		pr_debug("Adding doorbell packet type capability\n");
 		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
 			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
 			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
 		break;
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index c97dc96..cfde156 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -32,11 +32,12 @@
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
 
 #TODO: remove when Timing Sync feature is complete
 subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
 
-DAL_LIBS = amdgpu_dm dc	modules/freesync modules/color modules/info_packet
+DAL_LIBS = amdgpu_dm dc	modules/freesync modules/color modules/info_packet modules/power
 
 AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d8d0b20..32e791d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -38,7 +38,6 @@
 #include "amd_shared.h"
 #include "amdgpu_dm_irq.h"
 #include "dm_helpers.h"
-#include "dm_services_types.h"
 #include "amdgpu_dm_mst_types.h"
 #if defined(CONFIG_DEBUG_FS)
 #include "amdgpu_dm_debugfs.h"
@@ -72,6 +71,7 @@
 #endif
 
 #include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
 
 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -643,6 +643,26 @@ static int dm_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	struct dmcu_iram_parameters params;
+	unsigned int linear_lut[16];
+	int i;
+	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+	bool ret;
+
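+	/* Build a 16-point linear (identity) LUT spanning 0x0 to 0xFFFF */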
+	for (i = 0; i < 16; i++)
+		linear_lut[i] = 0xFFFF * i / 15;
+
+	params.set = 0;
+	params.backlight_ramping_start = 0xCCCC;
+	params.backlight_ramping_reduction = 0xCCCCCCCC;
+	params.backlight_lut_array_size = 16;
+	params.backlight_lut_array = linear_lut;
+
+	ret = dmcu_load_iram(dmcu, params);
+
+	if (!ret)
+		return -EINVAL;
+
 	return detect_mst_link_for_all_connectors(adev->ddev);
 }
 
@@ -969,45 +989,6 @@ const struct amdgpu_ip_block_version dm_ip_block =
 };
 
 
-static struct drm_atomic_state *
-dm_atomic_state_alloc(struct drm_device *dev)
-{
-	struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
-	if (!state)
-		return NULL;
-
-	if (drm_atomic_state_init(dev, &state->base) < 0)
-		goto fail;
-
-	return &state->base;
-
-fail:
-	kfree(state);
-	return NULL;
-}
-
-static void
-dm_atomic_state_clear(struct drm_atomic_state *state)
-{
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
-
-	if (dm_state->context) {
-		dc_release_state(dm_state->context);
-		dm_state->context = NULL;
-	}
-
-	drm_atomic_state_default_clear(state);
-}
-
-static void
-dm_atomic_state_alloc_free(struct drm_atomic_state *state)
-{
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
-	drm_atomic_state_default_release(state);
-	kfree(dm_state);
-}
-
 /**
  * DOC: atomic
  *
@@ -1019,9 +1000,6 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = amdgpu_dm_atomic_check,
 	.atomic_commit = amdgpu_dm_atomic_commit,
-	.atomic_state_alloc = dm_atomic_state_alloc,
-	.atomic_state_clear = dm_atomic_state_clear,
-	.atomic_state_free = dm_atomic_state_alloc_free
 };
 
 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -1543,8 +1521,117 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 }
 #endif
 
+/*
+ * Acquires the lock for the atomic state object and stores the new
+ * dm atomic state in @dm_state.
+ *
+ * This should only be called during atomic check.
+ */
+static int dm_atomic_get_state(struct drm_atomic_state *state,
+			       struct dm_atomic_state **dm_state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_private_state *priv_state;
+	int ret;
+
+	if (*dm_state)
+		return 0;
+
+	ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
+	if (ret)
+		return ret;
+
+	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+	if (IS_ERR(priv_state))
+		return PTR_ERR(priv_state);
+
+	*dm_state = to_dm_atomic_state(priv_state);
+
+	return 0;
+}
+
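+/*
+ * Returns the new dm atomic state for @state, or NULL if the dm private
+ * object was not part of this commit.
+ */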
+struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_private_obj *obj;
+	struct drm_private_state *new_obj_state;
+	int i;
+
+	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+		if (obj->funcs == dm->atomic_obj.funcs)
+			return to_dm_atomic_state(new_obj_state);
+	}
+
+	return NULL;
+}
+
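+/*
+ * As above, but returns the old (pre-commit) dm atomic state, if any.
+ */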
+struct dm_atomic_state *
+dm_atomic_get_old_state(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_private_obj *obj;
+	struct drm_private_state *old_obj_state;
+	int i;
+
+	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
+		if (obj->funcs == dm->atomic_obj.funcs)
+			return to_dm_atomic_state(old_obj_state);
+	}
+
+	return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+	struct dm_atomic_state *old_state, *new_state;
+
+	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+	if (!new_state)
+		return NULL;
+
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
+
+	new_state->context = dc_create_state();
+	if (!new_state->context) {
+		kfree(new_state);
+		return NULL;
+	}
+
+	old_state = to_dm_atomic_state(obj->state);
+	if (old_state && old_state->context)
+		dc_resource_state_copy_construct(old_state->context,
+						 new_state->context);
+
+	return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+				    struct drm_private_state *state)
+{
+	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+	if (dm_state && dm_state->context)
+		dc_release_state(dm_state->context);
+
+	kfree(dm_state);
+}
+
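+/* The drm core uses these to duplicate and free dm state around commits */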
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+	.atomic_duplicate_state = dm_atomic_duplicate_state,
+	.atomic_destroy_state = dm_atomic_destroy_state,
+};
+
 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 {
+	struct dm_atomic_state *state;
 	int r;
 
 	adev->mode_info.mode_config_initialized = true;
@@ -1562,6 +1649,24 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 
 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
+	drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
+
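+	/* Seed the private obj with a copy of the current dc state */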
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->context = dc_create_state();
+	if (!state->context) {
+		kfree(state);
+		return -ENOMEM;
+	}
+
+	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
+
+	drm_atomic_private_obj_init(&adev->dm.atomic_obj,
+				    &state->base,
+				    &dm_atomic_state_funcs);
+
 	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
@@ -1569,27 +1674,60 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
+{
+#if defined(CONFIG_ACPI)
+	struct amdgpu_dm_backlight_caps caps;
+
+	if (dm->backlight_caps.caps_valid)
+		return;
+
+	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
+	if (caps.caps_valid) {
+		dm->backlight_caps.min_input_signal = caps.min_input_signal;
+		dm->backlight_caps.max_input_signal = caps.max_input_signal;
+		dm->backlight_caps.caps_valid = true;
+	} else {
+		dm->backlight_caps.min_input_signal =
+				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+		dm->backlight_caps.max_input_signal =
+				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+	}
+#else
+	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+#endif
+}
+
 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 {
 	struct amdgpu_display_manager *dm = bl_get_data(bd);
+	struct amdgpu_dm_backlight_caps caps;
+	uint32_t brightness = bd->props.brightness;
 
-	/* backlight_pwm_u16_16 parameter is in unsigned 32 bit, 16 bit integer
-	 * and 16 bit fractional, where 1.0 is max backlight value.
-	 * bd->props.brightness is 8 bit format and needs to be converted by
-	 * scaling via copy lower byte to upper byte of 16 bit value.
-	 */
-	uint32_t brightness = bd->props.brightness * 0x101;
-
+	amdgpu_dm_update_backlight_caps(dm);
+	caps = dm->backlight_caps;
 	/*
-	 * PWM interperts 0 as 100% rather than 0% because of HW
-	 * limitation for level 0.  So limiting minimum brightness level
-	 * to 1.
+	 * The brightness input is in the range 0-255.
+	 * It needs to be rescaled to fall between the
+	 * requested min and max input signals.
+	 *
+	 * It also needs to be scaled up by 0x101 to
+	 * match the DC interface, which has a range of
+	 * 0 to 0xffff.
 	 */
-	if (bd->props.brightness < 1)
-		brightness = 0x101;
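+	/*
+	 * Worked example, assuming AMDGPU_MAX_BL_LEVEL is 255 and the
+	 * default caps (min 12, max 255) are in use:
+	 *   input 0   -> 12 * 0x101 = 0x0C0C
+	 *   input 255 -> 255 * 0x101 * 243 / 255 + 12 * 0x101 = 0xFFFF
+	 */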
+	brightness =
+		brightness
+		* 0x101
+		* (caps.max_input_signal - caps.min_input_signal)
+		/ AMDGPU_MAX_BL_LEVEL
+		+ caps.min_input_signal * 0x101;
 
 	if (dc_link_set_backlight_level(dm->backlight_link,
 			brightness, 0, 0))
@@ -1619,6 +1757,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
 	char bl_name[16];
 	struct backlight_properties props = { 0 };
 
+	amdgpu_dm_update_backlight_caps(dm);
+
 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
 	props.brightness = AMDGPU_MAX_BL_LEVEL;
 	props.type = BACKLIGHT_RAW;
@@ -1850,6 +1990,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
 	drm_mode_config_cleanup(dm->ddev);
+	drm_atomic_private_obj_fini(&dm->atomic_obj);
 	return;
 }
 
@@ -1869,73 +2010,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
 	/* TODO: implement later */
 }
 
-static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
-				struct drm_file *filp)
-{
-	struct drm_atomic_state *state;
-	struct drm_modeset_acquire_ctx ctx;
-	struct drm_crtc *crtc;
-	struct drm_connector *connector;
-	struct drm_connector_state *old_con_state, *new_con_state;
-	int ret = 0;
-	uint8_t i;
-	bool enable = false;
-
-	drm_modeset_acquire_init(&ctx, 0);
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	state->acquire_ctx = &ctx;
-
-retry:
-	drm_for_each_crtc(crtc, dev) {
-		ret = drm_atomic_add_affected_connectors(state, crtc);
-		if (ret)
-			goto fail;
-
-		/* TODO rework amdgpu_dm_commit_planes so we don't need this */
-		ret = drm_atomic_add_affected_planes(state, crtc);
-		if (ret)
-			goto fail;
-	}
-
-	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
-		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
-		struct drm_crtc_state *new_crtc_state;
-		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
-		struct dm_crtc_state *dm_new_crtc_state;
-
-		if (!acrtc) {
-			ASSERT(0);
-			continue;
-		}
-
-		new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
-		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
-		dm_new_crtc_state->freesync_enabled = enable;
-	}
-
-	ret = drm_atomic_commit(state);
-
-fail:
-	if (ret == -EDEADLK) {
-		drm_atomic_state_clear(state);
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	}
-
-	drm_atomic_state_put(state);
-
-out:
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-	return ret;
-}
-
 static const struct amdgpu_display_funcs dm_display_funcs = {
 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
@@ -1948,8 +2022,6 @@ static const struct amdgpu_display_funcs dm_display_funcs = {
 		dm_crtc_get_scanoutpos,/* called unconditionally */
 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
-	.notify_freesync = amdgpu_notify_freesync,
-
 };
 
 #if defined(CONFIG_DEBUG_KERNEL_DC)
@@ -2550,7 +2622,8 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
 static void
 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 					     const struct drm_display_mode *mode_in,
-					     const struct drm_connector *connector)
+					     const struct drm_connector *connector,
+					     const struct dc_stream_state *old_stream)
 {
 	struct dc_crtc_timing *timing_out = &stream->timing;
 	const struct drm_display_info *info = &connector->display_info;
@@ -2576,7 +2649,18 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 			connector);
 	timing_out->scan_type = SCANNING_TYPE_NODATA;
 	timing_out->hdmi_vic = 0;
-	timing_out->vic = drm_match_cea_mode(mode_in);
+
+	if (old_stream) {
+		timing_out->vic = old_stream->timing.vic;
+		timing_out->flags.HSYNC_POSITIVE_POLARITY =
+				old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
+		timing_out->flags.VSYNC_POSITIVE_POLARITY =
+				old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
+	} else {
+		timing_out->vic = drm_match_cea_mode(mode_in);
+		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+	}
 
 	timing_out->h_addressable = mode_in->crtc_hdisplay;
 	timing_out->h_total = mode_in->crtc_htotal;
@@ -2592,10 +2676,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
 	timing_out->pix_clk_khz = mode_in->crtc_clock;
 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
-	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
-		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
-	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
-		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
 
 	stream->output_color_space = get_output_color_space(timing_out);
 
@@ -2758,13 +2838,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
 static struct dc_stream_state *
 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		       const struct drm_display_mode *drm_mode,
-		       const struct dm_connector_state *dm_state)
+		       const struct dm_connector_state *dm_state,
+		       const struct dc_stream_state *old_stream)
 {
 	struct drm_display_mode *preferred_mode = NULL;
 	struct drm_connector *drm_connector;
 	struct dc_stream_state *stream = NULL;
 	struct drm_display_mode mode = *drm_mode;
 	bool native_mode_found = false;
+	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+	int mode_refresh;
+	int preferred_refresh = 0;
+
 	struct dc_sink *sink = NULL;
 	if (aconnector == NULL) {
 		DRM_ERROR("aconnector is NULL!\n");
@@ -2803,6 +2888,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 				struct drm_display_mode,
 				head);
 
+	mode_refresh = drm_mode_vrefresh(&mode);
+
 	if (preferred_mode == NULL) {
 		/*
 		 * This may not be an error, the use case is when we have no
@@ -2815,13 +2902,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		decide_crtc_timing_for_drm_display_mode(
 				&mode, preferred_mode,
 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
+		preferred_refresh = drm_mode_vrefresh(preferred_mode);
 	}
 
 	if (!dm_state)
 		drm_mode_set_crtcinfo(&mode, 0);
 
-	fill_stream_properties_from_drm_display_mode(stream,
-			&mode, &aconnector->base);
+	/*
+	 * If scaling is enabled and the refresh rate didn't change,
+	 * we copy the VIC and polarities from the old timings.
+	 */
+	if (!scale || mode_refresh != preferred_refresh)
+		fill_stream_properties_from_drm_display_mode(stream,
+			&mode, &aconnector->base, NULL);
+	else
+		fill_stream_properties_from_drm_display_mode(stream,
+			&mode, &aconnector->base, old_stream);
+
 	update_stream_scaling_settings(&mode, dm_state, stream);
 
 	fill_audio_info(
@@ -2901,7 +2998,9 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 
 	state->adjust = cur->adjust;
 	state->vrr_infopacket = cur->vrr_infopacket;
-	state->freesync_enabled = cur->freesync_enabled;
+	state->abm_level = cur->abm_level;
+	state->vrr_supported = cur->vrr_supported;
+	state->freesync_config = cur->freesync_config;
 
 	/* TODO: Duplicate dc_stream once the stream object is flattened */
 
@@ -2995,9 +3094,11 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
 			rmx_type = RMX_FULL;
 			break;
 		case DRM_MODE_SCALE_NONE:
-		default:
 			rmx_type = RMX_OFF;
 			break;
+		default:
+			rmx_type = RMX_ASPECT;
+			break;
 		}
 
 		if (dm_old_state->scaling == rmx_type)
@@ -3017,6 +3118,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
 	} else if (property == adev->mode_info.max_bpc_property) {
 		dm_new_state->max_bpc = val;
 		ret = 0;
+	} else if (property == adev->mode_info.abm_level_property) {
+		dm_new_state->abm_level = val;
+		ret = 0;
 	}
 
 	return ret;
@@ -3062,7 +3166,11 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
 	} else if (property == adev->mode_info.max_bpc_property) {
 		*val = dm_state->max_bpc;
 		ret = 0;
+	} else if (property == adev->mode_info.abm_level_property) {
+		*val = dm_state->abm_level;
+		ret = 0;
 	}
+
 	return ret;
 }
 
@@ -3102,7 +3210,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
 	state = kzalloc(sizeof(*state), GFP_KERNEL);
 
 	if (state) {
-		state->scaling = RMX_OFF;
+		state->scaling = RMX_ASPECT;
 		state->underscan_enable = false;
 		state->underscan_hborder = 0;
 		state->underscan_vborder = 0;
@@ -3126,7 +3234,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
 
 	new_state->freesync_capable = state->freesync_capable;
-	new_state->freesync_enable = state->freesync_enable;
+	new_state->abm_level = state->abm_level;
 
 	return &new_state->base;
 }
@@ -3228,7 +3336,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
 		goto fail;
 	}
 
-	stream = create_stream_for_sink(aconnector, mode, NULL);
+	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
 		goto fail;
@@ -3876,6 +3984,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 				adev->mode_info.max_bpc_property,
 				0);
 
+	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
+	    dc_is_dmcu_initialized(adev->dm.dc)) {
+		drm_object_attach_property(&aconnector->base.base,
+				adev->mode_info.abm_level_property, 0);
+	}
+
+	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+	    connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		drm_connector_attach_vrr_capable_property(
+			&aconnector->base);
+	}
 }
 
 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -4252,6 +4371,91 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
 						 acrtc->crtc_id);
 }
 
+struct dc_stream_status *dc_state_get_stream_status(
+	struct dc_state *state,
+	struct dc_stream_state *stream)
+{
+	uint8_t i;
+
+	for (i = 0; i < state->stream_count; i++) {
+		if (stream == state->streams[i])
+			return &state->stream_status[i];
+	}
+
+	return NULL;
+}
+
+static void update_freesync_state_on_stream(
+	struct amdgpu_display_manager *dm,
+	struct dm_crtc_state *new_crtc_state,
+	struct dc_stream_state *new_stream)
+{
+	struct mod_vrr_params vrr = {0};
+	struct dc_info_packet vrr_infopacket = {0};
+	struct mod_freesync_config config = new_crtc_state->freesync_config;
+
+	if (!new_stream)
+		return;
+
+	/*
+	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
+	 * For now it's sufficient to just guard against these conditions.
+	 */
+
+	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+		return;
+
+	if (new_crtc_state->vrr_supported &&
+	    config.min_refresh_in_uhz &&
+	    config.max_refresh_in_uhz) {
+		config.state = new_crtc_state->base.vrr_enabled ?
+			VRR_STATE_ACTIVE_VARIABLE :
+			VRR_STATE_INACTIVE;
+	} else {
+		config.state = VRR_STATE_UNSUPPORTED;
+	}
+
+	mod_freesync_build_vrr_params(dm->freesync_module,
+				      new_stream,
+				      &config, &vrr);
+
+	mod_freesync_build_vrr_infopacket(
+		dm->freesync_module,
+		new_stream,
+		&vrr,
+		packet_type_vrr,
+		transfer_func_unknown,
+		&vrr_infopacket);
+
+	new_crtc_state->freesync_timing_changed =
+		(memcmp(&new_crtc_state->adjust,
+			&vrr.adjust,
+			sizeof(vrr.adjust)) != 0);
+
+	new_crtc_state->freesync_vrr_info_changed =
+		(memcmp(&new_crtc_state->vrr_infopacket,
+			&vrr_infopacket,
+			sizeof(vrr_infopacket)) != 0);
+
+	new_crtc_state->adjust = vrr.adjust;
+	new_crtc_state->vrr_infopacket = vrr_infopacket;
+
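+	/* Mirror into the dc stream so the pending flip picks up the update */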
+	new_stream->adjust = new_crtc_state->adjust;
+	new_stream->vrr_infopacket = vrr_infopacket;
+
+	if (new_crtc_state->freesync_vrr_info_changed)
+		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+			      new_crtc_state->base.crtc->base.id,
+			      (int)new_crtc_state->base.vrr_enabled,
+			      (int)vrr.state);
+
+	if (new_crtc_state->freesync_timing_changed)
+		DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
+			      new_crtc_state->base.crtc->base.id,
+			      vrr.adjust.v_total_min,
+			      vrr.adjust.v_total_max);
+}
+
 /*
  * Executes flip
  *
@@ -4273,6 +4477,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 	struct dc_flip_addrs addr = { {0} };
 	/* TODO eliminate or rename surface_update */
 	struct dc_surface_update surface_updates[1] = { {0} };
+	struct dc_stream_update stream_update = {0};
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
 	struct dc_stream_status *stream_status;
 
@@ -4345,11 +4550,26 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 	}
 	surface_updates->flip_addr = &addr;
 
+	if (acrtc_state->stream) {
+		update_freesync_state_on_stream(
+			&adev->dm,
+			acrtc_state,
+			acrtc_state->stream);
+
+		if (acrtc_state->freesync_timing_changed)
+			stream_update.adjust =
+				&acrtc_state->stream->adjust;
+
+		if (acrtc_state->freesync_vrr_info_changed)
+			stream_update.vrr_infopacket =
+				&acrtc_state->stream->vrr_infopacket;
+	}
+
 	dc_commit_updates_for_stream(adev->dm.dc,
 					     surface_updates,
 					     1,
 					     acrtc_state->stream,
-					     NULL,
+					     &stream_update,
 					     &surface_updates->surface,
 					     state);
 
@@ -4382,6 +4602,7 @@ static bool commit_planes_to_stream(
 	struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
 	struct dc_stream_update *stream_update =
 			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+	unsigned int abm_level;
 
 	if (!stream_update) {
 		BREAK_TO_DEBUGGER();
@@ -4409,9 +4630,9 @@ static bool commit_planes_to_stream(
 	stream_update->dst = dc_stream->dst;
 	stream_update->out_transfer_func = dc_stream->out_transfer_func;
 
-	if (dm_new_crtc_state->freesync_enabled != dm_old_crtc_state->freesync_enabled) {
-		stream_update->vrr_infopacket = &dc_stream->vrr_infopacket;
-		stream_update->adjust = &dc_stream->adjust;
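+	/* stream_update->abm_level is a pointer, so stage the value in a local */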
+	if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
+		abm_level = dm_new_crtc_state->abm_level;
+		stream_update->abm_level = &abm_level;
 	}
 
 	for (i = 0; i < new_plane_count; i++) {
@@ -4455,6 +4676,7 @@ static bool commit_planes_to_stream(
 }
 
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+				    struct dc_state *dc_state,
 				    struct drm_device *dev,
 				    struct amdgpu_display_manager *dm,
 				    struct drm_crtc *pcrtc,
@@ -4471,7 +4693,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
 	struct dm_crtc_state *dm_old_crtc_state =
 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	int planes_count = 0;
 	unsigned long flags;
 
@@ -4532,7 +4753,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				crtc,
 				fb,
 				(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
-				dm_state->context);
+				dc_state);
 		}
 
 	}
@@ -4549,15 +4770,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 		}
 
-		dc_stream_attach->adjust = acrtc_state->adjust;
-		dc_stream_attach->vrr_infopacket = acrtc_state->vrr_infopacket;
+		dc_stream_attach->abm_level = acrtc_state->abm_level;
 
 		if (false == commit_planes_to_stream(dm->dc,
 							plane_states_constructed,
 							planes_count,
 							acrtc_state,
 							dm_old_crtc_state,
-							dm_state->context))
+							dc_state))
 			dm_error("%s: Failed to attach plane!\n", __func__);
 	} else {
 		/*TODO BUG Here should go disable planes on CRTC. */
@@ -4625,6 +4845,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_display_manager *dm = &adev->dm;
 	struct dm_atomic_state *dm_state;
+	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
 	uint32_t i, j;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -4637,7 +4858,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
 
-	dm_state = to_dm_atomic_state(state);
+	dm_state = dm_atomic_get_new_state(state);
+	if (dm_state && dm_state->context) {
+		dc_state = dm_state->context;
+	} else {
+		/* No state changes, retain current state. */
+		dc_state_temp = dc_create_state();
+		ASSERT(dc_state_temp);
+		dc_state = dc_state_temp;
+		dc_resource_state_copy_construct_current(dm->dc, dc_state);
+	}
 
 	/* update changed items */
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -4710,9 +4940,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	} /* for_each_crtc_in_state() */
 
-	if (dm_state->context) {
-		dm_enable_per_frame_crtc_master_sync(dm_state->context);
-		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+	if (dc_state) {
+		dm_enable_per_frame_crtc_master_sync(dc_state);
+		WARN_ON(!dc_commit_state(dm->dc, dc_state));
 	}
 
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -4725,13 +4955,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 					dc_stream_get_status(dm_new_crtc_state->stream);
 
 			if (!status)
+				status = dc_state_get_stream_status(dc_state,
+								    dm_new_crtc_state->stream);
+
+			if (!status)
 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
 			else
 				acrtc->otg_inst = status->primary_otg_inst;
 		}
 	}
 
-	/* Handle scaling and underscan changes*/
+	/* Handle scaling, underscan, and abm changes */
 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
@@ -4747,11 +4981,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
 			continue;
 
-		/* Skip anything that is not scaling or underscan changes */
-		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
-			continue;
 
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+		/* Skip anything that is not a scaling, underscan, or abm change */
+		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
+				(dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
+			continue;
 
 		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
 				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
@@ -4763,8 +5000,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		WARN_ON(!status);
 		WARN_ON(!status->plane_count);
 
-		dm_new_crtc_state->stream->adjust = dm_new_crtc_state->adjust;
-		dm_new_crtc_state->stream->vrr_infopacket = dm_new_crtc_state->vrr_infopacket;
+		dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
 
 		/* TODO: How does this work with MPO? */
 		if (!commit_planes_to_stream(
@@ -4773,7 +5009,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 				status->plane_count,
 				dm_new_crtc_state,
 				to_dm_crtc_state(old_crtc_state),
-				dm_state->context))
+				dc_state))
 			dm_error("%s: Failed to update stream scaling!\n", __func__);
 	}
 
@@ -4806,7 +5042,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
 		if (dm_new_crtc_state->stream)
-			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+			amdgpu_dm_commit_planes(state, dc_state, dev,
+						dm, crtc, &wait_for_vblank);
 	}
 
 
@@ -4846,6 +5083,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	for (i = 0; i < crtc_disable_count; i++)
 		pm_runtime_put_autosuspend(dev->dev);
 	pm_runtime_mark_last_busy(dev->dev);
+
+	if (dc_state_temp)
+		dc_release_state(dc_state_temp);
 }
 
 
@@ -4989,20 +5229,18 @@ static int do_aquire_global_lock(struct drm_device *dev,
 	return ret < 0 ? ret : 0;
 }
 
-void set_freesync_on_stream(struct amdgpu_display_manager *dm,
-			    struct dm_crtc_state *new_crtc_state,
-			    struct dm_connector_state *new_con_state,
-			    struct dc_stream_state *new_stream)
+static void get_freesync_config_for_crtc(
+	struct dm_crtc_state *new_crtc_state,
+	struct dm_connector_state *new_con_state)
 {
 	struct mod_freesync_config config = {0};
-	struct mod_vrr_params vrr = {0};
-	struct dc_info_packet vrr_infopacket = {0};
 	struct amdgpu_dm_connector *aconnector =
 			to_amdgpu_dm_connector(new_con_state->base.connector);
 
-	if (new_con_state->freesync_capable &&
-	    new_con_state->freesync_enable) {
-		config.state = new_crtc_state->freesync_enabled ?
+	new_crtc_state->vrr_supported = new_con_state->freesync_capable;
+
+	if (new_con_state->freesync_capable) {
+		config.state = new_crtc_state->base.vrr_enabled ?
 				VRR_STATE_ACTIVE_VARIABLE :
 				VRR_STATE_INACTIVE;
 		config.min_refresh_in_uhz =
@@ -5012,19 +5250,18 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm,
 		config.vsif_supported = true;
 	}
 
-	mod_freesync_build_vrr_params(dm->freesync_module,
-				      new_stream,
-				      &config, &vrr);
+	new_crtc_state->freesync_config = config;
+}
 
-	mod_freesync_build_vrr_infopacket(dm->freesync_module,
-					  new_stream,
-					  &vrr,
-					  packet_type_fs1,
-					  NULL,
-					  &vrr_infopacket);
+static void reset_freesync_config_for_crtc(
+	struct dm_crtc_state *new_crtc_state)
+{
+	new_crtc_state->vrr_supported = false;
 
-	new_crtc_state->adjust = vrr.adjust;
-	new_crtc_state->vrr_infopacket = vrr_infopacket;
+	memset(&new_crtc_state->adjust, 0,
+	       sizeof(new_crtc_state->adjust));
+	memset(&new_crtc_state->vrr_infopacket, 0,
+	       sizeof(new_crtc_state->vrr_infopacket));
 }
 
 static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
@@ -5032,11 +5269,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 				 bool enable,
 				 bool *lock_and_validation_needed)
 {
+	struct dm_atomic_state *dm_state = NULL;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	int i;
 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	struct dc_stream_state *new_stream;
 	int ret = 0;
 
@@ -5084,7 +5321,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 
 			new_stream = create_stream_for_sink(aconnector,
 							     &new_crtc_state->mode,
-							    dm_new_conn_state);
+							    dm_new_conn_state,
+							    dm_old_crtc_state->stream);
 
 			/*
 			 * we can have no stream on ACTION_SET if a display
@@ -5099,8 +5337,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 				break;
 			}
 
-			set_freesync_on_stream(dm, dm_new_crtc_state,
-					       dm_new_conn_state, new_stream);
+			dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
 
 			if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
 			    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
@@ -5110,9 +5347,6 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 			}
 		}
 
-		if (dm_old_crtc_state->freesync_enabled != dm_new_crtc_state->freesync_enabled)
-			new_crtc_state->mode_changed = true;
-
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
 			goto next_crtc;
 
@@ -5134,6 +5368,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 			if (!dm_old_crtc_state->stream)
 				goto next_crtc;
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret)
+				goto fail;
+
 			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
 					crtc->base.id);
 
@@ -5149,6 +5387,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 			dc_stream_release(dm_old_crtc_state->stream);
 			dm_new_crtc_state->stream = NULL;
 
+			reset_freesync_config_for_crtc(dm_new_crtc_state);
+
 			*lock_and_validation_needed = true;
 
 		} else {/* Add stream for any updated/enabled CRTC */
@@ -5168,6 +5408,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 
 				WARN_ON(dm_new_crtc_state->stream);
 
+				ret = dm_atomic_get_state(state, &dm_state);
+				if (ret)
+					goto fail;
+
 				dm_new_crtc_state->stream = new_stream;
 
 				dc_stream_retain(new_stream);
@@ -5226,7 +5470,9 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 			amdgpu_dm_set_ctm(dm_new_crtc_state);
 		}
 
-
+		/* Update Freesync settings. */
+		get_freesync_config_for_crtc(dm_new_crtc_state,
+					     dm_new_conn_state);
 	}
 
 	return ret;
@@ -5242,12 +5488,13 @@ static int dm_update_planes_state(struct dc *dc,
 				  bool enable,
 				  bool *lock_and_validation_needed)
 {
+	struct dm_atomic_state *dm_state = NULL;
 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
 	int i;
 	/* TODO return page_flip_needed() function */
@@ -5285,6 +5532,10 @@ static int dm_update_planes_state(struct dc *dc,
 			DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
 					plane->base.id, old_plane_crtc->base.id);
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret)
+				return ret;
+
 			if (!dc_remove_plane_from_context(
 					dc,
 					dm_old_crtc_state->stream,
@@ -5339,6 +5590,12 @@ static int dm_update_planes_state(struct dc *dc,
 				return ret;
 			}
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret) {
+				dc_plane_state_release(dc_new_plane_state);
+				return ret;
+			}
+
 			/*
 			 * Any atomic check errors that occur after this will
 			 * not need a release. The plane state will be attached
@@ -5370,11 +5627,14 @@ static int dm_update_planes_state(struct dc *dc,
 
 	return ret;
 }
-enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state)
+
+static int
+dm_determine_update_type_for_commit(struct dc *dc,
+				    struct drm_atomic_state *state,
+				    enum surface_update_type *out_type)
 {
-
-
-	int i, j, num_plane;
+	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
+	int i, j, num_plane, ret = 0;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
@@ -5394,7 +5654,7 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
 		DRM_ERROR("Plane or surface update failed to allocate");
 		/* Set type to FULL to avoid crashing in DC*/
 		update_type = UPDATE_TYPE_FULL;
-		goto ret;
+		goto cleanup;
 	}
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -5448,27 +5708,40 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
 			}
 
 			if (num_plane > 0) {
-				status = dc_stream_get_status(new_dm_crtc_state->stream);
+				ret = dm_atomic_get_state(state, &dm_state);
+				if (ret)
+					goto cleanup;
+
+				old_dm_state = dm_atomic_get_old_state(state);
+				if (!old_dm_state) {
+					ret = -EINVAL;
+					goto cleanup;
+				}
+
+				status = dc_state_get_stream_status(old_dm_state->context,
+								    new_dm_crtc_state->stream);
+
 				update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
 										  &stream_update, status);
 
 				if (update_type > UPDATE_TYPE_MED) {
 					update_type = UPDATE_TYPE_FULL;
-					goto ret;
+					goto cleanup;
 				}
 			}
 
 		} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
 			update_type = UPDATE_TYPE_FULL;
-			goto ret;
+			goto cleanup;
 		}
 	}
 
-ret:
+cleanup:
 	kfree(updates);
 	kfree(surface);
 
-	return update_type;
+	*out_type = update_type;
+	return ret;
 }
 
 /**
@@ -5500,8 +5773,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	struct dm_atomic_state *dm_state = NULL;
 	struct dc *dc = adev->dm.dc;
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	struct drm_connector *connector;
 	struct drm_connector_state *old_con_state, *new_con_state;
 	struct drm_crtc *crtc;
@@ -5522,12 +5795,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		goto fail;
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-		struct dm_crtc_state *dm_old_crtc_state  = to_dm_crtc_state(old_crtc_state);
-
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed &&
-		    (dm_old_crtc_state->freesync_enabled == dm_new_crtc_state->freesync_enabled))
+		    !new_crtc_state->vrr_enabled)
 			continue;
 
 		if (!new_crtc_state->enable)
@@ -5542,10 +5812,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			goto fail;
 	}
 
-	dm_state->context = dc_create_state();
-	ASSERT(dm_state->context);
-	dc_resource_state_copy_construct_current(dc, dm_state->context);
-
 	/* Remove existing planes if they are modified */
 	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
 	if (ret) {
@@ -5598,7 +5864,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		lock_and_validation_needed = true;
 	}
 
-	update_type = dm_determine_update_type_for_commit(dc, state);
+	ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+	if (ret)
+		goto fail;
 
 	if (overall_update_type < update_type)
 		overall_update_type = update_type;
@@ -5616,6 +5884,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 
 
 	if (overall_update_type > UPDATE_TYPE_FAST) {
+		ret = dm_atomic_get_state(state, &dm_state);
+		if (ret)
+			goto fail;
 
 		ret = do_aquire_global_lock(dev, state);
 		if (ret)
@@ -5670,14 +5941,15 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 	struct detailed_data_monitor_range *range;
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 			to_amdgpu_dm_connector(connector);
-	struct dm_connector_state *dm_con_state;
+	struct dm_connector_state *dm_con_state = NULL;
 
 	struct drm_device *dev = connector->dev;
 	struct amdgpu_device *adev = dev->dev_private;
+	bool freesync_capable = false;
 
 	if (!connector->state) {
 		DRM_ERROR("%s - Connector has no state", __func__);
-		return;
+		goto update;
 	}
 
 	if (!edid) {
@@ -5687,9 +5959,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 		amdgpu_dm_connector->max_vfreq = 0;
 		amdgpu_dm_connector->pixel_clock_mhz = 0;
 
-		dm_con_state->freesync_capable = false;
-		dm_con_state->freesync_enable = false;
-		return;
+		goto update;
 	}
 
 	dm_con_state = to_dm_connector_state(connector->state);
@@ -5697,10 +5967,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 	edid_check_required = false;
 	if (!amdgpu_dm_connector->dc_sink) {
 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
-		return;
+		goto update;
 	}
 	if (!adev->dm.freesync_module)
-		return;
+		goto update;
 	/*
 	 * If the EDID is non-zero, restrict freesync to DP and eDP only.
 	 */
@@ -5712,7 +5982,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 						amdgpu_dm_connector);
 		}
 	}
-	dm_con_state->freesync_capable = false;
 	if (edid_check_required == true && (edid->version > 1 ||
 	   (edid->version == 1 && edid->revision > 1))) {
 		for (i = 0; i < 4; i++) {
@@ -5744,8 +6013,16 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 		if (amdgpu_dm_connector->max_vfreq -
 		    amdgpu_dm_connector->min_vfreq > 10) {
 
-			dm_con_state->freesync_capable = true;
+			freesync_capable = true;
 		}
 	}
+
+update:
+	if (dm_con_state)
+		dm_con_state->freesync_capable = freesync_capable;
+
+	if (connector->vrr_capable_property)
+		drm_connector_set_vrr_capable_property(connector,
+						       freesync_capable);
 }
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 607c3cd..4326dc2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -84,6 +84,18 @@ struct dm_comressor_info {
 };
 
 /**
+ * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
+ * @min_input_signal: minimum possible input in range 0-255
+ * @max_input_signal: maximum possible input in range 0-255
+ * @caps_valid: true if these values are from the ACPI interface
+ */
+struct amdgpu_dm_backlight_caps {
+	int min_input_signal;
+	int max_input_signal;
+	bool caps_valid;
+};
+
+/**
  * struct amdgpu_display_manager - Central amdgpu display manager device
  *
  * @dc: Display Core control structure
@@ -112,6 +124,17 @@ struct amdgpu_display_manager {
 	u16 display_indexes_num;
 
 	/**
+	 * @atomic_obj:
+	 *
+	 * In combination with &dm_atomic_state it helps manage
+	 * global atomic state that doesn't map cleanly into existing
+	 * drm resources, like &dc_context.
+	 */
+	struct drm_private_obj atomic_obj;
+
+	struct drm_modeset_lock atomic_obj_lock;
+
+	/**
 	 * @irq_handler_list_low_tab:
 	 *
 	 * Low priority IRQ handler table.
@@ -158,6 +181,7 @@ struct amdgpu_display_manager {
 	struct backlight_device *backlight_dev;
 
 	const struct dc_link *backlight_link;
+	struct amdgpu_dm_backlight_caps backlight_caps;
 
 	struct mod_freesync *freesync_module;
 
@@ -231,15 +255,21 @@ struct dm_crtc_state {
 	int crc_skip_count;
 	bool crc_enabled;
 
-	bool freesync_enabled;
+	bool freesync_timing_changed;
+	bool freesync_vrr_info_changed;
+
+	bool vrr_supported;
+	struct mod_freesync_config freesync_config;
 	struct dc_crtc_timing_adjust adjust;
 	struct dc_info_packet vrr_infopacket;
+
+	int abm_level;
 };
 
 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
 
 struct dm_atomic_state {
-	struct drm_atomic_state base;
+	struct drm_private_state base;
 
 	struct dc_state *context;
 };
@@ -254,8 +284,8 @@ struct dm_connector_state {
 	uint8_t underscan_hborder;
 	uint8_t max_bpc;
 	bool underscan_enable;
-	bool freesync_enable;
 	bool freesync_capable;
+	uint8_t abm_level;
 };
 
 #define to_dm_connector_state(x)\
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 3279e26..dba6b57 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -328,7 +328,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
 		enum dc_dither_option option)
 {
 	struct bit_depth_reduction_params params;
-	struct dc_link *link = stream->status.link;
+	struct dc_link *link = stream->sink->link;
 	struct pipe_ctx *pipes = NULL;
 	int i;
 
@@ -1686,6 +1686,15 @@ void dc_resume(struct dc *dc)
 		core_link_resume(dc->links[i]);
 }
 
+bool dc_is_dmcu_initialized(struct dc *dc)
+{
+	struct dmcu *dmcu = dc->res_pool->dmcu;
+
+	if (dmcu)
+		return dmcu->funcs->is_dmcu_initialized(dmcu);
+	return false;
+}
+
 bool dc_submit_i2c(
 		struct dc *dc,
 		uint32_t link_index,
@@ -1810,4 +1819,4 @@ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx
 	info->dcfClockDeepSleep			= (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
 	info->fClock					= (unsigned int)state->bw.dcn.clk.fclk_khz;
 	info->phyClock					= (unsigned int)state->bw.dcn.clk.phyclk_khz;
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7ee9c03..948596a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1396,8 +1396,6 @@ static enum dc_status enable_link_dp(
 	else
 		status = DC_FAIL_DP_LINK_TRAINING;
 
-	enable_stream_features(pipe_ctx);
-
 	return status;
 }
 
@@ -2175,11 +2173,11 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
 			backlight_pwm_u16_16, backlight_pwm_u16_16);
 
 	if (dc_is_embedded_signal(link->connector_signal)) {
-		if (stream != NULL) {
-			for (i = 0; i < MAX_PIPES; i++) {
+		for (i = 0; i < MAX_PIPES; i++) {
+			if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
 				if (core_dc->current_state->res_ctx.
-						pipe_ctx[i].stream
-						== stream)
+						pipe_ctx[i].stream->sink->link
+						== link)
 					/* DMCU -1 for all controller id values,
 					 * therefore +1 here
 					 */
@@ -2218,7 +2216,7 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
 	struct dc  *core_dc = link->ctx->dc;
 	struct dmcu *dmcu = core_dc->res_pool->dmcu;
 
-	if (dmcu != NULL && link->psr_enabled)
+	if (dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu) &&
+	    link->psr_enabled)
 		dmcu->funcs->set_psr_enable(dmcu, enable, wait);
 
 	return true;
@@ -2594,6 +2592,9 @@ void core_link_enable_stream(
 		core_dc->hwss.unblank_stream(pipe_ctx,
 			&pipe_ctx->stream->sink->link->cur_link_settings);
 
+		if (dc_is_dp_signal(pipe_ctx->stream->signal))
+			enable_stream_features(pipe_ctx);
+
 		dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
 				pipe_ctx->stream->bl_pwm_level,
 				0,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index d91df5e..4d1f8ac 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2371,11 +2371,22 @@ static bool retrieve_link_cap(struct dc_link *link)
 			dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
 
 		if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
-			core_link_read_dpcd(
+			uint8_t ext_cap_data[16];
+
+			memset(ext_cap_data, '\0', sizeof(ext_cap_data));
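+			/* DPCD reads can fail transiently; retry before falling back */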
+			for (i = 0; i < read_dpcd_retry_cnt; i++) {
+				status = core_link_read_dpcd(
 				link,
 				DP_DP13_DPCD_REV,
-				dpcd_data,
-				sizeof(dpcd_data));
+				ext_cap_data,
+				sizeof(ext_cap_data));
+				if (status == DC_OK) {
+					memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data));
+					break;
+				}
+			}
+			if (status != DC_OK)
+				dm_error("%s: Read extended caps data failed, using caps from dpcd 0.\n", __func__);
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index fc65b00..0bb844a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -478,10 +478,29 @@ static enum pixel_format convert_pixel_format_to_dalsurface(
 	return dal_pixel_format;
 }
 
-static void rect_swap_helper(struct rect *rect)
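+/*
+ * Map rotation and mirroring to the viewport scan direction: 90/270
+ * swap the axes (orthogonal rotation), 180 flips both scan directions,
+ * 90 flips the horizontal, 270 flips the vertical, and a horizontal
+ * mirror toggles the horizontal flip on top of that.
+ */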
+static inline void get_vp_scan_direction(
+	enum dc_rotation_angle rotation,
+	bool horizontal_mirror,
+	bool *orthogonal_rotation,
+	bool *flip_vert_scan_dir,
+	bool *flip_horz_scan_dir)
 {
-	swap(rect->height, rect->width);
-	swap(rect->x, rect->y);
+	*orthogonal_rotation = false;
+	*flip_vert_scan_dir = false;
+	*flip_horz_scan_dir = false;
+	if (rotation == ROTATION_ANGLE_180) {
+		*flip_vert_scan_dir = true;
+		*flip_horz_scan_dir = true;
+	} else if (rotation == ROTATION_ANGLE_90) {
+		*orthogonal_rotation = true;
+		*flip_horz_scan_dir = true;
+	} else if (rotation == ROTATION_ANGLE_270) {
+		*orthogonal_rotation = true;
+		*flip_vert_scan_dir = true;
+	}
+
+	if (horizontal_mirror)
+		*flip_horz_scan_dir = !*flip_horz_scan_dir;
 }
 
 static void calculate_viewport(struct pipe_ctx *pipe_ctx)
@@ -490,33 +509,14 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 	const struct dc_stream_state *stream = pipe_ctx->stream;
 	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
 	struct rect surf_src = plane_state->src_rect;
-	struct rect clip = { 0 };
+	struct rect clip, dest;
 	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
 			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
 	bool pri_split = pipe_ctx->bottom_pipe &&
 			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
 	bool sec_split = pipe_ctx->top_pipe &&
 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
-	bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
-
-
-	/*
-	 * We need take horizontal mirror into account. On an unrotated surface this means
-	 * that the viewport offset is actually the offset from the other side of source
-	 * image so we have to subtract the right edge of the viewport from the right edge of
-	 * the source window. Similar to mirror we need to take into account how offset is
-	 * affected for 270/180 rotations
-	 */
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
-		flip_vert_scan_dir = true;
-		flip_horz_scan_dir = true;
-	} else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
-		flip_vert_scan_dir = true;
-	else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		flip_horz_scan_dir = true;
-
-	if (pipe_ctx->plane_state->horizontal_mirror)
-		flip_horz_scan_dir = !flip_horz_scan_dir;
+	bool orthogonal_rotation, flip_y_start, flip_x_start;
 
 	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
 		stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
@@ -524,13 +524,10 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 		sec_split = false;
 	}
 
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
-			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		rect_swap_helper(&surf_src);
-
 	/* The actual clip is an intersection between stream
 	 * source and surface clip
 	 */
+	dest = plane_state->dst_rect;
 	clip.x = stream->src.x > plane_state->clip_rect.x ?
 			stream->src.x : plane_state->clip_rect.x;
 
@@ -547,66 +544,77 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 			stream->src.y + stream->src.height - clip.y :
 			plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y;
 
+	/*
+	 * Calculate how the scan origin is shifted in viewport space
+	 * so that clip and dst are rotated correctly.
+	 */
+	get_vp_scan_direction(
+			plane_state->rotation,
+			plane_state->horizontal_mirror,
+			&orthogonal_rotation,
+			&flip_y_start,
+			&flip_x_start);
+
+	if (orthogonal_rotation) {
+		swap(clip.x, clip.y);
+		swap(clip.width, clip.height);
+		swap(dest.x, dest.y);
+		swap(dest.width, dest.height);
+	}
+	if (flip_x_start) {
+		clip.x = dest.x + dest.width - clip.x - clip.width;
+		dest.x = 0;
+	}
+	if (flip_y_start) {
+		clip.y = dest.y + dest.height - clip.y - clip.height;
+		dest.y = 0;
+	}
+
 	/* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
-	 * note: surf_src.ofs should be added after rotation/mirror offset direction
-	 *       adjustment since it is already in viewport space
 	 * num_pixels = clip.num_pix * scl_ratio
 	 */
-	data->viewport.x = (clip.x - plane_state->dst_rect.x) *
-			surf_src.width / plane_state->dst_rect.width;
-	data->viewport.width = clip.width *
-			surf_src.width / plane_state->dst_rect.width;
+	data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width;
+	data->viewport.width = clip.width * surf_src.width / dest.width;
 
-	data->viewport.y = (clip.y - plane_state->dst_rect.y) *
-			surf_src.height / plane_state->dst_rect.height;
-	data->viewport.height = clip.height *
-			surf_src.height / plane_state->dst_rect.height;
+	data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height;
+	data->viewport.height = clip.height * surf_src.height / dest.height;
 
-	if (flip_vert_scan_dir)
-		data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
-	if (flip_horz_scan_dir)
-		data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
-
-	data->viewport.x += surf_src.x;
-	data->viewport.y += surf_src.y;
+	/* Handle split */
+	if (pri_split || sec_split) {
+		if (orthogonal_rotation) {
+			if (flip_y_start != pri_split) {
+				data->viewport.height /= 2;
+			} else {
+				data->viewport.y += data->viewport.height / 2;
+				/* Ceil offset pipe */
+				data->viewport.height = (data->viewport.height + 1) / 2;
+			}
+		} else {
+			if (flip_x_start != pri_split) {
+				data->viewport.width /= 2;
+			} else {
+				data->viewport.x += data->viewport.width / 2;
+				/* Ceil offset pipe */
+				data->viewport.width = (data->viewport.width + 1) / 2;
+			}
+		}
+	}
 
 	/* Round down, compensate in init */
 	data->viewport_c.x = data->viewport.x / vpc_div;
 	data->viewport_c.y = data->viewport.y / vpc_div;
-	data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
-			dc_fixpt_half : dc_fixpt_zero;
-	data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
-			dc_fixpt_half : dc_fixpt_zero;
+	data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+	data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+
 	/* Round up, assume original video size always even dimensions */
 	data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
 	data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
-
-	/* Handle hsplit */
-	if (sec_split) {
-		data->viewport.x +=  data->viewport.width / 2;
-		data->viewport_c.x +=  data->viewport_c.width / 2;
-		/* Ceil offset pipe */
-		data->viewport.width = (data->viewport.width + 1) / 2;
-		data->viewport_c.width = (data->viewport_c.width + 1) / 2;
-	} else if (pri_split) {
-		if (data->viewport.width > 1)
-			data->viewport.width /= 2;
-		if (data->viewport_c.width > 1)
-			data->viewport_c.width /= 2;
-	}
-
-	if (plane_state->rotation == ROTATION_ANGLE_90 ||
-			plane_state->rotation == ROTATION_ANGLE_270) {
-		rect_swap_helper(&data->viewport_c);
-		rect_swap_helper(&data->viewport);
-	}
 }
 
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
 {
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	const struct dc_stream_state *stream = pipe_ctx->stream;
-	struct rect surf_src = plane_state->src_rect;
 	struct rect surf_clip = plane_state->clip_rect;
 	bool pri_split = pipe_ctx->bottom_pipe &&
 			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
@@ -614,10 +622,6 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
 	bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
 
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
-			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		rect_swap_helper(&surf_src);
-
 	pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
 	if (stream->src.x < surf_clip.x)
 		pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
@@ -646,7 +650,7 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
 			stream->dst.y + stream->dst.height
 						- pipe_ctx->plane_res.scl_data.recout.y;
 
-	/* Handle h & vsplit */
+	/* Handle h & v split, handle rotation using viewport */
 	if (sec_split && top_bottom_split) {
 		pipe_ctx->plane_res.scl_data.recout.y +=
 				pipe_ctx->plane_res.scl_data.recout.height / 2;
@@ -655,44 +659,14 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
 				(pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
 	} else if (pri_split && top_bottom_split)
 		pipe_ctx->plane_res.scl_data.recout.height /= 2;
-	else if (pri_split || sec_split) {
-		/* HMirror XOR Secondary_pipe XOR Rotation_180 */
-		bool right_view = (sec_split != plane_state->horizontal_mirror) !=
-					(plane_state->rotation == ROTATION_ANGLE_180);
-
-		if (plane_state->rotation == ROTATION_ANGLE_90
-				|| plane_state->rotation == ROTATION_ANGLE_270)
-			/* Secondary_pipe XOR Rotation_270 */
-			right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
-
-		if (right_view) {
-			pipe_ctx->plane_res.scl_data.recout.x +=
-					pipe_ctx->plane_res.scl_data.recout.width / 2;
-			/* Ceil offset pipe */
-			pipe_ctx->plane_res.scl_data.recout.width =
-					(pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
-		} else {
-			if (pipe_ctx->plane_res.scl_data.recout.width > 1)
-				pipe_ctx->plane_res.scl_data.recout.width /= 2;
-		}
-	}
-	/* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
-	 *			* 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
-	 *			ratio)
-	 */
-	recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
-					* stream->dst.width / stream->src.width -
-			surf_src.x * plane_state->dst_rect.width / surf_src.width
-					* stream->dst.width / stream->src.width;
-	recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
-					* stream->dst.height / stream->src.height -
-			surf_src.y * plane_state->dst_rect.height / surf_src.height
-					* stream->dst.height / stream->src.height;
-
-	recout_full->width = plane_state->dst_rect.width
-					* stream->dst.width / stream->src.width;
-	recout_full->height = plane_state->dst_rect.height
-					* stream->dst.height / stream->src.height;
+	else if (sec_split) {
+		pipe_ctx->plane_res.scl_data.recout.x +=
+				pipe_ctx->plane_res.scl_data.recout.width / 2;
+		/* Ceil offset pipe */
+		pipe_ctx->plane_res.scl_data.recout.width =
+				(pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
+	} else if (pri_split)
+		pipe_ctx->plane_res.scl_data.recout.width /= 2;
 }
 
 static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -705,9 +679,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
 	const int out_w = stream->dst.width;
 	const int out_h = stream->dst.height;
 
+	/* Swap surf_src height and width since scaling ratios are in recout rotation */
 	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
 			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		rect_swap_helper(&surf_src);
+		swap(surf_src.height, surf_src.width);
 
 	pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction(
 					surf_src.width,
@@ -744,35 +719,133 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
 			pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
 }
 
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static inline void adjust_vp_and_init_for_seamless_clip(
+		bool flip_scan_dir,
+		int recout_skip,
+		int src_size,
+		int taps,
+		struct fixed31_32 ratio,
+		struct fixed31_32 *init,
+		int *vp_offset,
+		int *vp_size)
 {
+	if (!flip_scan_dir) {
+		/* Adjust for viewport end clip-off */
+		if ((*vp_offset + *vp_size) < src_size) {
+			int vp_clip = src_size - *vp_size - *vp_offset;
+			int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
+
+			int_part = int_part > 0 ? int_part : 0;
+			*vp_size += int_part < vp_clip ? int_part : vp_clip;
+		}
+
+		/* Adjust for non-0 viewport offset */
+		if (*vp_offset) {
+			int int_part;
+
+			*init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
+			int_part = dc_fixpt_floor(*init) - *vp_offset;
+			if (int_part < taps) {
+				int int_adj = *vp_offset >= (taps - int_part) ?
+							(taps - int_part) : *vp_offset;
+				*vp_offset -= int_adj;
+				*vp_size += int_adj;
+				int_part += int_adj;
+			} else if (int_part > taps) {
+				*vp_offset += int_part - taps;
+				*vp_size -= int_part - taps;
+				int_part = taps;
+			}
+			init->value &= 0xffffffff;
+			*init = dc_fixpt_add_int(*init, int_part);
+		}
+	} else {
+		/* Adjust for non-0 viewport offset */
+		if (*vp_offset) {
+			int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
+
+			int_part = int_part > 0 ? int_part : 0;
+			*vp_size += int_part < *vp_offset ? int_part : *vp_offset;
+			*vp_offset -= int_part < *vp_offset ? int_part : *vp_offset;
+		}
+
+		/* Adjust for viewport end clip-off */
+		if ((*vp_offset + *vp_size) < src_size) {
+			int int_part;
+			int end_offset = src_size - *vp_offset - *vp_size;
+
+			/*
+			 * this is what init would be if the vp had no offset; keep in mind
+			 * this is from the right side of the vp due to scan direction
+			 */
+			*init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
+			/*
+			 * this is the difference between the first pixel of the viewport
+			 * available to read and the init position, taking into account scan direction
+			 */
+			int_part = dc_fixpt_floor(*init) - end_offset;
+			if (int_part < taps) {
+				int int_adj = end_offset >= (taps - int_part) ?
+							(taps - int_part) : end_offset;
+				*vp_size += int_adj;
+				int_part += int_adj;
+			} else if (int_part > taps) {
+				*vp_size += int_part - taps;
+				int_part = taps;
+			}
+			init->value &= 0xffffffff;
+			*init = dc_fixpt_add_int(*init, int_part);
+		}
+	}
+}
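For readers tracing the helper above, here is an illustrative float model of
its non-flipped "non-0 viewport offset" branch (plain doubles and made-up
inputs only; the driver performs this in 31.32 fixed point through the
dc_fixpt helpers, and the value &= 0xffffffff step corresponds to keeping
only the fractional bits):

	#include <math.h>
	#include <stdio.h>

	/* Advance init by ratio * recout_skip, then move the viewport edge so
	 * the integer distance between init and the viewport start equals the
	 * tap count, exactly as the fixed-point branch above does.
	 */
	static void adjust_offset(double *init, double ratio, int recout_skip,
				  int taps, int *vp_offset, int *vp_size)
	{
		int int_part, int_adj;

		*init += ratio * recout_skip;
		int_part = (int)floor(*init) - *vp_offset;
		if (int_part < taps) {
			int_adj = *vp_offset >= taps - int_part ?
					taps - int_part : *vp_offset;
			*vp_offset -= int_adj;
			*vp_size += int_adj;
			int_part += int_adj;
		} else if (int_part > taps) {
			*vp_offset += int_part - taps;
			*vp_size -= int_part - taps;
			int_part = taps;
		}
		/* keep the fraction, reset the integer part (the fixed-point
		 * code does this with value &= 0xffffffff) */
		*init = (*init - floor(*init)) + int_part;
	}

	int main(void)
	{
		double init = 2.5;
		int vp_offset = 100, vp_size = 800;

		adjust_offset(&init, 1.5, 4, 4, &vp_offset, &vp_size);
		printf("init=%.2f offset=%d size=%d\n", init, vp_offset, vp_size);
		return 0;
	}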
+
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
+{
+	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+	const struct dc_stream_state *stream = pipe_ctx->stream;
 	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
 	struct rect src = pipe_ctx->plane_state->src_rect;
+	int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
 	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
 			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
-	bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
+	bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
 
 	/*
 	 * Need to calculate the scan direction for viewport to make adjustments
 	 */
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
-		flip_vert_scan_dir = true;
-		flip_horz_scan_dir = true;
-	} else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
-		flip_vert_scan_dir = true;
-	else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		flip_horz_scan_dir = true;
+	get_vp_scan_direction(
+			plane_state->rotation,
+			plane_state->horizontal_mirror,
+			&orthogonal_rotation,
+			&flip_vert_scan_dir,
+			&flip_horz_scan_dir);
 
-	if (pipe_ctx->plane_state->horizontal_mirror)
-			flip_horz_scan_dir = !flip_horz_scan_dir;
-
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
-			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
-		rect_swap_helper(&src);
-		rect_swap_helper(&data->viewport_c);
-		rect_swap_helper(&data->viewport);
+	/* Calculate src rect rotation adjusted to recout space */
+	surf_size_h = src.x + src.width;
+	surf_size_v = src.y + src.height;
+	if (flip_horz_scan_dir)
+		src.x = 0;
+	if (flip_vert_scan_dir)
+		src.y = 0;
+	if (orthogonal_rotation) {
+		swap(src.x, src.y);
+		swap(src.width, src.height);
 	}
 
+	/* Recout matching initial vp offset = recout_offset - (stream dst offset +
+	 *			((surf dst offset - stream src offset) * 1 / stream scaling ratio)
+	 *			- (surf src offset * 1 / full scl ratio))
+	 */
+	recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+					* stream->dst.width / stream->src.width -
+					src.x * plane_state->dst_rect.width / src.width
+					* stream->dst.width / stream->src.width);
+	recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+					* stream->dst.height / stream->src.height -
+					src.y * plane_state->dst_rect.height / src.height
+					* stream->dst.height / stream->src.height);
+	if (orthogonal_rotation)
+		swap(recout_skip_h, recout_skip_v);
 	/*
 	 * Init calculated according to formula:
 	 * 	init = (scaling_ratio + number_of_taps + 1) / 2
@@ -791,304 +864,57 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *r
 	data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
 			dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
 
-	if (!flip_horz_scan_dir) {
-		/* Adjust for viewport end clip-off */
-		if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
-			int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
-		}
-		if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
-			int vp_clip = (src.x + src.width) / vpc_div -
-					data->viewport_c.width - data->viewport_c.x;
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
-		}
-
-		/* Adjust for non-0 viewport offset */
-		if (data->viewport.x) {
-			int int_part;
-
-			data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
-					data->ratios.horz, data->recout.x - recout_full->x));
-			int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
-			if (int_part < data->taps.h_taps) {
-				int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
-							(data->taps.h_taps - int_part) : data->viewport.x;
-				data->viewport.x -= int_adj;
-				data->viewport.width += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.h_taps) {
-				data->viewport.x += int_part - data->taps.h_taps;
-				data->viewport.width -= int_part - data->taps.h_taps;
-				int_part = data->taps.h_taps;
-			}
-			data->inits.h.value &= 0xffffffff;
-			data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
-		}
-
-		if (data->viewport_c.x) {
-			int int_part;
-
-			data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
-					data->ratios.horz_c, data->recout.x - recout_full->x));
-			int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
-			if (int_part < data->taps.h_taps_c) {
-				int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
-						(data->taps.h_taps_c - int_part) : data->viewport_c.x;
-				data->viewport_c.x -= int_adj;
-				data->viewport_c.width += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.h_taps_c) {
-				data->viewport_c.x += int_part - data->taps.h_taps_c;
-				data->viewport_c.width -= int_part - data->taps.h_taps_c;
-				int_part = data->taps.h_taps_c;
-			}
-			data->inits.h_c.value &= 0xffffffff;
-			data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
-		}
-	} else {
-		/* Adjust for non-0 viewport offset */
-		if (data->viewport.x) {
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
-			data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
-		}
-		if (data->viewport_c.x) {
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
-			data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
-		}
-
-		/* Adjust for viewport end clip-off */
-		if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
-			int int_part;
-			int end_offset = src.x + src.width
-					- data->viewport.x - data->viewport.width;
-
-			/*
-			 * this is init if vp had no offset, keep in mind this is from the
-			 * right side of vp due to scan direction
-			 */
-			data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
-					data->ratios.horz, data->recout.x - recout_full->x));
-			/*
-			 * this is the difference between first pixel of viewport available to read
-			 * and init position, takning into account scan direction
-			 */
-			int_part = dc_fixpt_floor(data->inits.h) - end_offset;
-			if (int_part < data->taps.h_taps) {
-				int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
-							(data->taps.h_taps - int_part) : end_offset;
-				data->viewport.width += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.h_taps) {
-				data->viewport.width += int_part - data->taps.h_taps;
-				int_part = data->taps.h_taps;
-			}
-			data->inits.h.value &= 0xffffffff;
-			data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
-		}
-
-		if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
-			int int_part;
-			int end_offset = (src.x + src.width) / vpc_div
-					- data->viewport_c.x - data->viewport_c.width;
-
-			/*
-			 * this is init if vp had no offset, keep in mind this is from the
-			 * right side of vp due to scan direction
-			 */
-			data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
-					data->ratios.horz_c, data->recout.x - recout_full->x));
-			/*
-			 * this is the difference between first pixel of viewport available to read
-			 * and init position, takning into account scan direction
-			 */
-			int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
-			if (int_part < data->taps.h_taps_c) {
-				int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
-							(data->taps.h_taps_c - int_part) : end_offset;
-				data->viewport_c.width += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.h_taps_c) {
-				data->viewport_c.width += int_part - data->taps.h_taps_c;
-				int_part = data->taps.h_taps_c;
-			}
-			data->inits.h_c.value &= 0xffffffff;
-			data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
-		}
-
-	}
-	if (!flip_vert_scan_dir) {
-		/* Adjust for viewport end clip-off */
-		if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
-			int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
-		}
-		if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
-			int vp_clip = (src.y + src.height) / vpc_div -
-					data->viewport_c.height - data->viewport_c.y;
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
-		}
-
-		/* Adjust for non-0 viewport offset */
-		if (data->viewport.y) {
-			int int_part;
-
-			data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
-					data->ratios.vert, data->recout.y - recout_full->y));
-			int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
-			if (int_part < data->taps.v_taps) {
-				int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
-							(data->taps.v_taps - int_part) : data->viewport.y;
-				data->viewport.y -= int_adj;
-				data->viewport.height += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.v_taps) {
-				data->viewport.y += int_part - data->taps.v_taps;
-				data->viewport.height -= int_part - data->taps.v_taps;
-				int_part = data->taps.v_taps;
-			}
-			data->inits.v.value &= 0xffffffff;
-			data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
-		}
-
-		if (data->viewport_c.y) {
-			int int_part;
-
-			data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
-					data->ratios.vert_c, data->recout.y - recout_full->y));
-			int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
-			if (int_part < data->taps.v_taps_c) {
-				int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
-						(data->taps.v_taps_c - int_part) : data->viewport_c.y;
-				data->viewport_c.y -= int_adj;
-				data->viewport_c.height += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.v_taps_c) {
-				data->viewport_c.y += int_part - data->taps.v_taps_c;
-				data->viewport_c.height -= int_part - data->taps.v_taps_c;
-				int_part = data->taps.v_taps_c;
-			}
-			data->inits.v_c.value &= 0xffffffff;
-			data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
-		}
-	} else {
-		/* Adjust for non-0 viewport offset */
-		if (data->viewport.y) {
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
-			data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
-		}
-		if (data->viewport_c.y) {
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
-			data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
-		}
-
-		/* Adjust for viewport end clip-off */
-		if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
-			int int_part;
-			int end_offset = src.y + src.height
-					- data->viewport.y - data->viewport.height;
-
-			/*
-			 * this is init if vp had no offset, keep in mind this is from the
-			 * right side of vp due to scan direction
-			 */
-			data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
-					data->ratios.vert, data->recout.y - recout_full->y));
-			/*
-			 * this is the difference between first pixel of viewport available to read
-			 * and init position, taking into account scan direction
-			 */
-			int_part = dc_fixpt_floor(data->inits.v) - end_offset;
-			if (int_part < data->taps.v_taps) {
-				int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
-							(data->taps.v_taps - int_part) : end_offset;
-				data->viewport.height += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.v_taps) {
-				data->viewport.height += int_part - data->taps.v_taps;
-				int_part = data->taps.v_taps;
-			}
-			data->inits.v.value &= 0xffffffff;
-			data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
-		}
-
-		if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
-			int int_part;
-			int end_offset = (src.y + src.height) / vpc_div
-					- data->viewport_c.y - data->viewport_c.height;
-
-			/*
-			 * this is init if vp had no offset, keep in mind this is from the
-			 * right side of vp due to scan direction
-			 */
-			data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
-					data->ratios.vert_c, data->recout.y - recout_full->y));
-			/*
-			 * this is the difference between first pixel of viewport available to read
-			 * and init position, taking into account scan direction
-			 */
-			int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
-			if (int_part < data->taps.v_taps_c) {
-				int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
-							(data->taps.v_taps_c - int_part) : end_offset;
-				data->viewport_c.height += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.v_taps_c) {
-				data->viewport_c.height += int_part - data->taps.v_taps_c;
-				int_part = data->taps.v_taps_c;
-			}
-			data->inits.v_c.value &= 0xffffffff;
-			data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
-		}
-	}
+	/*
+	 * Taps, inits and scaling ratios are in recout space; rotate them
+	 * to viewport space before adjustment
+	 */
+	adjust_vp_and_init_for_seamless_clip(
+			flip_horz_scan_dir,
+			recout_skip_h,
+			surf_size_h,
+			orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps,
+			orthogonal_rotation ? data->ratios.vert : data->ratios.horz,
+			orthogonal_rotation ? &data->inits.v : &data->inits.h,
+			&data->viewport.x,
+			&data->viewport.width);
+	adjust_vp_and_init_for_seamless_clip(
+			flip_horz_scan_dir,
+			recout_skip_h,
+			surf_size_h / vpc_div,
+			orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c,
+			orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c,
+			orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c,
+			&data->viewport_c.x,
+			&data->viewport_c.width);
+	adjust_vp_and_init_for_seamless_clip(
+			flip_vert_scan_dir,
+			recout_skip_v,
+			surf_size_v,
+			orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps,
+			orthogonal_rotation ? data->ratios.horz : data->ratios.vert,
+			orthogonal_rotation ? &data->inits.h : &data->inits.v,
+			&data->viewport.y,
+			&data->viewport.height);
+	adjust_vp_and_init_for_seamless_clip(
+			flip_vert_scan_dir,
+			recout_skip_v,
+			surf_size_v / vpc_div,
+			orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c,
+			orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c,
+			orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c,
+			&data->viewport_c.y,
+			&data->viewport_c.height);
 
 	/* Interlaced inits based on final vert inits */
 	data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
 	data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
 
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
-			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
-		rect_swap_helper(&data->viewport_c);
-		rect_swap_helper(&data->viewport);
-	}
 }
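As a quick worked number for the init formula quoted at the top of this
function, init = (scaling_ratio + number_of_taps + 1) / 2: with an assumed
ratio of 1.5 and 4 taps, init = (1.5 + 4 + 1) / 2 = 3.25. Sketch (illustrative
floats; the driver computes this in 31.32 fixed point and truncates the chroma
inits to 19 fractional bits):

	#include <stdio.h>

	int main(void)
	{
		double ratio = 1.5; /* assumed example scaling ratio */
		int taps = 4;       /* assumed example tap count */

		/* init = (scaling_ratio + number_of_taps + 1) / 2 */
		printf("init = %.2f\n", (ratio + taps + 1) / 2); /* prints 3.25 */
		return 0;
	}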
 
 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 {
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
-	struct rect recout_full = { 0 };
 	bool res = false;
 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 	/* Important: scaling ratio calculation requires pixel format,
@@ -1105,7 +931,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
 		return false;
 
-	calculate_recout(pipe_ctx, &recout_full);
+	calculate_recout(pipe_ctx);
 
 	/**
 	 * Setting line buffer pixel depth to 24bpp yields banding
@@ -1146,7 +972,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
 	if (res)
 		/* May need to re-check lb size after this in some obscure scenario */
-		calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
+		calculate_inits_and_adj_vp(pipe_ctx);
 
 	DC_LOG_SCALER(
 				"%s: Viewport:\nheight:%d width:%d x:%d "
@@ -1356,6 +1182,9 @@ bool dc_add_plane_to_context(
 		return false;
 	}
 
+	tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
+	ASSERT(tail_pipe);
+
 	free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
 
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -1373,10 +1202,6 @@ bool dc_add_plane_to_context(
 	free_pipe->plane_state = plane_state;
 
 	if (head_pipe != free_pipe) {
-
-		tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
-		ASSERT(tail_pipe);
-
 		free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
 		free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
 		free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1796,11 +1621,11 @@ enum dc_status dc_add_stream_to_ctx(
 		struct dc_state *new_ctx,
 		struct dc_stream_state *stream)
 {
-	struct dc_context *dc_ctx = dc->ctx;
 	enum dc_status res;
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 	if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
-		DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
+		DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
 		return DC_ERROR_UNEXPECTED;
 	}
 
@@ -1810,7 +1635,7 @@ enum dc_status dc_add_stream_to_ctx(
 
 	res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
 	if (res != DC_OK)
-		DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res);
+		DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
 
 	return res;
 }
@@ -1976,6 +1801,8 @@ enum dc_status resource_map_pool_resources(
 		}
 	*/
 
+	calculate_phy_pix_clks(stream);
+
 	/* acquire new resources */
 	pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index e113439..780838a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -100,8 +100,6 @@ static void construct(struct dc_stream_state *stream,
 	/* EDID CAP translation for HDMI 2.0 */
 	stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
 
-	stream->status.link = stream->sink->link;
-
 	update_stream_signal(stream);
 
 	stream->out_transfer_func = dc_create_transfer_func();
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index d16a20c..dea8bc3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -36,9 +36,10 @@
 
 #include "inc/hw_sequencer.h"
 #include "inc/compressor.h"
+#include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.04"
+#define DC_VER "3.2.06"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
@@ -47,13 +48,6 @@
 /*******************************************************************************
  * Display Core Interfaces
  ******************************************************************************/
-struct dmcu_version {
-	unsigned int date;
-	unsigned int month;
-	unsigned int year;
-	unsigned int interface_version;
-};
-
 struct dc_versions {
 	const char *dc_ver;
 	struct dmcu_version dmcu_version;
@@ -748,5 +742,6 @@ void dc_set_power_state(
 		struct dc *dc,
 		enum dc_acpi_cm_power_state power_state);
 void dc_resume(struct dc *dc);
+bool dc_is_dmcu_initialized(struct dc *dc);
 
 #endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index c5bd1fb..771d9f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -104,8 +104,6 @@ struct dc_stream_state {
 	bool dpms_off;
 	bool apply_edp_fast_boot_optimization;
 
-	struct dc_stream_status status;
-
 	struct dc_cursor_attributes cursor_attributes;
 	struct dc_cursor_position cursor_position;
 	uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 9a28a04..bd22f518 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -94,7 +94,7 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
 /*ClocksStatePerformance*/
 { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
 
-static int dentist_get_divider_from_did(int did)
+int dentist_get_divider_from_did(int did)
 {
 	if (did < DENTIST_BASE_DID_1)
 		did = DENTIST_BASE_DID_1;
@@ -277,7 +277,8 @@ static int dce_set_clock(
 	if (requested_clk_khz == 0)
 		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
 
-	dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+	if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+		dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
 
 	return actual_clock;
 }
@@ -324,9 +325,11 @@ int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
 	bp->funcs->set_dce_clock(bp, &dce_clk_params);
 
 	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
-		if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
-			dmcu->funcs->set_psr_wait_loop(dmcu,
-					actual_clock / 1000 / 7);
+		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+			if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
+				dmcu->funcs->set_psr_wait_loop(dmcu,
+						actual_clock / 1000 / 7);
+		}
 	}
 
 	clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
@@ -588,6 +591,8 @@ static void dce11_pplib_apply_display_requirements(
 			dc,
 			context->bw.dce.sclk_khz);
 
+	pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+
 	pp_display_cfg->min_engine_clock_deep_sleep_khz
 			= context->bw.dce.sclk_deep_sleep_khz;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
index 0460777..3bceb31 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
@@ -165,4 +165,6 @@ struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
 
 void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
 
+int dentist_get_divider_from_did(int did);
+
 #endif /* _DCE_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index c47c818..cce0d18 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -908,7 +908,6 @@ static void dce110_stream_encoder_dp_blank(
 	struct stream_encoder *enc)
 {
 	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
-	uint32_t retries = 0;
 	uint32_t  reg1 = 0;
 	uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
 
@@ -926,30 +925,28 @@ static void dce110_stream_encoder_dp_blank(
 	 * (2 = start of the next vertical blank) */
 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
 	/* Larger delay to wait until VBLANK - use max retry of
-	* 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
-	* a little more because we may not trust delay accuracy.
-	*/
+	 * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+	 * a little more because we may not trust delay accuracy.
+	 */
 	max_retries = DP_BLANK_MAX_RETRY * 150;
 
 	/* disable DP stream */
 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
 
 	/* the encoder stops sending the video stream
-	* at the start of the vertical blanking.
-	* Poll for DP_VID_STREAM_STATUS == 0
-	*/
+	 * at the start of the vertical blanking.
+	 * Poll for DP_VID_STREAM_STATUS == 0
+	 */
 
 	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
 			0,
 			10, max_retries);
 
-	ASSERT(retries <= max_retries);
-
 	/* Tell the DP encoder to ignore timing from CRTC, must be done after
-	* the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
-	* complete, stream status will be stuck in video stream enabled state,
-	* i.e. DP_VID_STREAM_STATUS stuck at 1.
-	*/
+	 * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+	 * complete, stream status will be stuck in video stream enabled state,
+	 * i.e. DP_VID_STREAM_STATUS stuck at 1.
+	 */
 
 	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 9724a17..2f062ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2282,7 +2282,7 @@ static void dce110_enable_per_frame_crtc_position_reset(
 	int i;
 
 	gsl_params.gsl_group = 0;
-	gsl_params.gsl_master = grouped_pipes[0]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst;
+	gsl_params.gsl_master = 0;
 
 	for (i = 0; i < group_size; i++)
 		grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 6d40b3d..cdd1d6b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -41,7 +41,6 @@
 #include "dce/dce_mem_input.h"
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
-#include "dce/dce_mem_input.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_opp.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
index 20f531d..f9d7d2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -223,7 +223,7 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 			&dc->res_pool->pp_smu_req;
 	struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
 	struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+	uint32_t requested_dcf_clock_in_khz = 0;
 	bool send_request_to_increase = false;
 	bool send_request_to_lower = false;
 	int display_count;
@@ -263,8 +263,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 	// F Clock
 	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
 		clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
-		clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
 		smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
 
 		notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz);
@@ -293,10 +291,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 	 */
 	if (send_request_to_increase) {
 		/*use dcfclk to request voltage*/
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+		requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
 
-		notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+		notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
 
 		if (pp_smu->set_display_requirement)
 			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
@@ -317,10 +314,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 
 	if (!send_request_to_increase && send_request_to_lower) {
 		/*use dcfclk to request voltage*/
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+		requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
 
-		notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+		notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
 
 		if (pp_smu->set_display_requirement)
 			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 4254e7e..c7d1e67 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -100,7 +100,7 @@ bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
 	REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
 			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
 
-	return true ? false : enable;
+	return enable ? true : false;
 }
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 74132a1..345af01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -99,6 +99,14 @@ static unsigned int hubp1_get_underflow_status(struct hubp *hubp)
 	return hubp_underflow;
 }
 
+
+void hubp1_clear_underflow(struct hubp *hubp)
+{
+	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+	REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
+}
+
 static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
 {
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -565,19 +573,6 @@ void hubp1_program_deadline(
 		REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
 		DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
 
-	if (REG(PREFETCH_SETTINS))
-		REG_SET_2(PREFETCH_SETTINS, 0,
-			DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
-			VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
-	else
-		REG_SET_2(PREFETCH_SETTINGS, 0,
-			DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
-			VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
-
-	REG_SET_2(VBLANK_PARAMETERS_0, 0,
-		DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
-		DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
-
 	REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
 		REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
 
@@ -585,9 +580,6 @@ void hubp1_program_deadline(
 	REG_SET(VBLANK_PARAMETERS_1, 0,
 		REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
 
-	REG_SET(VBLANK_PARAMETERS_3, 0,
-		REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
-
 	if (REG(NOM_PARAMETERS_0))
 		REG_SET(NOM_PARAMETERS_0, 0,
 			DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
@@ -602,27 +594,13 @@ void hubp1_program_deadline(
 	REG_SET(NOM_PARAMETERS_5, 0,
 		REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
 
-	REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
-		REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
-		REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
-
 	REG_SET_2(PER_LINE_DELIVERY, 0,
 		REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
 		REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
 
-	if (REG(PREFETCH_SETTINS_C))
-		REG_SET(PREFETCH_SETTINS_C, 0,
-			VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
-	else
-		REG_SET(PREFETCH_SETTINGS_C, 0,
-			VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
-
 	REG_SET(VBLANK_PARAMETERS_2, 0,
 		REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
 
-	REG_SET(VBLANK_PARAMETERS_4, 0,
-		REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
-
 	if (REG(NOM_PARAMETERS_2))
 		REG_SET(NOM_PARAMETERS_2, 0,
 			DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
@@ -642,10 +620,6 @@ void hubp1_program_deadline(
 		QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
 		QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
 
-	REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
-		MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
-		QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
-
 	/* TTU - per luma/chroma */
 	/* Assumed surf0 is luma and 1 is chroma */
 
@@ -654,25 +628,15 @@ void hubp1_program_deadline(
 		QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
 		QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
 
-	REG_SET(DCN_SURF0_TTU_CNTL1, 0,
-		REFCYC_PER_REQ_DELIVERY_PRE,
-		ttu_attr->refcyc_per_req_delivery_pre_l);
-
 	REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
 		REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
 		QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
 		QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
 
-	REG_SET(DCN_SURF1_TTU_CNTL1, 0,
-		REFCYC_PER_REQ_DELIVERY_PRE,
-		ttu_attr->refcyc_per_req_delivery_pre_c);
-
 	REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
 		REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
 		QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
 		QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
-	REG_SET(DCN_CUR0_TTU_CNTL1, 0,
-		REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
 }
 
 static void hubp1_setup(
@@ -690,6 +654,48 @@ static void hubp1_setup(
 	hubp1_vready_workaround(hubp, pipe_dest);
 }
 
+static void hubp1_setup_interdependent(
+		struct hubp *hubp,
+		struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+		struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+	REG_SET_2(PREFETCH_SETTINS, 0,
+		DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+		VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+
+	REG_SET(PREFETCH_SETTINS_C, 0,
+		VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+
+	REG_SET_2(VBLANK_PARAMETERS_0, 0,
+		DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
+		DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+
+	REG_SET(VBLANK_PARAMETERS_3, 0,
+		REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+	REG_SET(VBLANK_PARAMETERS_4, 0,
+		REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+	REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
+		REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
+		REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+
+	REG_SET(DCN_SURF0_TTU_CNTL1, 0,
+		REFCYC_PER_REQ_DELIVERY_PRE,
+		ttu_attr->refcyc_per_req_delivery_pre_l);
+	REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+		REFCYC_PER_REQ_DELIVERY_PRE,
+		ttu_attr->refcyc_per_req_delivery_pre_c);
+	REG_SET(DCN_CUR0_TTU_CNTL1, 0,
+		REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+
+	REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
+		MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
+		QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+}
+
 bool hubp1_is_flip_pending(struct hubp *hubp)
 {
 	uint32_t flip_pending = 0;
@@ -1178,6 +1184,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
 			hubp1_program_surface_config,
 	.hubp_is_flip_pending = hubp1_is_flip_pending,
 	.hubp_setup = hubp1_setup,
+	.hubp_setup_interdependent = hubp1_setup_interdependent,
 	.hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings,
 	.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
 	.set_blank = hubp1_set_blank,
@@ -1190,6 +1197,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
 	.hubp_clk_cntl = hubp1_clk_cntl,
 	.hubp_vtg_sel = hubp1_vtg_sel,
 	.hubp_read_state = hubp1_read_state,
+	.hubp_clear_underflow = hubp1_clear_underflow,
 	.hubp_disable_control =  hubp1_disable_control,
 	.hubp_get_underflow_status = hubp1_get_underflow_status,
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 4890273..62d4232 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -251,6 +251,7 @@
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
@@ -435,6 +436,7 @@
 	type HUBP_NO_OUTSTANDING_REQ;\
 	type HUBP_VTG_SEL;\
 	type HUBP_UNDERFLOW_STATUS;\
+	type HUBP_UNDERFLOW_CLEAR;\
 	type NUM_PIPES;\
 	type NUM_BANKS;\
 	type PIPE_INTERLEAVE;\
@@ -739,6 +741,7 @@ void dcn10_hubp_construct(
 	const struct dcn_mi_mask *hubp_mask);
 
 void hubp1_read_state(struct hubp *hubp);
+void hubp1_clear_underflow(struct hubp *hubp);
 
 enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 87495de..0bd33a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1227,7 +1227,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
 		tf = plane_state->in_transfer_func;
 
 	if (plane_state->gamma_correction &&
-		!plane_state->gamma_correction->is_identity
+		!dpp_base->ctx->dc->debug.always_use_regamma
+		&& !plane_state->gamma_correction->is_identity
 			&& dce_use_lut(plane_state->format))
 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
 
@@ -1400,7 +1401,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
 					grouped_pipes[i]->stream_res.tg,
-					grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+					0,
 					&grouped_pipes[i]->stream->triggered_crtc_reset);
 
 	DC_SYNC_INFO("Waiting for trigger\n");
@@ -1770,7 +1771,7 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
 	}
 }
 
-static void dcn10_get_surface_visual_confirm_color(
+void dcn10_get_surface_visual_confirm_color(
 		const struct pipe_ctx *pipe_ctx,
 		struct tg_color *color)
 {
@@ -1806,7 +1807,7 @@ static void dcn10_get_surface_visual_confirm_color(
 	}
 }
 
-static void dcn10_get_hdr_visual_confirm_color(
+void dcn10_get_hdr_visual_confirm_color(
 		struct pipe_ctx *pipe_ctx,
 		struct tg_color *color)
 {
@@ -2067,6 +2068,10 @@ void update_dchubp_dpp(
 			&pipe_ctx->ttu_regs,
 			&pipe_ctx->rq_regs,
 			&pipe_ctx->pipe_dlg_param);
+		hubp->funcs->hubp_setup_interdependent(
+			hubp,
+			&pipe_ctx->dlg_regs,
+			&pipe_ctx->ttu_regs);
 	}
 
 	size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
@@ -2337,6 +2342,32 @@ static void dcn10_apply_ctx_for_surface(
 
 	dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
 
+	if (top_pipe_to_program->plane_state &&
+			top_pipe_to_program->plane_state->update_flags.bits.full_update)
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+			/* Skip inactive pipes and ones already updated */
+			if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+				continue;
+
+			pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+
+			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
+				pipe_ctx->plane_res.hubp,
+				&pipe_ctx->dlg_regs,
+				&pipe_ctx->ttu_regs);
+		}
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+			continue;
+
+		dcn10_pipe_control_lock(dc, pipe_ctx, false);
+	}
+
 	if (num_planes == 0)
 		false_optc_underflow_wa(dc, stream, tg);
 
@@ -2710,6 +2741,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.set_avmute = dce110_set_avmute,
 	.log_hw_state = dcn10_log_hw_state,
 	.get_hw_state = dcn10_get_hw_state,
+	.clear_status_bits = dcn10_clear_status_bits,
 	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
 	.edp_backlight_control = hwss_edp_backlight_control,
 	.edp_power_control = hwss_edp_power_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 5e5610c..f8eea10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -51,6 +51,8 @@ void dcn10_get_hw_state(
 		char *pBuf, unsigned int bufSize,
 		unsigned int mask);
 
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
+
 bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
 
 bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
@@ -61,6 +63,14 @@ void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
 
 void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
 
+void dcn10_get_surface_visual_confirm_color(
+		const struct pipe_ctx *pipe_ctx,
+		struct tg_color *color);
+
+void dcn10_get_hdr_visual_confirm_color(
+		struct pipe_ctx *pipe_ctx,
+		struct tg_color *color);
+
 void update_dchubp_dpp(
 	struct dc *dc,
 	struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 6415890..211bb24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -454,12 +454,6 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
 
 			remaining_buffer -= chars_printed;
 			pBuf += chars_printed;
-
-			// Clear underflow for debug purposes
-			// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
-			// This function is called only from Windows or Diags test environment, hence it's safe to clear
-			// it from here without affecting the original intent.
-			tg->funcs->clear_optc_underflow(tg);
 		}
 	}
 
@@ -484,6 +478,59 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i
 	return chars_printed;
 }
 
+static void dcn10_clear_otpc_underflow(struct dc *dc)
+{
+	struct resource_pool *pool = dc->res_pool;
+	int i;
+
+	for (i = 0; i < pool->timing_generator_count; i++) {
+		struct timing_generator *tg = pool->timing_generators[i];
+		struct dcn_otg_state s = {0};
+
+		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+
+		if (s.otg_enabled & 1)
+			tg->funcs->clear_optc_underflow(tg);
+	}
+}
+
+static void dcn10_clear_hubp_underflow(struct dc *dc)
+{
+	struct resource_pool *pool = dc->res_pool;
+	int i;
+
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct hubp *hubp = pool->hubps[i];
+		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+		hubp->funcs->hubp_read_state(hubp);
+
+		if (!s->blank_en)
+			hubp->funcs->hubp_clear_underflow(hubp);
+	}
+}
+
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask)
+{
+	/*
+	 *  Mask Format
+	 *  Bit 0 - 31: Status bit to clear
+	 *
+	 *  Mask = 0x0 means clear all status bits
+	 */
+	const unsigned int DC_HW_STATE_MASK_HUBP_UNDERFLOW	= 0x1;
+	const unsigned int DC_HW_STATE_MASK_OTPC_UNDERFLOW	= 0x2;
+
+	if (mask == 0x0)
+		mask = 0xFFFFFFFF;
+
+	if (mask & DC_HW_STATE_MASK_HUBP_UNDERFLOW)
+		dcn10_clear_hubp_underflow(dc);
+
+	if (mask & DC_HW_STATE_MASK_OTPC_UNDERFLOW)
+		dcn10_clear_otpc_underflow(dc);
+}
+
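A hypothetical caller of the new hook (sketch only; it assumes the dc->hwss
plumbing added to hw_sequencer.h below and follows the mask semantics from
the comment above):

	/* Clear HUBP underflow only (bit 0), then clear everything:
	 * per the comment above, a mask of 0x0 means "clear all".
	 */
	static void example_clear_underflows(struct dc *dc)
	{
		if (dc->hwss.clear_status_bits) {
			dc->hwss.clear_status_bits(dc, 0x1); /* HUBP underflow */
			dc->hwss.clear_status_bits(dc, 0x0); /* all status bits */
		}
	}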
 void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask)
 {
 	/*
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 7d1f667..7c13861 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -335,9 +335,8 @@ void optc1_program_timing(
 	/* Enable stereo - only when we need to pack 3D frame. Other types
 	 * of stereo handled in explicit call
 	 */
-	h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ?
-			1 : 0;
 
+	h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing);
 	REG_UPDATE(OTG_H_TIMING_CNTL,
 			OTG_H_TIMING_DIV_BY2, h_div_2);
 
@@ -360,20 +359,19 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab
 static void optc1_unblank_crtc(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-	uint32_t vertical_interrupt_enable = 0;
-
-	REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
-			OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable);
-
-	/* temporary work around for vertical interrupt, once vertical interrupt enabled,
-	 * this check will be removed.
-	 */
-	if (vertical_interrupt_enable)
-		optc1_set_blank_data_double_buffer(optc, true);
 
 	REG_UPDATE_2(OTG_BLANK_CONTROL,
 			OTG_BLANK_DATA_EN, 0,
 			OTG_BLANK_DE_MODE, 0);
+
+	/* W/A for automated testing
+	 * Automated testing will fail the underflow test as there
+	 * are sporadic underflows which occur during the optc blank
+	 * sequence.  As a w/a, clear underflow on unblank.
+	 * This prevents the failure, but will not mask actual
+	 * underflows that affect real use cases.
+	 */
+	optc1_clear_optc_underflow(optc);
 }
 
 /**
@@ -1422,3 +1420,9 @@ void dcn10_timing_generator_init(struct optc *optc1)
 	optc1->min_h_sync_width = 8;
 	optc1->min_v_sync_width = 1;
 }
+
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+	return timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c1b1142..8bacf0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -565,4 +565,6 @@ bool optc1_configure_crc(struct timing_generator *optc,
 bool optc1_get_crc(struct timing_generator *optc,
 		    uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
 
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+
 #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 6f9078f..b8b5525 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -766,7 +766,6 @@ void enc1_stream_encoder_dp_blank(
 	struct stream_encoder *enc)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-	uint32_t retries = 0;
 	uint32_t  reg1 = 0;
 	uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
 
@@ -803,8 +802,6 @@ void enc1_stream_encoder_dp_blank(
 			0,
 			10, max_retries);
 
-	ASSERT(retries <= max_retries);
-
 	/* Tell the DP encoder to ignore timing from CRTC, must be done after
 	 * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
 	 * complete, stream status will be stuck in video stream enabled state,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 4550747..cb85eaa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -32,6 +32,13 @@ enum dmcu_state {
 	DMCU_RUNNING = 1
 };
 
+struct dmcu_version {
+	unsigned int date;
+	unsigned int month;
+	unsigned int year;
+	unsigned int interface_version;
+};
+
 struct dmcu {
 	struct dc_context *ctx;
 	const struct dmcu_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 334c48c..04c6989 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -63,6 +63,11 @@ struct hubp_funcs {
 			struct _vcs_dpi_display_rq_regs_st *rq_regs,
 			struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
 
+	void (*hubp_setup_interdependent)(
+			struct hubp *hubp,
+			struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+			struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+
 	void (*dcc_control)(struct hubp *hubp, bool enable,
 			bool independent_64b_blks);
 	void (*mem_program_viewport)(
@@ -121,6 +126,7 @@ struct hubp_funcs {
 	void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
 	void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
 	void (*hubp_read_state)(struct hubp *hubp);
+	void (*hubp_clear_underflow)(struct hubp *hubp);
 	void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
 	unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index e9b702c..d6a85f4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -200,6 +200,7 @@ struct hw_sequencer_funcs {
 	void (*log_hw_state)(struct dc *dc,
 		struct dc_log_buffer_ctx *log_ctx);
 	void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
+	void (*clear_status_bits)(struct dc *dc, unsigned int mask);
 
 	void (*wait_for_mpcc_disconnect)(struct dc *dc,
 			struct resource_pool *res_pool,
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 7480f07..bbecbae 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -813,20 +813,26 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
 	const struct hw_x_point *coord_x = coordinate_x;
 	struct fixed31_32 scaledX = dc_fixpt_zero;
 	struct fixed31_32 scaledX1 = dc_fixpt_zero;
-	struct fixed31_32 max_display = dc_fixpt_from_int(fs_params->max_display);
-	struct fixed31_32 min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
-	struct fixed31_32 max_content = dc_fixpt_from_int(fs_params->max_content);
-	struct fixed31_32 min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
+	struct fixed31_32 max_display;
+	struct fixed31_32 min_display;
+	struct fixed31_32 max_content;
+	struct fixed31_32 min_content;
 	struct fixed31_32 clip = dc_fixpt_one;
 	struct fixed31_32 output;
 	bool use_eetf = false;
 	bool is_clipped = false;
-	struct fixed31_32 sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+	struct fixed31_32 sdr_white_level;
 
 	if (fs_params == NULL || fs_params->max_content == 0 ||
 			fs_params->max_display == 0)
 		return false;
 
+	max_display = dc_fixpt_from_int(fs_params->max_display);
+	min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
+	max_content = dc_fixpt_from_int(fs_params->max_content);
+	min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
+	sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+
 	if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
 		min_display = dc_fixpt_from_fraction(1, 10);
 	if (fs_params->max_display < 100) // cap at 100 at the top
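
The reordering above is a use-before-check fix: the removed initializers
dereferenced fs_params before the NULL test, so the guard could never catch a
NULL pointer. The corrected pattern in miniature, with hypothetical names:

    struct fixed31_32 example_safe_init(const struct example_fs_params *p)
    {
    	if (p == NULL)		/* validate the pointer first... */
    		return dc_fixpt_zero;

    	/* ...then dereference it */
    	return dc_fixpt_from_int(p->max_display);
    }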
diff --git a/drivers/gpu/drm/amd/display/modules/power/Makefile b/drivers/gpu/drm/amd/display/modules/power/Makefile
new file mode 100644
index 0000000..87851f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'power' sub-module of DAL.
+#
+
+MOD_POWER = power_helpers.o
+
+AMD_DAL_MOD_POWER = $(addprefix $(AMDDALPATH)/modules/power/,$(MOD_POWER))
+#$(info ************  DAL POWER MODULE MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MOD_POWER)
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
new file mode 100644
index 0000000..00f63b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -0,0 +1,326 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "power_helpers.h"
+#include "dc/inc/hw/dmcu.h"
+
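+/* Note: despite its name, this rounds to the nearest integer, not up. */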
+#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
+
+/* Possible Min Reduction configs from least aggressive to most aggressive
+ *  0    1     2     3     4     5     6     7     8     9     10    11   12
+ * 100  98.0 94.1  94.1  85.1  80.3  75.3  69.4  60.0  57.6  50.2  49.8  40.0 %
+ */
+static const unsigned char min_reduction_table[13] = {
+0xff, 0xfa, 0xf0, 0xf0, 0xd9, 0xcd, 0xc0, 0xb1, 0x99, 0x93, 0x80, 0x82, 0x66};
+
+/* Possible Max Reduction configs from least aggressive to most aggressive
+ *  0    1     2     3     4     5     6     7     8     9     10    11   12
+ * 96.1 89.8 85.1  80.3  69.4  64.7  64.7  50.2  39.6  30.2  30.2  30.2  19.6 %
+ */
+static const unsigned char max_reduction_table[13] = {
+0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
+
+/* Predefined ABM configuration sets. We may have different configuration sets
+ * in order to satisfy different power/quality requirements.
+ */
+static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_level] = {
+/*  ABM Level 1,    ABM Level 2,    ABM Level 3,    ABM Level 4 */
+{       2,              5,              7,              8       },	/* Default - Medium aggressiveness */
+{       2,              5,              8,              11      },	/* Alt #1  - Increased aggressiveness */
+{       0,              2,              4,              8       },	/* Alt #2  - Minimal aggressiveness */
+{       3,              6,              10,             12      },	/* Alt #3  - Super aggressiveness */
+};
+
+#define NUM_AMBI_LEVEL    5
+#define NUM_AGGR_LEVEL    4
+#define NUM_POWER_FN_SEGS 8
+#define NUM_BL_CURVE_SEGS 16
+
+/* NOTE: iRAM is 256B in size */
+struct iram_table_v_2 {
+	/* flags                      */
+	uint16_t flags;							/* 0x00 U16  */
+
+	/* parameters for ABM2.0 algorithm */
+	uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x02 U0.8 */
+	uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x16 U0.8 */
+	uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];	/* 0x2a U2.6 */
+	uint8_t bright_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];	/* 0x3e U2.6 */
+	uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x52 U2.6 */
+	uint8_t dark_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x66 U2.6 */
+	uint8_t iir_curve[NUM_AMBI_LEVEL];				/* 0x7a U0.8 */
+	uint8_t deviation_gain;						/* 0x7f U0.8 */
+
+	/* parameters for crgb conversion */
+	uint16_t crgb_thresh[NUM_POWER_FN_SEGS];			/* 0x80 U3.13 */
+	uint16_t crgb_offset[NUM_POWER_FN_SEGS];			/* 0x90 U1.15 */
+	uint16_t crgb_slope[NUM_POWER_FN_SEGS];				/* 0xa0 U4.12 */
+
+	/* parameters for custom curve */
+	/* thresholds for brightness --> backlight */
+	uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS];		/* 0xb0 U16.0 */
+	/* offsets for brightness --> backlight */
+	uint16_t backlight_offsets[NUM_BL_CURVE_SEGS];			/* 0xd0 U16.0 */
+
+	/* For reading PSR State directly from IRAM */
+	uint8_t psr_state;						/* 0xf0       */
+	uint8_t dmcu_interface_version;					/* 0xf1       */
+	uint8_t dmcu_date_version_year_b0;				/* 0xf2       */
+	uint8_t dmcu_date_version_year_b1;				/* 0xf3       */
+	uint8_t dmcu_date_version_month;				/* 0xf4       */
+	uint8_t dmcu_date_version_day;					/* 0xf5       */
+	uint8_t dmcu_state;						/* 0xf6       */
+
+	uint16_t blRampReduction;					/* 0xf7       */
+	uint16_t blRampStart;						/* 0xf9       */
+	uint8_t dummy5;							/* 0xfb       */
+	uint8_t dummy6;							/* 0xfc       */
+	uint8_t dummy7;							/* 0xfd       */
+	uint8_t dummy8;							/* 0xfe       */
+	uint8_t dummy9;							/* 0xff       */
+};
+
+static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
+{
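+	/* Multiplying by 0x101 replicates the byte into both halves
+	 * (e.g. 0xff -> 0xffff), scaling 8-bit to the full 16-bit range.
+	 */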
+	return (uint16_t)(backlight_8bit * 0x101);
+}
+
+static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
+		struct iram_table_v_2 *table)
+{
+	unsigned int i;
+	unsigned int num_entries = NUM_BL_CURVE_SEGS;
+	unsigned int query_input_8bit;
+	unsigned int query_output_8bit;
+	unsigned int lut_index;
+
+	table->backlight_thresholds[0] = 0;
+	table->backlight_offsets[0] = params.backlight_lut_array[0];
+	table->backlight_thresholds[num_entries-1] = 0xFFFF;
+	table->backlight_offsets[num_entries-1] =
+		params.backlight_lut_array[params.backlight_lut_array_size - 1];
+
+	/* Set up all brightness levels between 0% and 100% exclusive.
+	 * Fills the brightness-to-backlight transform table. The backlight
+	 * custom curve describes the transform from brightness to backlight
+	 * and is defined as a set of thresholds and a set of offsets that
+	 * together imply interpolation of the custom curve into 16 uniformly
+	 * spaced linear segments. Each threshold/offset is represented by a
+	 * 16-bit entry.
+	 */
+	for (i = 1; i+1 < num_entries; i++) {
+		query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
+
+		lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
+		ASSERT(lut_index < params.backlight_lut_array_size);
+		query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
+
+		table->backlight_thresholds[i] =
+				backlight_8_to_16(query_input_8bit);
+		table->backlight_offsets[i] =
+				backlight_8_to_16(query_output_8bit);
+	}
+}
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+	struct dmcu_iram_parameters params)
+{
+	struct iram_table_v_2 ram_table;
+	unsigned int set = params.set;
+	unsigned int i, j;
+
+	if (dmcu == NULL)
+		return false;
+
+	if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+		return true;
+
+	memset(&ram_table, 0, sizeof(ram_table));
+
+	ram_table.flags = 0x0;
+	ram_table.deviation_gain = 0xb3;
+
+	ram_table.blRampReduction =
+		cpu_to_be16(params.backlight_ramping_reduction);
+	ram_table.blRampStart =
+		cpu_to_be16(params.backlight_ramping_start);
+
+	/* Pick min/max reduction for each aggressiveness level; the same
+	 * setting is used across all five ambient light levels.
+	 */
+	for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+		for (j = 0; j < NUM_AMBI_LEVEL; j++) {
+			ram_table.min_reduction[j][i] =
+				min_reduction_table[abm_config[set][i]];
+			ram_table.max_reduction[j][i] =
+				max_reduction_table[abm_config[set][i]];
+		}
+	}
+
+	/* Fixed gains and IIR curve: positive brightness gain of 0x20
+	 * everywhere, all other gains zero (as the earlier memset left them).
+	 */
+	for (i = 0; i < NUM_AMBI_LEVEL; i++) {
+		for (j = 0; j < NUM_AGGR_LEVEL; j++) {
+			ram_table.bright_pos_gain[i][j] = 0x20;
+			ram_table.bright_neg_gain[i][j] = 0x00;
+			ram_table.dark_pos_gain[i][j] = 0x00;
+			ram_table.dark_neg_gain[i][j] = 0x00;
+		}
+		ram_table.iir_curve[i] = 0x65;
+	}
+	ram_table.crgb_thresh[0] = cpu_to_be16(0x13b6);
+	ram_table.crgb_thresh[1] = cpu_to_be16(0x1648);
+	ram_table.crgb_thresh[2] = cpu_to_be16(0x18e3);
+	ram_table.crgb_thresh[3] = cpu_to_be16(0x1b41);
+	ram_table.crgb_thresh[4] = cpu_to_be16(0x1d46);
+	ram_table.crgb_thresh[5] = cpu_to_be16(0x1f21);
+	ram_table.crgb_thresh[6] = cpu_to_be16(0x2167);
+	ram_table.crgb_thresh[7] = cpu_to_be16(0x2384);
+	ram_table.crgb_offset[0] = cpu_to_be16(0x2999);
+	ram_table.crgb_offset[1] = cpu_to_be16(0x3999);
+	ram_table.crgb_offset[2] = cpu_to_be16(0x4666);
+	ram_table.crgb_offset[3] = cpu_to_be16(0x5999);
+	ram_table.crgb_offset[4] = cpu_to_be16(0x6333);
+	ram_table.crgb_offset[5] = cpu_to_be16(0x7800);
+	ram_table.crgb_offset[6] = cpu_to_be16(0x8c00);
+	ram_table.crgb_offset[7] = cpu_to_be16(0xa000);
+	ram_table.crgb_slope[0]  = cpu_to_be16(0x3147);
+	ram_table.crgb_slope[1]  = cpu_to_be16(0x2978);
+	ram_table.crgb_slope[2]  = cpu_to_be16(0x23a2);
+	ram_table.crgb_slope[3]  = cpu_to_be16(0x1f55);
+	ram_table.crgb_slope[4]  = cpu_to_be16(0x1c63);
+	ram_table.crgb_slope[5]  = cpu_to_be16(0x1a0f);
+	ram_table.crgb_slope[6]  = cpu_to_be16(0x178d);
+	ram_table.crgb_slope[7]  = cpu_to_be16(0x15ab);
+
+	fill_backlight_transform_table(
+			params, &ram_table);
+
+	return dmcu->funcs->load_iram(
+			dmcu, 0, (char *)(&ram_table), sizeof(ram_table));
+}
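
dmcu_load_iram() reports success without doing anything when the DMCU is not
yet initialized, so a caller does not need its own readiness check. A minimal
caller sketch, assuming a hypothetical 17-point backlight LUT; only
dmcu_load_iram() and struct dmcu_iram_parameters come from this patch:

    static bool example_setup_abm(struct dmcu *dmcu)
    {
    	static unsigned int lut[17] = {
    		0x0000, 0x1000, 0x2000, 0x3000, 0x4000, 0x5000,
    		0x6000, 0x7000, 0x8000, 0x9000, 0xa000, 0xb000,
    		0xc000, 0xd000, 0xe000, 0xf000, 0xffff,
    	};
    	struct dmcu_iram_parameters params = {
    		.backlight_lut_array = lut,
    		.backlight_lut_array_size = ARRAY_SIZE(lut),
    		.backlight_ramping_reduction = 0,
    		.backlight_ramping_start = 0,
    		.set = 0,	/* default ABM aggressiveness config */
    	};

    	return dmcu_load_iram(dmcu, params);
    }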
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
new file mode 100644
index 0000000..da5df00f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -0,0 +1,47 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_POWER_POWER_HELPERS_H_
+#define MODULES_POWER_POWER_HELPERS_H_
+
+#include "dc/inc/hw/dmcu.h"
+
+
+enum abm_defines {
+	abm_defines_max_level = 4,
+	abm_defines_max_config = 4,
+};
+
+struct dmcu_iram_parameters {
+	unsigned int *backlight_lut_array;
+	unsigned int backlight_lut_array_size;
+	unsigned int backlight_ramping_reduction;
+	unsigned int backlight_ramping_start;
+	unsigned int set;
+};
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+		struct dmcu_iram_parameters params);
+
+#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 9b9699f..c72cbfe 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -52,6 +52,30 @@ struct atif_sbios_requests {
 	u8 backlight_level;	/* panel backlight level (0-255) */
 } __packed;
 
+struct atif_qbtc_arguments {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u8 requested_display;	/* which display is requested */
+} __packed;
+
+#define ATIF_QBTC_MAX_DATA_POINTS 99
+
+struct atif_qbtc_data_point {
+	u8 luminance;		/* luminance in percent */
+	u8 input_signal;	/* input signal in range 0-255 */
+} __packed;
+
+struct atif_qbtc_output {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 flags;		/* all zeroes */
+	u8 error_code;		/* error code */
+	u8 ac_level;		/* default brightness on AC power */
+	u8 dc_level;		/* default brightness on DC power */
+	u8 min_input_signal;	/* min input signal in range 0-255 */
+	u8 max_input_signal;	/* max input signal in range 0-255 */
+	u8 number_of_points;	/* number of data points */
+	struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS];
+} __packed;
+
 #define ATIF_NOTIFY_MASK	0x3
 #define ATIF_NOTIFY_NONE	0
 #define ATIF_NOTIFY_81		1
@@ -126,26 +150,18 @@ struct atcs_pref_req_output {
  * DWORD - supported functions bit vector
  */
 /* Notifications mask */
-#       define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED               (1 << 0)
-#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED        (1 << 1)
 #       define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED         (1 << 2)
 #       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED    (1 << 3)
 #       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED   (1 << 4)
-#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED          (1 << 5)
-#       define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED                (1 << 6)
 #       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED      (1 << 7)
 #       define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED                   (1 << 8)
+#       define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED      (1 << 12)
 /* supported functions vector */
 #       define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED               (1 << 0)
 #       define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED            (1 << 1)
-#       define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED              (1 << 2)
-#       define ATIF_GET_LID_STATE_SUPPORTED                       (1 << 3)
-#       define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED           (1 << 4)
-#       define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED             (1 << 5)
-#       define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED  (1 << 6)
-#       define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED    (1 << 7)
 #       define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED     (1 << 12)
-#       define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED           (1 << 14)
+#       define ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED (1 << 15)
+#       define ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED        (1 << 16)
 #       define ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED        (1 << 20)
 #define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS                        0x1
 /* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
@@ -170,6 +186,10 @@ struct atcs_pref_req_output {
  * n (0xd0-0xd9) is specified in notify command code.
  * bit 2:
 * 1 - lid changes not reported through int10
+ * bit 3:
+ * 1 - system bios controls overclocking
+ * bit 4:
+ * 1 - enable overclocking
  */
 #define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS                     0x2
 /* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
@@ -177,28 +197,23 @@ struct atcs_pref_req_output {
  * OUTPUT:
  * WORD  - structure size in bytes (includes size field)
  * DWORD - pending sbios requests
- * BYTE  - panel expansion mode
+ * BYTE  - reserved (all zeroes)
  * BYTE  - thermal state: target gfx controller
  * BYTE  - thermal state: state id (0: exit state, non-0: state)
  * BYTE  - forced power state: target gfx controller
- * BYTE  - forced power state: state id
+ * BYTE  - forced power state: state id (0: forced state, non-0: state)
  * BYTE  - system power source
  * BYTE  - panel backlight level (0-255)
+ * BYTE  - GPU package power limit: target gfx controller
+ * DWORD - GPU package power limit: value (24:8 fractional format, Watts)
  */
 /* pending sbios requests */
-#       define ATIF_DISPLAY_SWITCH_REQUEST                         (1 << 0)
-#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST                  (1 << 1)
 #       define ATIF_THERMAL_STATE_CHANGE_REQUEST                   (1 << 2)
 #       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST              (1 << 3)
 #       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST             (1 << 4)
-#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST                    (1 << 5)
-#       define ATIF_PX_GFX_SWITCH_REQUEST                          (1 << 6)
 #       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST                (1 << 7)
 #       define ATIF_DGPU_DISPLAY_EVENT                             (1 << 8)
-/* panel expansion mode */
-#       define ATIF_PANEL_EXPANSION_DISABLE                        0
-#       define ATIF_PANEL_EXPANSION_FULL                           1
-#       define ATIF_PANEL_EXPANSION_ASPECT                         2
+#       define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST                (1 << 12)
 /* target gfx controller */
 #       define ATIF_TARGET_GFX_SINGLE                              0
 #       define ATIF_TARGET_GFX_PX_IGPU                             1
@@ -208,76 +223,6 @@ struct atcs_pref_req_output {
 #       define ATIF_POWER_SOURCE_DC                                2
 #       define ATIF_POWER_SOURCE_RESTRICTED_AC_1                   3
 #       define ATIF_POWER_SOURCE_RESTRICTED_AC_2                   4
-#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS                       0x3
-/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
- * ARG1:
- * WORD  - structure size in bytes (includes size field)
- * WORD  - selected displays
- * WORD  - connected displays
- * OUTPUT:
- * WORD  - structure size in bytes (includes size field)
- * WORD  - selected displays
- */
-#       define ATIF_LCD1                                           (1 << 0)
-#       define ATIF_CRT1                                           (1 << 1)
-#       define ATIF_TV                                             (1 << 2)
-#       define ATIF_DFP1                                           (1 << 3)
-#       define ATIF_CRT2                                           (1 << 4)
-#       define ATIF_LCD2                                           (1 << 5)
-#       define ATIF_DFP2                                           (1 << 7)
-#       define ATIF_CV                                             (1 << 8)
-#       define ATIF_DFP3                                           (1 << 9)
-#       define ATIF_DFP4                                           (1 << 10)
-#       define ATIF_DFP5                                           (1 << 11)
-#       define ATIF_DFP6                                           (1 << 12)
-#define ATIF_FUNCTION_GET_LID_STATE                                0x4
-/* ARG0: ATIF_FUNCTION_GET_LID_STATE
- * ARG1: none
- * OUTPUT:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - lid state (0: open, 1: closed)
- *
- * GET_LID_STATE only works at boot and resume, for general lid
- * status, use the kernel provided status
- */
-#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS                    0x5
-/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - 0
- * BYTE  - TV standard
- */
-#       define ATIF_TV_STD_NTSC                                    0
-#       define ATIF_TV_STD_PAL                                     1
-#       define ATIF_TV_STD_PALM                                    2
-#       define ATIF_TV_STD_PAL60                                   3
-#       define ATIF_TV_STD_NTSCJ                                   4
-#       define ATIF_TV_STD_PALCN                                   5
-#       define ATIF_TV_STD_PALN                                    6
-#       define ATIF_TV_STD_SCART_RGB                               9
-#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS                      0x6
-/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
- * ARG1:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - 0
- * BYTE  - TV standard
- * OUTPUT: none
- */
-#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS           0x7
-/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - panel expansion mode
- */
-#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS             0x8
-/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
- * ARG1:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - panel expansion mode
- * OUTPUT: none
- */
 #define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION              0xD
 /* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
  * ARG1:
@@ -286,21 +231,43 @@ struct atcs_pref_req_output {
 * BYTE  - current temperature (degrees Celsius)
  * OUTPUT: none
  */
-#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES                    0xF
-/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
- * ARG1: none
+#define ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS    0x10
+/* ARG0: ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - requested display
  * OUTPUT:
- * WORD  - number of gfx devices
- * WORD  - device structure size in bytes (excludes device size field)
- * DWORD - flags         \
- * WORD  - bus number     } repeated structure
- * WORD  - device number /
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - flags (currently all 16 bits are reserved)
+ * BYTE  - error code (on failure, disregard all below fields)
+ * BYTE  - AC level (default brightness in percent when machine has full power)
+ * BYTE  - DC level (default brightness in percent when machine is on battery)
+ * BYTE  - min input signal, in range 0-255, corresponding to 0% backlight
+ * BYTE  - max input signal, in range 0-255, corresponding to 100% backlight
+ * BYTE  - number of reported data points
+ * BYTE  - luminance level in percent  \ repeated structure
+ * BYTE  - input signal in range 0-255 / does not have entries for 0% and 100%
  */
-/* flags */
-#       define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE                   (1 << 0)
-#       define ATIF_XGP_PORT                                       (1 << 1)
-#       define ATIF_VGA_ENABLED_GRAPHICS_DEVICE                    (1 << 2)
-#       define ATIF_XGP_PORT_IN_DOCK                               (1 << 3)
+/* requested display */
+#       define ATIF_QBTC_REQUEST_LCD1                              0
+#       define ATIF_QBTC_REQUEST_CRT1                              1
+#       define ATIF_QBTC_REQUEST_DFP1                              3
+#       define ATIF_QBTC_REQUEST_CRT2                              4
+#       define ATIF_QBTC_REQUEST_LCD2                              5
+#       define ATIF_QBTC_REQUEST_DFP2                              7
+#       define ATIF_QBTC_REQUEST_DFP3                              9
+#       define ATIF_QBTC_REQUEST_DFP4                              10
+#       define ATIF_QBTC_REQUEST_DFP5                              11
+#       define ATIF_QBTC_REQUEST_DFP6                              12
+/* error code */
+#       define ATIF_QBTC_ERROR_CODE_SUCCESS                        0
+#       define ATIF_QBTC_ERROR_CODE_FAILURE                        1
+#       define ATIF_QBTC_ERROR_CODE_DEVICE_NOT_SUPPORTED           2
+#define ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION                 0x11
+/* ARG0: ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
+ */
 #define ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION                 0x15
 /* ARG0: ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION
  * ARG1: none
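
The QBTC output above feeds naturally into backlight LUT construction: the
curve endpoints come from the min/max input signal fields and the interior
points from the data_points array. A hedged consumer sketch; the ACPI call
that fills 'out' is assumed to have already happened:

    static int example_parse_qbtc(const struct atif_qbtc_output *out)
    {
    	int i;

    	if (out->error_code != ATIF_QBTC_ERROR_CODE_SUCCESS)
    		return -EIO;
    	if (out->number_of_points > ATIF_QBTC_MAX_DATA_POINTS)
    		return -EINVAL;

    	/* Endpoints are implicit: 0% maps to min_input_signal and
    	 * 100% maps to max_input_signal; data points fill in between.
    	 */
    	pr_debug("backlight curve: 0%% -> %u, 100%% -> %u\n",
    		 out->min_input_signal, out->max_input_signal);
    	for (i = 0; i < out->number_of_points; i++)
    		pr_debug("  %u%% -> %u\n", out->data_points[i].luminance,
    			 out->data_points[i].input_signal);
    	return 0;
    }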
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d6aa1d4..b68c2e0 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -300,7 +300,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -387,7 +387,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -405,7 +405,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -422,7 +422,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -438,7 +438,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -505,7 +505,7 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 		return;
 
 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -522,7 +522,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -540,7 +540,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -558,7 +558,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -594,7 +594,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -720,7 +720,7 @@ static int pp_dpm_force_clock_level(void *handle,
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -745,7 +745,7 @@ static int pp_dpm_print_clock_levels(void *handle,
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -763,7 +763,7 @@ static int pp_dpm_get_sclk_od(void *handle)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -781,7 +781,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -800,7 +800,7 @@ static int pp_dpm_get_mclk_od(void *handle)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -818,7 +818,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -878,7 +878,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return snprintf(buf, PAGE_SIZE, "\n");
 	}
 
@@ -894,7 +894,7 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 		return ret;
 
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return ret;
 	}
 
@@ -917,7 +917,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -935,7 +935,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -972,7 +972,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1212,7 +1212,7 @@ static int pp_dpm_powergate_mmhub(void *handle)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -1227,7 +1227,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
 		return 0;
 
 	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -1242,7 +1242,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 
@@ -1257,7 +1257,7 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 
@@ -1303,7 +1303,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 88f6b35..5dcd21d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -269,7 +269,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 					hwmgr->dyn_state.mvdd_dependency_on_mclk);
 
 		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 MVDD table from dependancy table.",
+				"Failed to retrieve SVI2 MVDD table from dependency table.",
 				return result;);
 	}
 
@@ -288,7 +288,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
 					hwmgr->dyn_state.vddci_dependency_on_mclk);
 		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 VDDCI table from dependancy table.",
+				"Failed to retrieve SVI2 VDDCI table from dependency table.",
 				return result);
 	}
 
@@ -317,7 +317,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 				table_info->vddc_lookup_table);
 
 		PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
+			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
 	}
 
 	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index fef111d..553a203 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -1228,17 +1228,14 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 
 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
-	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
-		smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
+	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
 		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
-	}
 	return 0;
 }
 
 static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
 	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
-		smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
 		return smum_send_msg_to_smc_with_parameter(
 			hwmgr,
 			PPSMC_MSG_UVDPowerON,
@@ -1995,6 +1992,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
 	.power_state_set = smu8_set_power_state_tasks,
 	.dynamic_state_management_disable = smu8_disable_dpm_tasks,
 	.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
+	.update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable,
 	.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
 };
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 07d180ce..fb0f96f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -317,6 +317,9 @@ struct pp_hwmgr_func {
 					uint32_t mc_addr_low,
 					uint32_t mc_addr_hi,
 					uint32_t size);
+	int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr,
+					bool enable,
+					bool lock);
 	int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
 					struct PP_TemperatureRange *range);
 	int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
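
With the NB DPM toggle moved out of the UVD power handlers and into a hwmgr
callback, callers are expected to follow the usual powerplay convention of
checking the function pointer before use. A sketch of that call pattern; the
wrapper name is hypothetical:

    static int example_update_nbdpm_pstate(struct pp_hwmgr *hwmgr,
    				       bool enable, bool lock)
    {
    	if (hwmgr->hwmgr_func->update_nbdpm_pstate == NULL)
    		return 0;

    	return hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, enable, lock);
    }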
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 0876c69..c408898 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -433,6 +433,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
 		drm_property_blob_put(mode);
 		return ret;
+	} else if (property == config->prop_vrr_enabled) {
+		state->vrr_enabled = val;
 	} else if (property == config->degamma_lut_property) {
 		ret = drm_atomic_replace_property_blob_from_id(dev,
 					&state->degamma_lut,
@@ -491,6 +493,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
 		*val = state->active;
 	else if (property == config->prop_mode_id)
 		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+	else if (property == config->prop_vrr_enabled)
+		*val = state->vrr_enabled;
 	else if (property == config->degamma_lut_property)
 		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
 	else if (property == config->ctm_property)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index fa9baac..da8ae80 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1279,6 +1279,105 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
 EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
 
 /**
+ * DOC: Variable refresh properties
+ *
+ * Variable refresh rate capable displays can dynamically adjust their
+ * refresh rate by extending the duration of their vertical front porch
+ * until page flip or timeout occurs. This can reduce or remove stuttering
+ * and latency in scenarios where the page flip does not align with the
+ * vblank interval.
+ *
+ * An example scenario would be an application flipping at a constant rate
+ * of 48Hz on a 60Hz display. The page flip will frequently miss the vblank
+ * interval and the same contents will be displayed twice. This can be
+ * observed as stuttering for content with motion.
+ *
+ * If variable refresh rate were active on a display that supported a
+ * variable refresh range from 35Hz to 60Hz, no stuttering would be
+ * observable for the example scenario. The minimum supported variable
+ * refresh rate of 35Hz is below the page flip frequency, so the vertical
+ * front porch can be extended until the page flip occurs. The vblank
+ * interval will then be directly aligned to the page flip rate.
+ *
+ * Not all userspace content is suitable for use with variable refresh rate.
+ * Large and frequent changes in vertical front porch duration may worsen
+ * perceived stuttering for input sensitive applications.
+ *
+ * Panel brightness will also vary with vertical front porch duration. Some
+ * panels may have noticeable differences in brightness between the minimum
+ * vertical front porch duration and the maximum vertical front porch duration.
+ * Large and frequent changes in vertical front porch duration may produce
+ * observable flickering for such panels.
+ *
+ * Userspace control for variable refresh rate is supported via properties
+ * on the &drm_connector and &drm_crtc objects.
+ *
+ * "vrr_capable":
+ *	Optional &drm_connector boolean property that drivers should attach
+ *	with drm_connector_attach_vrr_capable_property() on connectors that
+ *	could support variable refresh rates. Drivers should update the
+ *	property value by calling drm_connector_set_vrr_capable_property().
+ *
+ *	Absence of the property should indicate absence of support.
+ *
+ * "vrr_enabled":
+ *	Default &drm_crtc boolean property that notifies the driver that the
+ *	content on the CRTC is suitable for variable refresh rate presentation.
+ *	The driver will take this property as a hint to enable variable
+ *	refresh rate support if the receiver supports it, i.e. if the
+ *	"vrr_capable" property is true on the &drm_connector object. The
+ *	vertical front porch duration will be extended until page-flip or
+ *	timeout when enabled.
+ *
+ *	The minimum vertical front porch duration is defined as the vertical
+ *	front porch duration for the current mode.
+ *
+ *	The maximum vertical front porch duration is greater than or equal to
+ *	the minimum vertical front porch duration. The duration is derived
+ *	from the minimum supported variable refresh rate for the connector.
+ *
+ *	The driver may place further restrictions within these minimum
+ *	and maximum bounds.
+ *
+ *	The semantics for the vertical blank timestamp differ when
+ *	variable refresh rate is active. The vertical blank timestamp
+ *	is defined to be an estimate using the current mode's fixed
+ *	refresh rate timings. The semantics for the page-flip event
+ *	timestamp remain the same.
+ */
+
+/**
+ * drm_connector_attach_vrr_capable_property - creates the
+ * vrr_capable property
+ * @connector: connector to create the vrr_capable property on.
+ *
+ * This is used by atomic drivers to add support for querying
+ * variable refresh rate capability for a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_vrr_capable_property(
+	struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_property *prop;
+
+	if (!connector->vrr_capable_property) {
+		prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE,
+			"vrr_capable");
+		if (!prop)
+			return -ENOMEM;
+
+		connector->vrr_capable_property = prop;
+		drm_object_attach_property(&connector->base, prop, 0);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
+
+/**
  * drm_connector_attach_scaling_mode_property - attach atomic scaling mode property
  * @connector: connector to attach scaling mode property on.
  * @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
@@ -1641,6 +1740,24 @@ int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
 EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
 
 /**
+ * drm_connector_set_vrr_capable_property - sets the variable refresh rate
+ * capable property for a connector
+ * @connector: drm connector
+ * @capable: True if the connector is variable refresh rate capable
+ *
+ * Should be used by atomic drivers to update the indicated support for
+ * variable refresh rate over a connector.
+ */
+void drm_connector_set_vrr_capable_property(
+		struct drm_connector *connector, bool capable)
+{
+	drm_object_property_set_value(&connector->base,
+				      connector->vrr_capable_property,
+				      capable);
+}
+EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
+
+/**
  * drm_connector_init_panel_orientation_property -
  *	initialize the connecters panel_orientation property
  * @connector: connector for which to init the panel-orientation property.
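
Tying the two vrr helpers together, the expected driver-side flow is to
attach the immutable property at connector init and update it once the sink's
capability is known, for example after parsing the EDID. A sketch; the hook
names and the capability source are assumptions:

    static int example_connector_init(struct drm_connector *connector)
    {
    	/* attach "vrr_capable", which defaults to false */
    	return drm_connector_attach_vrr_capable_property(connector);
    }

    static void example_connector_probed(struct drm_connector *connector,
    				     bool sink_supports_vrr)
    {
    	/* reflect what was learned from the sink */
    	drm_connector_set_vrr_capable_property(connector, sink_supports_vrr);
    }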
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 268a182..6f8ddfc 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -340,6 +340,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
 		drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
 		drm_object_attach_property(&crtc->base,
 					   config->prop_out_fence_ptr, 0);
+		drm_object_attach_property(&crtc->base,
+					   config->prop_vrr_enabled, 0);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 6d48348..2d6c491 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1428,17 +1428,19 @@ u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
 }
 EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
 
-u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+					 u8 dsc_bpc[3])
 {
+	int num_bpc = 0;
 	u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
 
 	if (color_depth & DP_DSC_12_BPC)
-		return 12;
+		dsc_bpc[num_bpc++] = 12;
 	if (color_depth & DP_DSC_10_BPC)
-		return 10;
+		dsc_bpc[num_bpc++] = 10;
 	if (color_depth & DP_DSC_8_BPC)
-		return 8;
+		dsc_bpc[num_bpc++] = 8;
 
-	return 0;
+	return num_bpc;
 }
-EXPORT_SYMBOL(drm_dp_dsc_sink_max_color_depth);
+EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
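
Returning the full list of supported input bpcs instead of only the maximum
lets a caller intersect sink support with its own pipeline limits. A hedged
selection sketch; the pipe_max_bpc parameter is an assumption:

    static int example_pick_dsc_input_bpc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
    				      u8 pipe_max_bpc)
    {
    	u8 dsc_bpc[3];
    	int i, num_bpc;

    	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
    	for (i = 0; i < num_bpc; i++) {
    		/* entries are ordered from highest to lowest bpc */
    		if (dsc_bpc[i] <= pipe_max_bpc)
    			return dsc_bpc[i];
    	}

    	return 0;	/* no mutually supported bpc */
    }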
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
new file mode 100644
index 0000000..bc2b23a
--- /dev/null
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corp
+ *
+ * Author:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/byteorder/generic.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dsc.h>
+
+/**
+ * DOC: dsc helpers
+ *
+ * These functions contain some common logic and helpers to deal with VESA
+ * Display Stream Compression standard required for DSC on Display Port/eDP or
+ * MIPI display interfaces.
+ */
+
+/**
+ * drm_dsc_dp_pps_header_init() - Initializes the PPS Header
+ * for DisplayPort as per the DP 1.4 spec.
+ * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
+ */
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+{
+	memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+
+	pps_sdp->pps_header.HB1 = DP_SDP_PPS;
+	pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+}
+EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
+
+/**
+ * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * using the DSC configuration parameters in the order expected
+ * by the DSC Display Sink device. For the DSC, the sink device
+ * expects the PPS payload in the big endian format for the fields
+ * that span more than 1 byte.
+ *
+ * @pps_sdp:
+ * Secondary data packet for DSC Picture Parameter Set
+ * @dsc_cfg:
+ * DSC Configuration data filled by driver
+ */
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+				const struct drm_dsc_config *dsc_cfg)
+{
+	int i;
+
+	/* Protect against someone accidentally changing struct size */
+	BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+		     DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
+
+	memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+
+	/* PPS 0 */
+	pps_sdp->pps_payload.dsc_version =
+		dsc_cfg->dsc_version_minor |
+		dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
+
+	/* PPS 1, 2 is 0 */
+
+	/* PPS 3 */
+	pps_sdp->pps_payload.pps_3 =
+		dsc_cfg->line_buf_depth |
+		dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
+
+	/* PPS 4 */
+	pps_sdp->pps_payload.pps_4 =
+		((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
+		 DSC_PPS_MSB_SHIFT) |
+		dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
+		dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+		dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
+		dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
+
+	/* PPS 5 */
+	pps_sdp->pps_payload.bits_per_pixel_low =
+		(dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
+
+	/*
+	 * The DSC panel expects the PPS packet to be big endian for fields
+	 * spanning 2 bytes. The cpu_to_be16() macro handles this: on a
+	 * little-endian host it swaps the bytes, otherwise it leaves the
+	 * value unchanged.
+	 */
+
+	/* PPS 6, 7 */
+	pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+
+	/* PPS 8, 9 */
+	pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+
+	/* PPS 10, 11 */
+	pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+
+	/* PPS 12, 13 */
+	pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+
+	/* PPS 14, 15 */
+	pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+
+	/* PPS 16 */
+	pps_sdp->pps_payload.initial_xmit_delay_high =
+		((dsc_cfg->initial_xmit_delay &
+		  DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
+		 DSC_PPS_MSB_SHIFT);
+
+	/* PPS 17 */
+	pps_sdp->pps_payload.initial_xmit_delay_low =
+		(dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
+
+	/* PPS 18, 19 */
+	pps_sdp->pps_payload.initial_dec_delay =
+		cpu_to_be16(dsc_cfg->initial_dec_delay);
+
+	/* PPS 20 is 0 */
+
+	/* PPS 21 */
+	pps_sdp->pps_payload.initial_scale_value =
+		dsc_cfg->initial_scale_value;
+
+	/* PPS 22, 23 */
+	pps_sdp->pps_payload.scale_increment_interval =
+		cpu_to_be16(dsc_cfg->scale_increment_interval);
+
+	/* PPS 24 */
+	pps_sdp->pps_payload.scale_decrement_interval_high =
+		((dsc_cfg->scale_decrement_interval &
+		  DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
+		 DSC_PPS_MSB_SHIFT);
+
+	/* PPS 25 */
+	pps_sdp->pps_payload.scale_decrement_interval_low =
+		(dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
+
+	/* PPS 26[7:0], PPS 27[7:5] RESERVED */
+
+	/* PPS 27 */
+	pps_sdp->pps_payload.first_line_bpg_offset =
+		dsc_cfg->first_line_bpg_offset;
+
+	/* PPS 28, 29 */
+	pps_sdp->pps_payload.nfl_bpg_offset =
+		cpu_to_be16(dsc_cfg->nfl_bpg_offset);
+
+	/* PPS 30, 31 */
+	pps_sdp->pps_payload.slice_bpg_offset =
+		cpu_to_be16(dsc_cfg->slice_bpg_offset);
+
+	/* PPS 32, 33 */
+	pps_sdp->pps_payload.initial_offset =
+		cpu_to_be16(dsc_cfg->initial_offset);
+
+	/* PPS 34, 35 */
+	pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+
+	/* PPS 36 */
+	pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+
+	/* PPS 37 */
+	pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+
+	/* PPS 38, 39 */
+	pps_sdp->pps_payload.rc_model_size =
+		cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+
+	/* PPS 40 */
+	pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+
+	/* PPS 41 */
+	pps_sdp->pps_payload.rc_quant_incr_limit0 =
+		dsc_cfg->rc_quant_incr_limit0;
+
+	/* PPS 42 */
+	pps_sdp->pps_payload.rc_quant_incr_limit1 =
+		dsc_cfg->rc_quant_incr_limit1;
+
+	/* PPS 43 */
+	pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+		DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
+
+	/* PPS 44 - 57 */
+	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
+		pps_sdp->pps_payload.rc_buf_thresh[i] =
+			dsc_cfg->rc_buf_thresh[i];
+
+	/* PPS 58 - 87 */
+	/*
+	 * For DSC sink programming the RC Range parameter fields
+	 * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
+	 */
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+		pps_sdp->pps_payload.rc_range_parameters[i] =
+			((dsc_cfg->rc_range_params[i].range_min_qp <<
+			  DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+			 (dsc_cfg->rc_range_params[i].range_max_qp <<
+			  DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+			 (dsc_cfg->rc_range_params[i].range_bpg_offset));
+		pps_sdp->pps_payload.rc_range_parameters[i] =
+			cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+	}
+
+	/* PPS 88 */
+	pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+		dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
+
+	/* PPS 89 */
+	pps_sdp->pps_payload.second_line_bpg_offset =
+		dsc_cfg->second_line_bpg_offset;
+
+	/* PPS 90, 91 */
+	pps_sdp->pps_payload.nsl_bpg_offset =
+		cpu_to_be16(dsc_cfg->nsl_bpg_offset);
+
+	/* PPS 92, 93 */
+	pps_sdp->pps_payload.second_line_offset_adj =
+		cpu_to_be16(dsc_cfg->second_line_offset_adj);
+
+	/* PPS 94 - 127 are 0 */
+}
+EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
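
The two comments above carry the whole packing scheme: any PPS field wider
than one byte is stored big endian, and each RC range parameter packs three
subfields into 16 bits. A standalone sketch of that packing, with
hypothetical helper names that are not part of this patch:

	#include <stdint.h>

	/* One RC range parameter: min_qp[15:11], max_qp[10:6], offset[5:0]. */
	static uint16_t pack_rc_range(uint8_t min_qp, uint8_t max_qp,
				      int8_t bpg_offset)
	{
		return (uint16_t)((min_qp << 11) | (max_qp << 6) |
				  (bpg_offset & 0x3f));
	}

	/* Store a 16-bit field MSB first, i.e. what cpu_to_be16() yields. */
	static void store_be16(uint8_t *pps, unsigned int off, uint16_t val)
	{
		pps[off] = val >> 8;
		pps[off + 1] = val & 0xff;
	}
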
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 05cd5e9..703bfce 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -316,6 +316,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
 		return -ENOMEM;
 	dev->mode_config.prop_mode_id = prop;
 
+	prop = drm_property_create_bool(dev, 0,
+			"VRR_ENABLED");
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.prop_vrr_enabled = prop;
+
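
With the standard property created once per device, a driver that supports
variable refresh attaches it to each capable CRTC; a minimal driver-side
sketch (the crtc and dev variables are the driver's own, not part of this
patch):

	/* In the driver's CRTC init path; initial value 0 = disabled. */
	drm_object_attach_property(&crtc->base,
				   dev->mode_config.prop_vrr_enabled, 0);
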
 	prop = drm_property_create(dev,
 			DRM_MODE_PROP_BLOB,
 			"DEGAMMA_LUT", 0);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 208bc27..3691a14 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,11 +10,6 @@
 
 if DRM_EXYNOS
 
-config DRM_EXYNOS_IOMMU
-	bool
-	depends on EXYNOS_IOMMU
-	default y
-
 comment "CRTCs"
 
 config DRM_EXYNOS_FIMD
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 2ad146b..2fd2f3e 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,10 +4,9 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
-		exynos_drm_gem.o exynos_drm_plane.o
+		exynos_drm_gem.o exynos_drm_plane.o exynos_drm_dma.o
 
 exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON)	+= exynos5433_drm_decon.o
 exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON)	+= exynos7_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index aef487d..5b4e0e8 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -25,7 +25,6 @@
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
 #include "regs-decon5433.h"
 
 #define DSD_CFG_MUX 0x1004
@@ -84,6 +83,14 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
 	[CURSON_WIN] = DRM_PLANE_TYPE_CURSOR,
 };
 
+static const unsigned int capabilities[WINDOWS_NR] = {
+	0,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
 static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
 				  u32 val)
 {
@@ -252,11 +259,76 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
 	decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 }
 
+static void decon_win_set_bldeq(struct decon_context *ctx, unsigned int win,
+				unsigned int alpha, unsigned int pixel_alpha)
+{
+	u32 mask = BLENDERQ_A_FUNC_F(0xf) | BLENDERQ_B_FUNC_F(0xf);
+	u32 val = 0;
+
+	switch (pixel_alpha) {
+	case DRM_MODE_BLEND_PIXEL_NONE:
+	case DRM_MODE_BLEND_COVERAGE:
+		val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A);
+		val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+		break;
+	case DRM_MODE_BLEND_PREMULTI:
+	default:
+		if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+			val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA0);
+			val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+		} else {
+			val |= BLENDERQ_A_FUNC_F(BLENDERQ_ONE);
+			val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+		}
+		break;
+	}
+	decon_set_bits(ctx, DECON_BLENDERQx(win), mask, val);
+}
+
+static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
+				 unsigned int alpha, unsigned int pixel_alpha)
+{
+	u32 win_alpha = alpha >> 8;
+	u32 val = 0;
+
+	switch (pixel_alpha) {
+	case DRM_MODE_BLEND_PIXEL_NONE:
+		break;
+	case DRM_MODE_BLEND_COVERAGE:
+	case DRM_MODE_BLEND_PREMULTI:
+	default:
+		val |= WINCONx_ALPHA_SEL_F;
+		val |= WINCONx_BLD_PIX_F;
+		val |= WINCONx_ALPHA_MUL_F;
+		break;
+	}
+	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_BLEND_MODE_MASK, val);
+
+	if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+		val = VIDOSD_Wx_ALPHA_R_F(win_alpha) |
+		      VIDOSD_Wx_ALPHA_G_F(win_alpha) |
+		      VIDOSD_Wx_ALPHA_B_F(win_alpha);
+		decon_set_bits(ctx, DECON_VIDOSDxC(win),
+			       VIDOSDxC_ALPHA0_RGB_MASK, val);
+		decon_set_bits(ctx, DECON_BLENDCON, BLEND_NEW, BLEND_NEW);
+	}
+}
+
 static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
 				 struct drm_framebuffer *fb)
 {
+	struct exynos_drm_plane plane = ctx->planes[win];
+	struct exynos_drm_plane_state *state =
+		to_exynos_plane_state(plane.base.state);
+	unsigned int alpha = state->base.alpha;
+	unsigned int pixel_alpha;
 	unsigned long val;
 
+	if (fb->format->has_alpha)
+		pixel_alpha = state->base.pixel_blend_mode;
+	else
+		pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
+
 	val = readl(ctx->addr + DECON_WINCONx(win));
 	val &= WINCONx_ENWIN_F;
 
@@ -279,7 +351,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
 	case DRM_FORMAT_ARGB8888:
 	default:
 		val |= WINCONx_BPPMODE_32BPP_A8888;
-		val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
+		val |= WINCONx_WSWP_F;
 		val |= WINCONx_BURSTLEN_16WORD;
 		break;
 	}
@@ -298,8 +370,12 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_8WORD;
 	}
+	decon_set_bits(ctx, DECON_WINCONx(win), ~WINCONx_BLEND_MODE_MASK, val);
 
-	writel(val, ctx->addr + DECON_WINCONx(win));
+	if (win > 0) {
+		decon_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+		decon_win_set_bldeq(ctx, win, alpha, pixel_alpha);
+	}
 }
 
 static void decon_shadow_protect(struct decon_context *ctx, bool protect)
@@ -552,6 +628,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 		ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
 		ctx->configs[win].zpos = win - ctx->first_win;
 		ctx->configs[win].type = decon_win_types[win];
+		ctx->configs[win].capabilities = capabilities[win];
 
 		ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
 					&ctx->configs[win]);
@@ -569,7 +646,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 
 	decon_clear_channels(ctx->crtc);
 
-	return drm_iommu_attach_device(drm_dev, dev);
+	return exynos_drm_register_dma(drm_dev, dev);
 }
 
 static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -579,7 +656,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
 	decon_disable(ctx->crtc);
 
 	/* detach this sub driver from iommu mapping if supported. */
-	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
 }
 
 static const struct component_ops decon_component_ops = {
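
For reference, the BLENDERQ programming added above selects the standard
"over" compositing variants. Per 8-bit colour channel the two pixel blend
modes reduce to the following model (illustration only, not driver code):

	/* Coverage: the source is scaled by its alpha at blend time. */
	static unsigned int blend_coverage(unsigned int src, unsigned int dst,
					   unsigned int a)	/* a in 0..255 */
	{
		return (src * a + dst * (255 - a)) / 255;
	}

	/* Pre-multiplied: source channels already carry the alpha factor. */
	static unsigned int blend_premulti(unsigned int src, unsigned int dst,
					   unsigned int a)
	{
		return src + dst * (255 - a) / 255;
	}
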
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 88cbd00..381aa3d 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -30,7 +30,6 @@
 #include "exynos_drm_plane.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_iommu.h"
 #include "regs-decon7.h"
 
 /*
@@ -133,13 +132,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
 
 	decon_clear_channels(ctx->crtc);
 
-	return drm_iommu_attach_device(drm_dev, ctx->dev);
+	return exynos_drm_register_dma(drm_dev, ctx->dev);
 }
 
 static void decon_ctx_remove(struct decon_context *ctx)
 {
 	/* detach this sub driver from iommu mapping if supported. */
-	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
 }
 
 static u32 decon_calc_clkdiv(struct decon_context *ctx,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
new file mode 100644
index 0000000..3432c5e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd.
+// Author: Inki Dae <inki.dae@samsung.com>
+// Author: Andrzej Hajda <a.hajda@samsung.com>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "exynos_drm_drv.h"
+
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_create_mapping(...)	({ NULL; })
+#define arm_iommu_attach_device(...)	({ -ENODEV; })
+#define arm_iommu_release_mapping(...)	({ })
+#define arm_iommu_detach_device(...)	({ })
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
+#if !defined(CONFIG_IOMMU_DMA)
+#define iommu_dma_init_domain(...) ({ -EINVAL; })
+#endif
+
+#define EXYNOS_DEV_ADDR_START	0x20000000
+#define EXYNOS_DEV_ADDR_SIZE	0x40000000
+
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+	if (!dev->dma_parms)
+		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+	if (!dev->dma_parms)
+		return -ENOMEM;
+
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+	kfree(dev->dma_parms);
+	dev->dma_parms = NULL;
+}
+
+/*
+ * drm_iommu_attach_device - attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to the
+ * iommu mapping.
+ */
+static int drm_iommu_attach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+	int ret;
+
+	if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+		DRM_ERROR("Device %s lacks support for IOMMU\n",
+			  dev_name(subdrv_dev));
+		return -EINVAL;
+	}
+
+	ret = configure_dma_max_seg_size(subdrv_dev);
+	if (ret)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+		if (to_dma_iommu_mapping(subdrv_dev))
+			arm_iommu_detach_device(subdrv_dev);
+
+		ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
+	} else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
+		ret = iommu_attach_device(priv->mapping, subdrv_dev);
+	}
+
+	if (ret)
+		clear_dma_max_seg_size(subdrv_dev);
+
+	return ret;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from the
+ * iommu mapping.
+ */
+static void drm_iommu_detach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+
+	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+		arm_iommu_detach_device(subdrv_dev);
+	else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+		iommu_detach_device(priv->mapping, subdrv_dev);
+
+	clear_dma_max_seg_size(subdrv_dev);
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+{
+	struct exynos_drm_private *priv = drm->dev_private;
+
+	if (!priv->dma_dev) {
+		priv->dma_dev = dev;
+		DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
+			 dev_name(dev));
+	}
+
+	if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+		return 0;
+
+	if (!priv->mapping) {
+		void *mapping;
+
+		if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+			mapping = arm_iommu_create_mapping(&platform_bus_type,
+				EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
+		else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+			mapping = iommu_get_domain_for_dev(priv->dma_dev);
+
+		if (IS_ERR(mapping))
+			return PTR_ERR(mapping);
+		priv->mapping = mapping;
+	}
+
+	return drm_iommu_attach_device(drm, dev);
+}
+
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+{
+	if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+		drm_iommu_detach_device(drm, dev);
+}
+
+void exynos_drm_cleanup_dma(struct drm_device *drm)
+{
+	struct exynos_drm_private *priv = drm->dev_private;
+
+	if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+		return;
+
+	arm_iommu_release_mapping(priv->mapping);
+	priv->mapping = NULL;
+	priv->dma_dev = NULL;
+}
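
The new file leans on a common kernel idiom: when a configuration option is
off, the corresponding API is replaced by stub macros, and the call sites
are guarded with IS_ENABLED() so the dead branch is discarded at compile
time while the code still parses in both configurations. The shape of the
pattern, reduced to a sketch (CONFIG_FOO and foo_attach_device() are
placeholders, not real symbols):

	#if defined(CONFIG_FOO)
	#include <linux/foo.h>
	#else
	#define foo_attach_device(...)	({ -ENODEV; })
	#define foo_detach_device(...)	({ })
	#endif

	static int attach(struct device *dev)
	{
		if (IS_ENABLED(CONFIG_FOO))
			return foo_attach_device(dev); /* stub keeps this building */
		return 0;
	}
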
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 6f76baf..2c75e78 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -30,7 +30,6 @@
 #include "exynos_drm_ipp.h"
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_g2d.h"
-#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
@@ -175,8 +174,7 @@ struct exynos_drm_driver_info {
 
 #define DRM_COMPONENT_DRIVER	BIT(0)	/* supports component framework */
 #define DRM_VIRTUAL_DEVICE	BIT(1)	/* create virtual platform device */
-#define DRM_DMA_DEVICE		BIT(2)	/* can be used for dma allocations */
-#define DRM_FIMC_DEVICE		BIT(3)	/* devices shared with V4L2 subsystem */
+#define DRM_FIMC_DEVICE		BIT(2)	/* devices shared with V4L2 subsystem */
 
 #define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
 
@@ -187,16 +185,16 @@ struct exynos_drm_driver_info {
 static struct exynos_drm_driver_info exynos_drm_drivers[] = {
 	{
 		DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
-		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
-		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
-		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
-		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
 		DRM_COMPONENT_DRIVER
@@ -267,27 +265,6 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
 	return match ?: ERR_PTR(-ENODEV);
 }
 
-static struct device *exynos_drm_get_dma_device(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
-		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
-		struct device *dev;
-
-		if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
-			continue;
-
-		while ((dev = bus_find_device(&platform_bus_type, NULL,
-					    &info->driver->driver,
-					    (void *)platform_bus_type.match))) {
-			put_device(dev);
-			return dev;
-		}
-	}
-	return NULL;
-}
-
 static int exynos_drm_bind(struct device *dev)
 {
 	struct exynos_drm_private *private;
@@ -312,23 +289,6 @@ static int exynos_drm_bind(struct device *dev)
 	dev_set_drvdata(dev, drm);
 	drm->dev_private = (void *)private;
 
-	/* the first real CRTC device is used for all dma mapping operations */
-	private->dma_dev = exynos_drm_get_dma_device();
-	if (!private->dma_dev) {
-		DRM_ERROR("no device found for DMA mapping operations.\n");
-		ret = -ENODEV;
-		goto err_free_private;
-	}
-	DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
-		 dev_name(private->dma_dev));
-
-	/* create common IOMMU mapping for all devices attached to Exynos DRM */
-	ret = drm_create_iommu_mapping(drm);
-	if (ret < 0) {
-		DRM_ERROR("failed to create iommu mapping.\n");
-		goto err_free_private;
-	}
-
 	drm_mode_config_init(drm);
 
 	exynos_drm_mode_config_init(drm);
@@ -385,8 +345,7 @@ static int exynos_drm_bind(struct device *dev)
 	component_unbind_all(drm->dev, drm);
 err_mode_config_cleanup:
 	drm_mode_config_cleanup(drm);
-	drm_release_iommu_mapping(drm);
-err_free_private:
+	exynos_drm_cleanup_dma(drm);
 	kfree(private);
 err_free_drm:
 	drm_dev_put(drm);
@@ -405,7 +364,7 @@ static void exynos_drm_unbind(struct device *dev)
 
 	component_unbind_all(drm->dev, drm);
 	drm_mode_config_cleanup(drm);
-	drm_release_iommu_mapping(drm);
+	exynos_drm_cleanup_dma(drm);
 
 	kfree(drm->dev_private);
 	drm->dev_private = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 5e61e70..71eb240 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -214,6 +214,17 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
 	return priv->dma_dev;
 }
 
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+
+	return priv->mapping ? true : false;
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_cleanup_dma(struct drm_device *drm);
+
 #ifdef CONFIG_DRM_EXYNOS_DPI
 struct drm_encoder *exynos_dpi_probe(struct device *dev);
 int exynos_dpi_remove(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 9f52382..31eb538 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -24,7 +24,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_crtc.h"
 
 static int check_fb_gem_memory_type(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 01d1822..ce9604c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -23,7 +23,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
 
 #define MAX_CONNECTOR		4
 #define PREFERRED_BPP		32
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index e8d0670..90dfea0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -25,7 +25,6 @@
 #include <drm/exynos_drm.h>
 #include "regs-fimc.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
 
 /*
@@ -1129,7 +1128,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 
 	ctx->drm_dev = drm_dev;
-	drm_iommu_attach_device(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev);
 
 	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
 			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1149,7 +1148,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 
 	exynos_drm_ipp_unregister(drm_dev, ipp);
-	drm_iommu_detach_device(drm_dev, dev);
+	exynos_drm_unregister_dma(drm_dev, dev);
 }
 
 static const struct component_ops fimc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index b7f5693..e3d6a85 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -32,7 +32,6 @@
 #include "exynos_drm_fb.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
 
 /*
  * FIMD stands for Fully Interactive Mobile Display and
@@ -1011,7 +1010,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
 	if (is_drm_iommu_supported(drm_dev))
 		fimd_clear_channels(ctx->crtc);
 
-	return drm_iommu_attach_device(drm_dev, dev);
+	return exynos_drm_register_dma(drm_dev, dev);
 }
 
 static void fimd_unbind(struct device *dev, struct device *master,
@@ -1021,7 +1020,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
 
 	fimd_disable(ctx->crtc);
 
-	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
 
 	if (ctx->encoder)
 		exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f2481a2..24c536d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -25,7 +25,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_g2d.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
 
 #define G2D_HW_MAJOR_VER		4
 #define G2D_HW_MINOR_VER		1
@@ -1405,7 +1404,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
 		return ret;
 	}
 
-	ret = drm_iommu_attach_device(drm_dev, dev);
+	ret = exynos_drm_register_dma(drm_dev, dev);
 	if (ret < 0) {
 		dev_err(dev, "failed to enable iommu.\n");
 		g2d_fini_cmdlist(g2d);
@@ -1430,7 +1429,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
 	priv->g2d_dev = NULL;
 
 	cancel_work_sync(&g2d->runqueue_work);
-	drm_iommu_detach_device(g2d->drm_dev, dev);
+	exynos_drm_unregister_dma(g2d->drm_dev, dev);
 }
 
 static const struct component_ops g2d_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 34ace85..df66c38 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -19,7 +19,6 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
 
 static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
 {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index ce15d46..f048d97 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -24,7 +24,6 @@
 #include <drm/exynos_drm.h>
 #include "regs-gsc.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
 
 /*
@@ -1170,7 +1169,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 
 	ctx->drm_dev = drm_dev;
-	drm_iommu_attach_device(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev);
 
 	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
 			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1190,7 +1189,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 
 	exynos_drm_ipp_unregister(drm_dev, ipp);
-	drm_iommu_detach_device(drm_dev, dev);
+	exynos_drm_unregister_dma(drm_dev, dev);
 }
 
 static const struct component_ops gsc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
deleted file mode 100644
index 0f37370..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/* exynos_drm_iommu.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
-
-static inline int configure_dma_max_seg_size(struct device *dev)
-{
-	if (!dev->dma_parms)
-		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
-	if (!dev->dma_parms)
-		return -ENOMEM;
-
-	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-	return 0;
-}
-
-static inline void clear_dma_max_seg_size(struct device *dev)
-{
-	kfree(dev->dma_parms);
-	dev->dma_parms = NULL;
-}
-
-/*
- * drm_create_iommu_mapping - create a mapping structure
- *
- * @drm_dev: DRM device
- */
-int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-
-	return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
-					     EXYNOS_DEV_ADDR_SIZE);
-}
-
-/*
- * drm_release_iommu_mapping - release iommu mapping structure
- *
- * @drm_dev: DRM device
- */
-void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-
-	__exynos_iommu_release_mapping(priv);
-}
-
-/*
- * drm_iommu_attach_device- attach device to iommu mapping
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be attach
- *
- * This function should be called by sub drivers to attach it to iommu
- * mapping.
- */
-int drm_iommu_attach_device(struct drm_device *drm_dev,
-				struct device *subdrv_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-	int ret;
-
-	if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
-		DRM_ERROR("Device %s lacks support for IOMMU\n",
-			  dev_name(subdrv_dev));
-		return -EINVAL;
-	}
-
-	ret = configure_dma_max_seg_size(subdrv_dev);
-	if (ret)
-		return ret;
-
-	ret = __exynos_iommu_attach(priv, subdrv_dev);
-	if (ret)
-		clear_dma_max_seg_size(subdrv_dev);
-
-	return 0;
-}
-
-/*
- * drm_iommu_detach_device -detach device address space mapping from device
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be detached
- *
- * This function should be called by sub drivers to detach it from iommu
- * mapping
- */
-void drm_iommu_detach_device(struct drm_device *drm_dev,
-				struct device *subdrv_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-
-	__exynos_iommu_detach(priv, subdrv_dev);
-	clear_dma_max_seg_size(subdrv_dev);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
deleted file mode 100644
index 797d9ee..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* exynos_drm_iommu.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Authoer: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_IOMMU_H_
-#define _EXYNOS_DRM_IOMMU_H_
-
-#define EXYNOS_DEV_ADDR_START	0x20000000
-#define EXYNOS_DEV_ADDR_SIZE	0x40000000
-
-#ifdef CONFIG_DRM_EXYNOS_IOMMU
-
-#if defined(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
-					unsigned long start, unsigned long size)
-{
-	priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
-						 size);
-	return IS_ERR(priv->mapping);
-}
-
-static inline void
-__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
-	arm_iommu_release_mapping(priv->mapping);
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
-					struct device *dev)
-{
-	if (dev->archdata.mapping)
-		arm_iommu_detach_device(dev);
-
-	return arm_iommu_attach_device(dev, priv->mapping);
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
-					 struct device *dev)
-{
-	arm_iommu_detach_device(dev);
-}
-
-#elif defined(CONFIG_IOMMU_DMA)
-#include <linux/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
-					unsigned long start, unsigned long size)
-{
-	priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
-	return 0;
-}
-
-static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
-	priv->mapping = NULL;
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
-					struct device *dev)
-{
-	struct iommu_domain *domain = priv->mapping;
-
-	if (dev != priv->dma_dev)
-		return iommu_attach_device(domain, dev);
-	return 0;
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
-					 struct device *dev)
-{
-	struct iommu_domain *domain = priv->mapping;
-
-	if (dev != priv->dma_dev)
-		iommu_detach_device(domain, dev);
-}
-#else
-#error Unsupported architecture and IOMMU/DMA-mapping glue code
-#endif
-
-int drm_create_iommu_mapping(struct drm_device *drm_dev);
-
-void drm_release_iommu_mapping(struct drm_device *drm_dev);
-
-int drm_iommu_attach_device(struct drm_device *drm_dev,
-				struct device *subdrv_dev);
-
-void drm_iommu_detach_device(struct drm_device *dev_dev,
-				struct device *subdrv_dev);
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-
-	return priv->mapping ? true : false;
-}
-
-#else
-
-static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
-	return 0;
-}
-
-static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
-}
-
-static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
-						struct device *subdrv_dev)
-{
-	return 0;
-}
-
-static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
-						struct device *subdrv_dev)
-{
-}
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
-	return false;
-}
-
-#endif
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index a820a68..8d67b2a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -23,7 +23,6 @@
 #include <drm/exynos_drm.h>
 #include "regs-rotator.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
 
 /*
@@ -244,7 +243,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
 	struct exynos_drm_ipp *ipp = &rot->ipp;
 
 	rot->drm_dev = drm_dev;
-	drm_iommu_attach_device(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev);
 
 	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
 			   DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -263,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &rot->ipp;
 
 	exynos_drm_ipp_unregister(drm_dev, ipp);
-	drm_iommu_detach_device(rot->drm_dev, rot->dev);
+	exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
 }
 
 static const struct component_ops rotator_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index cd66774..71270ef 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -23,7 +23,6 @@
 #include "regs-scaler.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
 
 #define scaler_read(offset)		readl(scaler->regs + (offset))
@@ -452,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
 	struct exynos_drm_ipp *ipp = &scaler->ipp;
 
 	scaler->drm_dev = drm_dev;
-	drm_iommu_attach_device(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev);
 
 	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
 			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -473,7 +472,7 @@ static void scaler_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &scaler->ipp;
 
 	exynos_drm_ipp_unregister(drm_dev, ipp);
-	drm_iommu_detach_device(scaler->drm_dev, scaler->dev);
+	exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
 }
 
 static const struct component_ops scaler_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e3a4ecb..0573eab 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -40,7 +40,6 @@
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
 
 #define MIXER_WIN_NR		3
 #define VP_DEFAULT_WIN		2
@@ -381,19 +380,16 @@ static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
 	mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK);
 }
 
-static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, struct drm_display_mode *mode)
 {
+	enum hdmi_quantization_range range = drm_default_rgb_quant_range(mode);
 	u32 val;
 
-	switch (height) {
-	case 480:
-	case 576:
-		val = MXR_CFG_RGB601_0_255;
-		break;
-	case 720:
-	case 1080:
-	default:
-		val = MXR_CFG_RGB709_16_235;
+	if (mode->vdisplay < 720) {
+		val = MXR_CFG_RGB601;
+	} else {
+		val = MXR_CFG_RGB709;
+
 		/* Configure the BT.709 CSC matrix for full range RGB. */
 		mixer_reg_write(ctx, MXR_CM_COEFF_Y,
 			MXR_CSC_CT( 0.184,  0.614,  0.063) |
@@ -402,9 +398,13 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
 			MXR_CSC_CT(-0.102, -0.338,  0.440));
 		mixer_reg_write(ctx, MXR_CM_COEFF_CR,
 			MXR_CSC_CT( 0.440, -0.399, -0.040));
-		break;
 	}
 
+	if (range == HDMI_QUANTIZATION_RANGE_FULL)
+		val |= MXR_CFG_QUANT_RANGE_FULL;
+	else
+		val |= MXR_CFG_QUANT_RANGE_LIMITED;
+
 	mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
 }
 
@@ -461,7 +461,7 @@ static void mixer_commit(struct mixer_context *ctx)
 	struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode;
 
 	mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay);
-	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+	mixer_cfg_rgb_fmt(ctx, mode);
 	mixer_run(ctx);
 }
 
@@ -878,12 +878,12 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
 		}
 	}
 
-	return drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
+	return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
 }
 
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
 {
-	drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+	exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
 }
 
 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
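
Instead of keying the colour space off a hardcoded list of heights, the
mixer now derives the colourimetry from vdisplay (below 720 lines selects
BT.601) and the quantization range from the mode itself. The core helper's
behaviour roughly amounts to the rule below; see drm_default_rgb_quant_range()
in drm_edid.c for the authoritative version:

	/* CTA-861 modes other than VIC 1 (640x480) default to limited range;
	 * everything else defaults to full range. */
	static enum hdmi_quantization_range
	rgb_quant_range(const struct drm_display_mode *mode)
	{
		return drm_match_cea_mode(mode) > 1 ?
			HDMI_QUANTIZATION_RANGE_LIMITED :
			HDMI_QUANTIZATION_RANGE_FULL;
	}
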
diff --git a/drivers/gpu/drm/exynos/regs-decon5433.h b/drivers/gpu/drm/exynos/regs-decon5433.h
index 19ad9e4..63db697 100644
--- a/drivers/gpu/drm/exynos/regs-decon5433.h
+++ b/drivers/gpu/drm/exynos/regs-decon5433.h
@@ -104,6 +104,7 @@
 #define WINCONx_BURSTLEN_16WORD		(0x0 << 10)
 #define WINCONx_BURSTLEN_8WORD		(0x1 << 10)
 #define WINCONx_BURSTLEN_4WORD		(0x2 << 10)
+#define WINCONx_ALPHA_MUL_F		(1 << 7)
 #define WINCONx_BLD_PIX_F		(1 << 6)
 #define WINCONx_BPPMODE_MASK		(0xf << 2)
 #define WINCONx_BPPMODE_16BPP_565	(0x5 << 2)
@@ -116,11 +117,15 @@
 #define WINCONx_BPPMODE_16BPP_A4444	(0xe << 2)
 #define WINCONx_ALPHA_SEL_F		(1 << 1)
 #define WINCONx_ENWIN_F			(1 << 0)
+#define WINCONx_BLEND_MODE_MASK		(0xc2)
 
 /* SHADOWCON */
 #define SHADOWCON_PROTECT_MASK		GENMASK(14, 10)
 #define SHADOWCON_Wx_PROTECT(n)		(1 << (10 + (n)))
 
+/* VIDOSDxC */
+#define VIDOSDxC_ALPHA0_RGB_MASK	(0xffffff)
+
 /* VIDOSDxD */
 #define VIDOSD_Wx_ALPHA_R_F(n)		(((n) & 0xff) << 16)
 #define VIDOSD_Wx_ALPHA_G_F(n)		(((n) & 0xff) << 8)
@@ -206,4 +211,18 @@
 #define CRCCTRL_CRCEN			(0x1 << 0)
 #define CRCCTRL_MASK			(0x7)
 
+/* BLENDCON */
+#define BLEND_NEW			(1 << 0)
+
+/* BLENDERQx */
+#define BLENDERQ_ZERO			0x0
+#define BLENDERQ_ONE			0x1
+#define BLENDERQ_ALPHA_A		0x2
+#define BLENDERQ_ONE_MINUS_ALPHA_A	0x3
+#define BLENDERQ_ALPHA0			0x6
+#define BLENDERQ_Q_FUNC_F(n)		((n) << 18)
+#define BLENDERQ_P_FUNC_F(n)		((n) << 12)
+#define BLENDERQ_B_FUNC_F(n)		((n) << 6)
+#define BLENDERQ_A_FUNC_F(n)		((n) << 0)
+
 #endif /* EXYNOS_REGS_DECON5433_H */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index d2b8194..5ff095b 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -85,10 +85,11 @@
 /* bits for MXR_CFG */
 #define MXR_CFG_LAYER_UPDATE		(1 << 31)
 #define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29)
-#define MXR_CFG_RGB601_0_255		(0 << 9)
-#define MXR_CFG_RGB601_16_235		(1 << 9)
-#define MXR_CFG_RGB709_0_255		(2 << 9)
-#define MXR_CFG_RGB709_16_235		(3 << 9)
+#define MXR_CFG_QUANT_RANGE_FULL	(0 << 9)
+#define MXR_CFG_QUANT_RANGE_LIMITED	(1 << 9)
+#define MXR_CFG_RGB601			(0 << 10)
+#define MXR_CFG_RGB709			(1 << 10)
+
 #define MXR_CFG_RGB_FMT_MASK		0x600
 #define MXR_CFG_OUT_YUV444		(0 << 8)
 #define MXR_CFG_OUT_RGB888		(1 << 8)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ff878c..19b5fe5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -157,14 +157,17 @@
 	  intel_sdvo.o \
 	  intel_tv.o \
 	  vlv_dsi.o \
-	  vlv_dsi_pll.o
+	  vlv_dsi_pll.o \
+	  intel_vdsc.o
 
 # Post-mortem debug and GPU hang state capture
 i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
 i915-$(CONFIG_DRM_I915_SELFTEST) += \
 	selftests/i915_random.o \
 	selftests/i915_selftest.o \
-	selftests/igt_flush_test.o
+	selftests/igt_flush_test.o \
+	selftests/igt_reset.o \
+	selftests/igt_spinner.o
 
 # virtual gpu code
 i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7f455bca..38dcee1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -943,30 +943,30 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
 			      size_t count, loff_t *pos)
 {
-	struct i915_gpu_state *error = file->private_data;
-	struct drm_i915_error_state_buf str;
+	struct i915_gpu_state *error;
 	ssize_t ret;
-	loff_t tmp;
+	void *buf;
 
+	error = file->private_data;
 	if (!error)
 		return 0;
 
-	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
-	if (ret)
-		return ret;
+	/* Bounce buffer required because of kernfs __user API convenience. */
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
-	ret = i915_error_state_to_str(&str, error);
-	if (ret)
+	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+	if (ret <= 0)
 		goto out;
 
-	tmp = 0;
-	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
-	if (ret < 0)
-		goto out;
+	if (!copy_to_user(ubuf, buf, ret))
+		*pos += ret;
+	else
+		ret = -EFAULT;
 
-	*pos = str.start + ret;
 out:
-	i915_error_state_buf_release(&str);
+	kfree(buf);
 	return ret;
 }
 
@@ -3375,13 +3375,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
-	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
-	int i;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+	struct i915_wa *wa;
+	unsigned int i;
 
-	seq_printf(m, "Workarounds applied: %d\n", wa->count);
-	for (i = 0; i < wa->count; ++i)
+	seq_printf(m, "Workarounds applied: %u\n", wal->count);
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
-			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
+			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
 
 	return 0;
 }
@@ -3441,31 +3443,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
-	struct skl_ddb_allocation *ddb;
 	struct skl_ddb_entry *entry;
-	enum pipe pipe;
-	int plane;
+	struct intel_crtc *crtc;
 
 	if (INTEL_GEN(dev_priv) < 9)
 		return -ENODEV;
 
 	drm_modeset_lock_all(dev);
 
-	ddb = &dev_priv->wm.skl_hw.ddb;
-
 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
 
-	for_each_pipe(dev_priv, pipe) {
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+		enum pipe pipe = crtc->pipe;
+		enum plane_id plane_id;
+
 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
 
-		for_each_universal_plane(dev_priv, pipe, plane) {
-			entry = &ddb->plane[pipe][plane];
-			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
+		for_each_plane_id_on_crtc(crtc, plane_id) {
+			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
 				   entry->start, entry->end,
 				   skl_ddb_entry_size(entry));
 		}
 
-		entry = &ddb->plane[pipe][PLANE_CURSOR];
+		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
 			   entry->end, skl_ddb_entry_size(entry));
 	}
@@ -4592,6 +4595,13 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = m->private;
 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
 
+	/* Synchronize with everything first in case there's been an HPD
+	 * storm, but we haven't finished handling it in the kernel yet
+	 */
+	synchronize_irq(dev_priv->drm.irq);
+	flush_work(&dev_priv->hotplug.dig_port_work);
+	flush_work(&dev_priv->hotplug.hotplug_work);
+
 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
 	seq_printf(m, "Detected: %s\n",
 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
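
The gpu_state_read() rewrite above follows the usual pattern for a debugfs
read whose producer cannot write to user memory directly: fill a kernel
bounce buffer, copy_to_user() it, and advance *pos only on success. The
general shape, as a sketch with a hypothetical fill() callback:

	#include <linux/slab.h>
	#include <linux/uaccess.h>

	static ssize_t bounce_read(char __user *ubuf, size_t count, loff_t *pos,
				   ssize_t (*fill)(void *buf, loff_t pos,
						   size_t len))
	{
		void *buf = kmalloc(count, GFP_KERNEL);
		ssize_t ret;

		if (!buf)
			return -ENOMEM;

		ret = fill(buf, *pos, count);
		if (ret > 0) {
			/* copy_to_user() returns the number of bytes NOT copied. */
			if (copy_to_user(ubuf, buf, ret))
				ret = -EFAULT;
			else
				*pos += ret;
		}
		kfree(buf);
		return ret;
	}
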
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b1d23c7..b310a89 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -53,6 +53,7 @@
 #include "i915_vgpu.h"
 #include "intel_drv.h"
 #include "intel_uc.h"
+#include "intel_workarounds.h"
 
 static struct drm_driver driver;
 
@@ -287,7 +288,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
 	 * display.
 	 */
-	if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+	if (pch && !HAS_DISPLAY(dev_priv)) {
 		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
 		dev_priv->pch_type = PCH_NOP;
 		dev_priv->pch_id = 0;
@@ -645,7 +646,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
-	if (INTEL_INFO(dev_priv)->num_pipes) {
+	if (HAS_DISPLAY(dev_priv)) {
 		ret = drm_vblank_init(&dev_priv->drm,
 				      INTEL_INFO(dev_priv)->num_pipes);
 		if (ret)
@@ -696,7 +697,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_overlay_setup(dev_priv);
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
+	if (!HAS_DISPLAY(dev_priv))
 		return 0;
 
 	ret = intel_fbdev_init(dev);
@@ -868,6 +869,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 	pre |= IS_HSW_EARLY_SDV(dev_priv);
 	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
 	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
 
 	if (pre) {
 		DRM_ERROR("This is a pre-production stepping. "
@@ -1383,6 +1385,20 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 		}
 	}
 
+	if (HAS_EXECLISTS(dev_priv)) {
+		/*
+		 * Older GVT emulation depends upon intercepting CSB mmio,
+		 * which we no longer use, preferring to use the HWSP cache
+		 * instead.
+		 */
+		if (intel_vgpu_active(dev_priv) &&
+		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+			i915_report_error(dev_priv,
+					  "old vGPU host found, support for HWSP emulation required\n");
+			return -ENXIO;
+		}
+	}
+
 	intel_sanitize_options(dev_priv);
 
 	i915_perf_init(dev_priv);
@@ -1452,6 +1468,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	intel_uncore_sanitize(dev_priv);
 
+	intel_gt_init_workarounds(dev_priv);
 	i915_gem_load_init_fences(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1551,7 +1568,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	} else
 		DRM_ERROR("Failed to register driver for userspace access!\n");
 
-	if (INTEL_INFO(dev_priv)->num_pipes) {
+	if (HAS_DISPLAY(dev_priv)) {
 		/* Must be done after probing outputs */
 		intel_opregion_register(dev_priv);
 		acpi_video_register();
@@ -1575,7 +1592,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	 * We need to coordinate the hotplugs with the asynchronous fbdev
 	 * configuration, for which we use the fbdev->async_cookie.
 	 */
-	if (INTEL_INFO(dev_priv)->num_pipes)
+	if (HAS_DISPLAY(dev_priv))
 		drm_kms_helper_poll_init(dev);
 
 	intel_power_domains_enable(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4064e49..b1c319671 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,6 +53,7 @@
 #include <drm/drm_auth.h>
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
+#include <drm/drm_dsc.h>
 
 #include "i915_fixed.h"
 #include "i915_params.h"
@@ -68,6 +69,7 @@
 #include "intel_ringbuffer.h"
 #include "intel_uncore.h"
 #include "intel_wopcm.h"
+#include "intel_workarounds.h"
 #include "intel_uc.h"
 
 #include "i915_gem.h"
@@ -88,8 +90,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20181122"
-#define DRIVER_TIMESTAMP	1542898187
+#define DRIVER_DATE		"20181204"
+#define DRIVER_TIMESTAMP	1543944377
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -494,6 +496,7 @@ struct i915_psr {
 	bool sink_support;
 	bool prepared, enabled;
 	struct intel_dp *dp;
+	enum pipe pipe;
 	bool active;
 	struct work_struct work;
 	unsigned busy_frontbuffer_bits;
@@ -504,6 +507,8 @@ struct i915_psr {
 	u8 sink_sync_latency;
 	ktime_t last_entry_attempt;
 	ktime_t last_exit;
+	bool sink_not_reliable;
+	bool irq_aux_error;
 };
 
 enum intel_pch {
@@ -1093,9 +1098,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 }
 
 struct skl_ddb_allocation {
-	/* packed/y */
-	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
-	struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
 	u8 enabled_slices; /* GEN11 has configurable 2 slices */
 };
 
@@ -1188,20 +1190,6 @@ struct i915_frontbuffer_tracking {
 	unsigned flip_bits;
 };
 
-struct i915_wa_reg {
-	u32 addr;
-	u32 value;
-	/* bitmask representing WA bits */
-	u32 mask;
-};
-
-#define I915_MAX_WA_REGS 16
-
-struct i915_workarounds {
-	struct i915_wa_reg reg[I915_MAX_WA_REGS];
-	u32 count;
-};
-
 struct i915_virtual_gpu {
 	bool active;
 	u32 caps;
@@ -1651,7 +1639,7 @@ struct drm_i915_private {
 
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
-	struct i915_workarounds workarounds;
+	struct i915_wa_list gt_wa_list;
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
@@ -1995,6 +1983,8 @@ struct drm_i915_private {
 		struct delayed_work idle_work;
 
 		ktime_t last_init_time;
+
+		struct i915_vma *scratch;
 	} gt;
 
 	/* perform PHY state sanity checks? */
@@ -2448,9 +2438,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 	((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
 })
 
-#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.has_overlay)
+#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.display.has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
-		((dev_priv)->info.overlay_needs_physical)
+		((dev_priv)->info.display.overlay_needs_physical)
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2471,31 +2461,31 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
 					 !(IS_I915G(dev_priv) || \
 					 IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)
+#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.display.has_hotplug)
 
 #define HAS_FW_BLC(dev_priv) 	(INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
+#define HAS_FBC(dev_priv)	((dev_priv)->info.display.has_fbc)
 #define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
 
 #define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
 
-#define HAS_DP_MST(dev_priv)	((dev_priv)->info.has_dp_mst)
+#define HAS_DP_MST(dev_priv)	((dev_priv)->info.display.has_dp_mst)
 
-#define HAS_DDI(dev_priv)		 ((dev_priv)->info.has_ddi)
+#define HAS_DDI(dev_priv)		 ((dev_priv)->info.display.has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv)		 ((dev_priv)->info.has_psr)
+#define HAS_PSR(dev_priv)		 ((dev_priv)->info.display.has_psr)
 
 #define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
 #define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
 #define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */
 
-#define HAS_CSR(dev_priv)	((dev_priv)->info.has_csr)
+#define HAS_CSR(dev_priv)	((dev_priv)->info.display.has_csr)
 
 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
 
-#define HAS_IPC(dev_priv)		 ((dev_priv)->info.has_ipc)
+#define HAS_IPC(dev_priv)		 ((dev_priv)->info.display.has_ipc)
 
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
@@ -2556,7 +2546,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
 
 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
 
@@ -2568,6 +2558,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define GT_FREQUENCY_MULTIPLIER 50
 #define GEN9_FREQ_SCALER 3
 
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+
 #include "i915_trace.h"
 
 static inline bool intel_vtd_active(void)
@@ -3340,6 +3332,9 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
 				       bool interactive);
 extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
+void intel_dsc_enable(struct intel_encoder *encoder,
+		      const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
 
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
@@ -3720,4 +3715,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 		return I915_HWS_CSB_WRITE_INDEX;
 }
 
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+	return i915_ggtt_offset(i915->gt.scratch);
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c55b1f7..d36a975 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3309,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct i915_request *request)
 {
-	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
-		  request->engine->name,
-		  request->fence.context, request->fence.seqno);
-	dma_fence_set_error(&request->fence, -EIO);
-
-	i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
 	unsigned long flags;
 
 	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
@@ -3354,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
 	 * for which we haven't set the fence error to EIO yet).
 	 */
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, i915, id)
 		i915_gem_reset_prepare_engine(engine);
 
-		engine->submit_request = nop_submit_request;
-		engine->schedule = NULL;
-	}
-	i915->caps.scheduler = 0;
-
 	/* Even if the GPU reset fails, it should still stop the engines */
 	if (INTEL_GEN(i915) >= 5)
 		intel_gpu_reset(i915, ALL_ENGINES);
 
-	/*
-	 * Make sure no one is running the old callback before we proceed with
-	 * cancelling requests and resetting the completion tracking. Otherwise
-	 * we might submit a request to the hardware which never completes.
-	 */
-	synchronize_rcu();
-
 	for_each_engine(engine, i915, id) {
-		/* Mark all executing requests as skipped */
-		engine->cancel_requests(engine);
-
-		/*
-		 * Only once we've force-cancelled all in-flight requests can we
-		 * start to complete all requests.
-		 */
-		engine->submit_request = nop_complete_submit_request;
+		engine->submit_request = nop_submit_request;
+		engine->schedule = NULL;
 	}
+	i915->caps.scheduler = 0;
 
 	/*
 	 * Make sure no request can slip through without getting completed by
 	 * either this call here to intel_engine_init_global_seqno, or the one
-	 * in nop_complete_submit_request.
+	 * in nop_submit_request.
 	 */
 	synchronize_rcu();
 
+	/* Mark all executing requests as skipped */
+	for_each_engine(engine, i915, id)
+		engine->cancel_requests(engine);
+
 	for_each_engine(engine, i915, id) {
-		unsigned long flags;
-
-		/*
-		 * Mark all pending requests as complete so that any concurrent
-		 * (lockless) lookup doesn't try and wait upon the request as we
-		 * reset it.
-		 */
-		spin_lock_irqsave(&engine->timeline.lock, flags);
-		intel_engine_init_global_seqno(engine,
-					       intel_engine_last_submit(engine));
-		spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
 		i915_gem_reset_finish_engine(engine);
+		intel_engine_wakeup(engine);
 	}
 
 out:
@@ -5334,7 +5300,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
 			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
-	intel_gt_workarounds_apply(dev_priv);
+	/* Apply the GT workarounds... */
+	intel_gt_apply_workarounds(dev_priv);
+	/* ...and determine whether they are sticking. */
+	intel_gt_verify_workarounds(dev_priv, "init");
 
 	i915_gem_init_swizzling(dev_priv);
 
@@ -5529,6 +5498,44 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 	goto out_ctx;
 }
 
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
+
+	obj = i915_gem_object_create_stolen(i915, size);
+	if (!obj)
+		obj = i915_gem_object_create_internal(i915, size);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate scratch page\n");
+		return PTR_ERR(obj);
+	}
+
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+
+	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (ret)
+		goto err_unref;
+
+	i915->gt.scratch = vma;
+	return 0;
+
+err_unref:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
 	int ret;
@@ -5575,12 +5582,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_unlock;
 	}
 
-	ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_init_scratch(dev_priv,
+				    IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_ggtt;
 	}
 
+	ret = i915_gem_contexts_init(dev_priv);
+	if (ret) {
+		GEM_BUG_ON(ret == -EIO);
+		goto err_scratch;
+	}
+
 	ret = intel_engines_init(dev_priv);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
@@ -5653,6 +5667,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 err_context:
 	if (ret != -EIO)
 		i915_gem_contexts_fini(dev_priv);
+err_scratch:
+	i915_gem_fini_scratch(dev_priv);
 err_ggtt:
 err_unlock:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5704,8 +5720,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 	intel_uc_fini(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_contexts_fini(dev_priv);
+	i915_gem_fini_scratch(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	intel_wa_list_free(&dev_priv->gt_wa_list);
+
 	intel_cleanup_gt_powersave(dev_priv);
 
 	intel_uc_fini_misc(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b97963d..371c070 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -535,16 +535,12 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
-	int ret;
 
 	/* Reassure ourselves we are only called once */
 	GEM_BUG_ON(dev_priv->kernel_context);
 	GEM_BUG_ON(dev_priv->preempt_context);
 
-	ret = intel_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
-
+	intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
 	init_contexts(dev_priv);
 
 	/* lowest priority; idle task */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8123bf0..0746512 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -27,11 +27,14 @@
  *
  */
 
-#include <linux/utsname.h>
-#include <linux/stop_machine.h>
-#include <linux/zlib.h>
-#include <drm/drm_print.h>
 #include <linux/ascii85.h>
+#include <linux/nmi.h>
+#include <linux/scatterlist.h>
+#include <linux/stop_machine.h>
+#include <linux/utsname.h>
+#include <linux/zlib.h>
+
+#include <drm/drm_print.h>
 
 #include "i915_gpu_error.h"
 #include "i915_drv.h"
@@ -77,112 +80,110 @@ static const char *purgeable_flag(int purgeable)
 	return purgeable ? " purgeable" : "";
 }
 
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+static void __sg_set_buf(struct scatterlist *sg,
+			 void *addr, unsigned int len, loff_t it)
 {
-
-	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
-		e->err = -ENOSPC;
-		return false;
-	}
-
-	if (e->bytes == e->size - 1 || e->err)
-		return false;
-
-	return true;
+	sg->page_link = (unsigned long)virt_to_page(addr);
+	sg->offset = offset_in_page(addr);
+	sg->length = len;
+	sg->dma_address = it;
 }
 
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
-			      unsigned len)
+static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
 {
-	if (e->pos + len <= e->start) {
-		e->pos += len;
+	if (!len)
 		return false;
+
+	if (e->bytes + len + 1 <= e->size)
+		return true;
+
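+	/* Doesn't fit: seal the filled buffer into the sg table before growing. */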
+	if (e->bytes) {
+		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
+		e->iter += e->bytes;
+		e->buf = NULL;
+		e->bytes = 0;
 	}
 
-	/* First vsnprintf needs to fit in its entirety for memmove */
-	if (len >= e->size) {
-		e->err = -EIO;
-		return false;
-	}
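+	/* No sg slots left: chain in a fresh page of scatterlist entries. */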
+	if (e->cur == e->end) {
+		struct scatterlist *sgl;
 
-	return true;
-}
-
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
-				 unsigned len)
-{
-	/* If this is first printf in this window, adjust it so that
-	 * start position matches start of the buffer
-	 */
-
-	if (e->pos < e->start) {
-		const size_t off = e->start - e->pos;
-
-		/* Should not happen but be paranoid */
-		if (off > len || e->bytes) {
-			e->err = -EIO;
-			return;
+		sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+		if (!sgl) {
+			e->err = -ENOMEM;
+			return false;
 		}
 
-		memmove(e->buf, e->buf + off, len - off);
-		e->bytes = len - off;
-		e->pos = e->start;
-		return;
+		if (e->cur) {
+			e->cur->offset = 0;
+			e->cur->length = 0;
+			e->cur->page_link =
+				(unsigned long)sgl | SG_CHAIN;
+		} else {
+			e->sgl = sgl;
+		}
+
+		e->cur = sgl;
+		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
 	}
 
-	e->bytes += len;
-	e->pos += len;
+	e->size = ALIGN(len + 1, SZ_64K);
+	e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+	if (!e->buf) {
+		e->size = PAGE_ALIGN(len + 1);
+		e->buf = kmalloc(e->size, GFP_KERNEL);
+	}
+	if (!e->buf) {
+		e->err = -ENOMEM;
+		return false;
+	}
+
+	return true;
 }
 
 __printf(2, 0)
 static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
-			       const char *f, va_list args)
+			       const char *fmt, va_list args)
 {
-	unsigned len;
+	va_list ap;
+	int len;
 
-	if (!__i915_error_ok(e))
+	if (e->err)
 		return;
 
-	/* Seek the first printf which is hits start position */
-	if (e->pos < e->start) {
-		va_list tmp;
-
-		va_copy(tmp, args);
-		len = vsnprintf(NULL, 0, f, tmp);
-		va_end(tmp);
-
-		if (!__i915_error_seek(e, len))
-			return;
+	va_copy(ap, args);
+	len = vsnprintf(NULL, 0, fmt, ap);
+	va_end(ap);
+	if (len <= 0) {
+		e->err = len;
+		return;
 	}
 
-	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
-	if (len >= e->size - e->bytes)
-		len = e->size - e->bytes - 1;
+	if (!__i915_error_grow(e, len))
+		return;
 
-	__i915_error_advance(e, len);
+	GEM_BUG_ON(e->bytes >= e->size);
+	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
+	if (len < 0) {
+		e->err = len;
+		return;
+	}
+	e->bytes += len;
 }
 
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
-			    const char *str)
+static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
 {
 	unsigned len;
 
-	if (!__i915_error_ok(e))
+	if (e->err || !str)
 		return;
 
 	len = strlen(str);
+	if (!__i915_error_grow(e, len))
+		return;
 
-	/* Seek the first printf which is hits start position */
-	if (e->pos < e->start) {
-		if (!__i915_error_seek(e, len))
-			return;
-	}
-
-	if (len >= e->size - e->bytes)
-		len = e->size - e->bytes - 1;
+	GEM_BUG_ON(e->bytes + len > e->size);
 	memcpy(e->buf + e->bytes, str, len);
-
-	__i915_error_advance(e, len);
+	e->bytes += len;
 }
 
 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
@@ -268,6 +269,8 @@ static int compress_page(struct compress *c,
 
 		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
 			return -EIO;
+
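+		/* Compressing a large capture can hog the CPU; keep the NMI watchdog quiet. */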
+		touch_nmi_watchdog();
 	} while (zstream->avail_in);
 
 	/* Fallback to uncompressed if we increase size? */
@@ -635,22 +638,30 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
 	print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
 }
 
-int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
-			    const struct i915_gpu_state *error)
+static void err_free_sgl(struct scatterlist *sgl)
 {
-	struct drm_i915_private *dev_priv = m->i915;
+	while (sgl) {
+		struct scatterlist *sg;
+
+		for (sg = sgl; !sg_is_chain(sg); sg++) {
+			kfree(sg_virt(sg));
+			if (sg_is_last(sg))
+				break;
+		}
+
+		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
+		free_page((unsigned long)sgl);
+		sgl = sg;
+	}
+}
+
+static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
+			       struct i915_gpu_state *error)
+{
 	struct drm_i915_error_object *obj;
 	struct timespec64 ts;
 	int i, j;
 
-	if (!error) {
-		err_printf(m, "No error state collected\n");
-		return 0;
-	}
-
-	if (IS_ERR(error))
-		return PTR_ERR(error);
-
 	if (*error->error_msg)
 		err_printf(m, "%s\n", error->error_msg);
 	err_printf(m, "Kernel: %s\n", init_utsname()->release);
@@ -683,12 +694,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
 	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
-	err_print_pciid(m, error->i915);
+	err_print_pciid(m, m->i915);
 
 	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
-	if (HAS_CSR(dev_priv)) {
-		struct intel_csr *csr = &dev_priv->csr;
+	if (HAS_CSR(m->i915)) {
+		struct intel_csr *csr = &m->i915->csr;
 
 		err_printf(m, "DMC loaded: %s\n",
 			   yesno(csr->dmc_payload != NULL));
@@ -708,22 +719,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
 	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
 	err_printf(m, "CCID: 0x%08x\n", error->ccid);
-	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+	err_printf(m, "Missed interrupts: 0x%08lx\n",
+		   m->i915->gpu_error.missed_irq_rings);
 
 	for (i = 0; i < error->nfence; i++)
 		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 
-	if (INTEL_GEN(dev_priv) >= 6) {
+	if (INTEL_GEN(m->i915) >= 6) {
 		err_printf(m, "ERROR: 0x%08x\n", error->error);
 
-		if (INTEL_GEN(dev_priv) >= 8)
+		if (INTEL_GEN(m->i915) >= 8)
 			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
 				   error->fault_data1, error->fault_data0);
 
 		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
-	if (IS_GEN7(dev_priv))
+	if (IS_GEN7(m->i915))
 		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -745,7 +757,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 			len += scnprintf(buf + len, sizeof(buf), "%s%s",
 					 first ? "" : ", ",
-					 dev_priv->engine[j]->name);
+					 m->i915->engine[j]->name);
 			first = 0;
 		}
 		scnprintf(buf + len, sizeof(buf), ")");
@@ -763,7 +775,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 		obj = ee->batchbuffer;
 		if (obj) {
-			err_puts(m, dev_priv->engine[i]->name);
+			err_puts(m, m->i915->engine[i]->name);
 			if (ee->context.pid)
 				err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
 					   ee->context.comm,
@@ -775,16 +787,16 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
 				   upper_32_bits(obj->gtt_offset),
 				   lower_32_bits(obj->gtt_offset));
-			print_error_obj(m, dev_priv->engine[i], NULL, obj);
+			print_error_obj(m, m->i915->engine[i], NULL, obj);
 		}
 
 		for (j = 0; j < ee->user_bo_count; j++)
-			print_error_obj(m, dev_priv->engine[i],
+			print_error_obj(m, m->i915->engine[i],
 					"user", ee->user_bo[j]);
 
 		if (ee->num_requests) {
 			err_printf(m, "%s --- %d requests\n",
-				   dev_priv->engine[i]->name,
+				   m->i915->engine[i]->name,
 				   ee->num_requests);
 			for (j = 0; j < ee->num_requests; j++)
 				error_print_request(m, " ",
@@ -794,10 +806,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 		if (IS_ERR(ee->waiters)) {
 			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
-				   dev_priv->engine[i]->name);
+				   m->i915->engine[i]->name);
 		} else if (ee->num_waiters) {
 			err_printf(m, "%s --- %d waiters\n",
-				   dev_priv->engine[i]->name,
+				   m->i915->engine[i]->name,
 				   ee->num_waiters);
 			for (j = 0; j < ee->num_waiters; j++) {
 				err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -807,22 +819,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			}
 		}
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"ringbuffer", ee->ringbuffer);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"HW Status", ee->hws_page);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"HW context", ee->ctx);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"WA context", ee->wa_ctx);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"WA batchbuffer", ee->wa_batchbuffer);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"NULL context", ee->default_state);
 	}
 
@@ -835,43 +847,107 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_print_capabilities(m, &error->device_info, &error->driver_caps);
 	err_print_params(m, &error->params);
 	err_print_uc(m, &error->uc);
+}
 
-	if (m->bytes == 0 && m->err)
-		return m->err;
+static int err_print_to_sgl(struct i915_gpu_state *error)
+{
+	struct drm_i915_error_state_buf m;
+
+	if (IS_ERR(error))
+		return PTR_ERR(error);
+
+	if (READ_ONCE(error->sgl))
+		return 0;
+
+	memset(&m, 0, sizeof(m));
+	m.i915 = error->i915;
+
+	__err_print_to_sgl(&m, error);
+
+	if (m.buf) {
+		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
+		m.bytes = 0;
+		m.buf = NULL;
+	}
+	if (m.cur) {
+		GEM_BUG_ON(m.end < m.cur);
+		sg_mark_end(m.cur - 1);
+	}
+	GEM_BUG_ON(m.sgl && !m.cur);
+
+	if (m.err) {
+		err_free_sgl(m.sgl);
+		return m.err;
+	}
+
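+	/* Publish the sg list atomically; if we raced and lost, free our copy. */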
+	if (cmpxchg(&error->sgl, NULL, m.sgl))
+		err_free_sgl(m.sgl);
 
 	return 0;
 }
 
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
-			      struct drm_i915_private *i915,
-			      size_t count, loff_t pos)
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+				      char *buf, loff_t off, size_t rem)
 {
-	memset(ebuf, 0, sizeof(*ebuf));
-	ebuf->i915 = i915;
+	struct scatterlist *sg;
+	size_t count;
+	loff_t pos;
+	int err;
 
-	/* We need to have enough room to store any i915_error_state printf
-	 * so that we can move it to start position.
-	 */
-	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
-	ebuf->buf = kmalloc(ebuf->size,
-				GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+	if (!error || !rem)
+		return 0;
 
-	if (ebuf->buf == NULL) {
-		ebuf->size = PAGE_SIZE;
-		ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
-	}
+	err = err_print_to_sgl(error);
+	if (err)
+		return err;
 
-	if (ebuf->buf == NULL) {
-		ebuf->size = 128;
-		ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
-	}
+	sg = READ_ONCE(error->fit);
+	if (!sg || off < sg->dma_address)
+		sg = error->sgl;
+	if (!sg)
+		return 0;
 
-	if (ebuf->buf == NULL)
-		return -ENOMEM;
+	pos = sg->dma_address;
+	count = 0;
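+	/* Skip segments wholly before the read offset, then copy out up to rem bytes. */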
+	do {
+		size_t len, start;
 
-	ebuf->start = pos;
+		if (sg_is_chain(sg)) {
+			sg = sg_chain_ptr(sg);
+			GEM_BUG_ON(sg_is_chain(sg));
+		}
 
-	return 0;
+		len = sg->length;
+		if (pos + len <= off) {
+			pos += len;
+			continue;
+		}
+
+		start = sg->offset;
+		if (pos < off) {
+			GEM_BUG_ON(off - pos > len);
+			len -= off - pos;
+			start += off - pos;
+			pos = off;
+		}
+
+		len = min(len, rem);
+		GEM_BUG_ON(!len || len > sg->length);
+
+		memcpy(buf, page_address(sg_page(sg)) + start, len);
+
+		count += len;
+		pos += len;
+
+		buf += len;
+		rem -= len;
+		if (!rem) {
+			WRITE_ONCE(error->fit, sg);
+			break;
+		}
+	} while (!sg_is_last(sg++));
+
+	return count;
 }
 
 static void i915_error_object_free(struct drm_i915_error_object *obj)
@@ -944,6 +1020,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
 	cleanup_params(error);
 	cleanup_uc_state(error);
 
+	err_free_sgl(error->sgl);
 	kfree(error);
 }
 
@@ -1494,7 +1571,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			if (HAS_BROKEN_CS_TLB(i915))
 				ee->wa_batchbuffer =
 					i915_error_object_create(i915,
-								 engine->scratch);
+								 i915->gt.scratch);
 			request_record_user_bo(request, ee);
 
 			ee->ctx =
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 3ec89a5..ff2652b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -192,6 +192,8 @@ struct i915_gpu_state {
 	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
 	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
 	struct i915_address_space *active_vm[I915_NUM_ENGINES];
+
+	struct scatterlist *sgl, *fit;
 };
 
 struct i915_gpu_error {
@@ -298,29 +300,20 @@ struct i915_gpu_error {
 
 struct drm_i915_error_state_buf {
 	struct drm_i915_private *i915;
-	unsigned int bytes;
-	unsigned int size;
+	struct scatterlist *sgl, *cur, *end;
+
+	char *buf;
+	size_t bytes;
+	size_t size;
+	loff_t iter;
+
 	int err;
-	u8 *buf;
-	loff_t start;
-	loff_t pos;
 };
 
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 
 __printf(2, 3)
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
-			    const struct i915_gpu_state *gpu);
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
-			      struct drm_i915_private *i915,
-			      size_t count, loff_t pos);
-
-static inline void
-i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
-{
-	kfree(eb->buf);
-}
 
 struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
 void i915_capture_error_state(struct drm_i915_private *dev_priv,
@@ -334,6 +327,9 @@ i915_gpu_state_get(struct i915_gpu_state *gpu)
 	return gpu;
 }
 
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+				      char *buf, loff_t offset, size_t count);
+
 void __i915_gpu_state_free(struct kref *kref);
 static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
 {
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1b81d7c..6350db5 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -79,8 +79,9 @@
 #define GEN2_FEATURES \
 	GEN(2), \
 	.num_pipes = 1, \
-	.has_overlay = 1, .overlay_needs_physical = 1, \
-	.has_gmch_display = 1, \
+	.display.has_overlay = 1, \
+	.display.overlay_needs_physical = 1, \
+	.display.has_gmch_display = 1, \
 	.hws_needs_physical = 1, \
 	.unfenced_needs_alignment = 1, \
 	.ring_mask = RENDER_RING, \
@@ -93,7 +94,8 @@
 static const struct intel_device_info intel_i830_info = {
 	GEN2_FEATURES,
 	PLATFORM(INTEL_I830),
-	.is_mobile = 1, .cursor_needs_physical = 1,
+	.is_mobile = 1,
+	.display.cursor_needs_physical = 1,
 	.num_pipes = 2, /* legal, last one wins */
 };
 
@@ -107,8 +109,8 @@ static const struct intel_device_info intel_i85x_info = {
 	PLATFORM(INTEL_I85X),
 	.is_mobile = 1,
 	.num_pipes = 2, /* legal, last one wins */
-	.cursor_needs_physical = 1,
-	.has_fbc = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_fbc = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
@@ -119,7 +121,7 @@ static const struct intel_device_info intel_i865g_info = {
 #define GEN3_FEATURES \
 	GEN(3), \
 	.num_pipes = 2, \
-	.has_gmch_display = 1, \
+	.display.has_gmch_display = 1, \
 	.ring_mask = RENDER_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
@@ -131,8 +133,9 @@ static const struct intel_device_info intel_i915g_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I915G),
 	.has_coherent_ggtt = false,
-	.cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_overlay = 1,
+	.display.overlay_needs_physical = 1,
 	.hws_needs_physical = 1,
 	.unfenced_needs_alignment = 1,
 };
@@ -141,10 +144,11 @@ static const struct intel_device_info intel_i915gm_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I915GM),
 	.is_mobile = 1,
-	.cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.supports_tv = 1,
-	.has_fbc = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_overlay = 1,
+	.display.overlay_needs_physical = 1,
+	.display.supports_tv = 1,
+	.display.has_fbc = 1,
 	.hws_needs_physical = 1,
 	.unfenced_needs_alignment = 1,
 };
@@ -152,8 +156,10 @@ static const struct intel_device_info intel_i915gm_info = {
 static const struct intel_device_info intel_i945g_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I945G),
-	.has_hotplug = 1, .cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
+	.display.has_hotplug = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_overlay = 1,
+	.display.overlay_needs_physical = 1,
 	.hws_needs_physical = 1,
 	.unfenced_needs_alignment = 1,
 };
@@ -162,10 +168,12 @@ static const struct intel_device_info intel_i945gm_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I945GM),
 	.is_mobile = 1,
-	.has_hotplug = 1, .cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.supports_tv = 1,
-	.has_fbc = 1,
+	.display.has_hotplug = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_overlay = 1,
+	.display.overlay_needs_physical = 1,
+	.display.supports_tv = 1,
+	.display.has_fbc = 1,
 	.hws_needs_physical = 1,
 	.unfenced_needs_alignment = 1,
 };
@@ -173,23 +181,23 @@ static const struct intel_device_info intel_i945gm_info = {
 static const struct intel_device_info intel_g33_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_G33),
-	.has_hotplug = 1,
-	.has_overlay = 1,
+	.display.has_hotplug = 1,
+	.display.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_pineview_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_PINEVIEW),
 	.is_mobile = 1,
-	.has_hotplug = 1,
-	.has_overlay = 1,
+	.display.has_hotplug = 1,
+	.display.has_overlay = 1,
 };
 
 #define GEN4_FEATURES \
 	GEN(4), \
 	.num_pipes = 2, \
-	.has_hotplug = 1, \
-	.has_gmch_display = 1, \
+	.display.has_hotplug = 1, \
+	.display.has_gmch_display = 1, \
 	.ring_mask = RENDER_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
@@ -200,7 +208,7 @@ static const struct intel_device_info intel_pineview_info = {
 static const struct intel_device_info intel_i965g_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_I965G),
-	.has_overlay = 1,
+	.display.has_overlay = 1,
 	.hws_needs_physical = 1,
 	.has_snoop = false,
 };
@@ -208,9 +216,10 @@ static const struct intel_device_info intel_i965g_info = {
 static const struct intel_device_info intel_i965gm_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_I965GM),
-	.is_mobile = 1, .has_fbc = 1,
-	.has_overlay = 1,
-	.supports_tv = 1,
+	.is_mobile = 1,
+	.display.has_fbc = 1,
+	.display.has_overlay = 1,
+	.display.supports_tv = 1,
 	.hws_needs_physical = 1,
 	.has_snoop = false,
 };
@@ -224,15 +233,16 @@ static const struct intel_device_info intel_g45_info = {
 static const struct intel_device_info intel_gm45_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_GM45),
-	.is_mobile = 1, .has_fbc = 1,
-	.supports_tv = 1,
+	.is_mobile = 1,
+	.display.has_fbc = 1,
+	.display.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 #define GEN5_FEATURES \
 	GEN(5), \
 	.num_pipes = 2, \
-	.has_hotplug = 1, \
+	.display.has_hotplug = 1, \
 	.ring_mask = RENDER_RING | BSD_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
@@ -250,14 +260,15 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
 	GEN5_FEATURES,
 	PLATFORM(INTEL_IRONLAKE),
-	.is_mobile = 1, .has_fbc = 1,
+	.is_mobile = 1,
+	.display.has_fbc = 1,
 };
 
 #define GEN6_FEATURES \
 	GEN(6), \
 	.num_pipes = 2, \
-	.has_hotplug = 1, \
-	.has_fbc = 1, \
+	.display.has_hotplug = 1, \
+	.display.has_fbc = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.has_coherent_ggtt = true, \
 	.has_llc = 1, \
@@ -301,8 +312,8 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
 #define GEN7_FEATURES  \
 	GEN(7), \
 	.num_pipes = 3, \
-	.has_hotplug = 1, \
-	.has_fbc = 1, \
+	.display.has_hotplug = 1, \
+	.display.has_fbc = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.has_coherent_ggtt = true, \
 	.has_llc = 1, \
@@ -359,8 +370,8 @@ static const struct intel_device_info intel_valleyview_info = {
 	.num_pipes = 2,
 	.has_runtime_pm = 1,
 	.has_rc6 = 1,
-	.has_gmch_display = 1,
-	.has_hotplug = 1,
+	.display.has_gmch_display = 1,
+	.display.has_hotplug = 1,
 	.ppgtt = INTEL_PPGTT_FULL,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
@@ -374,10 +385,10 @@ static const struct intel_device_info intel_valleyview_info = {
 #define G75_FEATURES  \
 	GEN7_FEATURES, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
-	.has_ddi = 1, \
+	.display.has_ddi = 1, \
 	.has_fpga_dbg = 1, \
-	.has_psr = 1, \
-	.has_dp_mst = 1, \
+	.display.has_psr = 1, \
+	.display.has_dp_mst = 1, \
 	.has_rc6p = 0 /* RC6p removed-by HSW */, \
 	.has_runtime_pm = 1
 
@@ -444,14 +455,14 @@ static const struct intel_device_info intel_cherryview_info = {
 	PLATFORM(INTEL_CHERRYVIEW),
 	GEN(8),
 	.num_pipes = 3,
-	.has_hotplug = 1,
+	.display.has_hotplug = 1,
 	.is_lp = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_64bit_reloc = 1,
 	.has_runtime_pm = 1,
 	.has_rc6 = 1,
 	.has_logical_ring_contexts = 1,
-	.has_gmch_display = 1,
+	.display.has_gmch_display = 1,
 	.ppgtt = INTEL_PPGTT_FULL,
 	.has_reset_engine = 1,
 	.has_snoop = true,
@@ -473,15 +484,15 @@ static const struct intel_device_info intel_cherryview_info = {
 	GEN(9), \
 	GEN9_DEFAULT_PAGE_SIZES, \
 	.has_logical_ring_preemption = 1, \
-	.has_csr = 1, \
+	.display.has_csr = 1, \
 	.has_guc = 1, \
-	.has_ipc = 1, \
+	.display.has_ipc = 1, \
 	.ddb_size = 896
 
 #define SKL_PLATFORM \
 	GEN9_FEATURES, \
 	/* Display WA #0477 WaDisableIPC: skl */ \
-	.has_ipc = 0, \
+	.display.has_ipc = 0, \
 	PLATFORM(INTEL_SKYLAKE)
 
 static const struct intel_device_info intel_skylake_gt1_info = {
@@ -512,19 +523,19 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 #define GEN9_LP_FEATURES \
 	GEN(9), \
 	.is_lp = 1, \
-	.has_hotplug = 1, \
+	.display.has_hotplug = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
 	.num_pipes = 3, \
 	.has_64bit_reloc = 1, \
-	.has_ddi = 1, \
+	.display.has_ddi = 1, \
 	.has_fpga_dbg = 1, \
-	.has_fbc = 1, \
-	.has_psr = 1, \
+	.display.has_fbc = 1, \
+	.display.has_psr = 1, \
 	.has_runtime_pm = 1, \
 	.has_pooled_eu = 0, \
-	.has_csr = 1, \
+	.display.has_csr = 1, \
 	.has_rc6 = 1, \
-	.has_dp_mst = 1, \
+	.display.has_dp_mst = 1, \
 	.has_logical_ring_contexts = 1, \
 	.has_logical_ring_preemption = 1, \
 	.has_guc = 1, \
@@ -532,7 +543,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 	.has_reset_engine = 1, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = false, \
-	.has_ipc = 1, \
+	.display.has_ipc = 1, \
 	GEN9_DEFAULT_PAGE_SIZES, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	IVB_CURSOR_OFFSETS, \
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 47baf2fe..0a7d605 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4570,6 +4570,7 @@ enum {
  * of the infoframe structure specified by CEA-861. */
 #define   VIDEO_DIP_DATA_SIZE	32
 #define   VIDEO_DIP_VSC_DATA_SIZE	36
+#define   VIDEO_DIP_PPS_DATA_SIZE	132
 #define VIDEO_DIP_CTL		_MMIO(0x61170)
 /* Pre HSW: */
 #define   VIDEO_DIP_ENABLE		(1 << 31)
@@ -4617,6 +4618,17 @@ enum {
 #define _PP_STATUS			0x61200
 #define PP_STATUS(pps_idx)		_MMIO_PPS(pps_idx, _PP_STATUS)
 #define   PP_ON				(1 << 31)
+
+#define _PP_CONTROL_1			0xc7204
+#define _PP_CONTROL_2			0xc7304
+#define ICP_PP_CONTROL(x)		_MMIO(((x) == 1) ? _PP_CONTROL_1 : \
+					      _PP_CONTROL_2)
+#define  POWER_CYCLE_DELAY_MASK	(0x1f << 4)
+#define  POWER_CYCLE_DELAY_SHIFT	4
+#define  VDD_OVERRIDE_FORCE		(1 << 3)
+#define  BACKLIGHT_ENABLE		(1 << 2)
+#define  PWR_DOWN_ON_RESET		(1 << 1)
+#define  PWR_STATE_TARGET		(1 << 0)
 /*
  * Indicates that all dependencies of the panel are on:
  *
@@ -7750,6 +7762,7 @@ enum {
 #define   ICP_DDIB_HPD_LONG_DETECT		(2 << 4)
 #define   ICP_DDIB_HPD_SHORT_LONG_DETECT	(3 << 4)
 #define   ICP_DDIA_HPD_ENABLE			(1 << 3)
+#define   ICP_DDIA_HPD_OP_DRIVE_1		(1 << 2)
 #define   ICP_DDIA_HPD_STATUS_MASK		(3 << 0)
 #define   ICP_DDIA_HPD_NO_DETECT		(0 << 0)
 #define   ICP_DDIA_HPD_SHORT_DETECT		(1 << 0)
@@ -9197,6 +9210,7 @@ enum skl_power_gate {
 #define _DP_TP_CTL_B			0x64140
 #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
 #define  DP_TP_CTL_ENABLE			(1 << 31)
+#define  DP_TP_CTL_FEC_ENABLE			(1 << 30)
 #define  DP_TP_CTL_MODE_SST			(0 << 27)
 #define  DP_TP_CTL_MODE_MST			(1 << 27)
 #define  DP_TP_CTL_FORCE_ACT			(1 << 25)
@@ -9215,6 +9229,7 @@ enum skl_power_gate {
 #define _DP_TP_STATUS_A			0x64044
 #define _DP_TP_STATUS_B			0x64144
 #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define  DP_TP_STATUS_FEC_ENABLE_LIVE		(1 << 28)
 #define  DP_TP_STATUS_IDLE_DONE			(1 << 25)
 #define  DP_TP_STATUS_ACT_SENT			(1 << 24)
 #define  DP_TP_STATUS_MODE_STATUS_MST		(1 << 23)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7110754..ca95ab2 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -136,6 +136,9 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 			  intel_engine_get_seqno(engine),
 			  seqno);
 
+		if (seqno == engine->timeline.seqno)
+			continue;
+
 		kthread_park(engine->breadcrumbs.signaler);
 
 		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6dbeed0..fc2eeab 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -1,10 +1,7 @@
 /*
- * (C) Copyright 2016 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
+ * (C) Copyright 2016 Intel Corporation
  */
 
 #include <linux/slab.h>
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index fe2ef4d..0e055ea 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -1,10 +1,9 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
  * i915_sw_fence.h - library routines for N:M synchronisation points
  *
  * Copyright (C) 2016 Intel Corporation
- *
- * This file is released under the GPLv2.
- *
  */
 
 #ifndef _I915_SW_FENCE_H_
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6b..535caeb 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -483,7 +483,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
-static const struct attribute *gen6_attrs[] = {
+static const struct attribute * const gen6_attrs[] = {
 	&dev_attr_gt_act_freq_mhz.attr,
 	&dev_attr_gt_cur_freq_mhz.attr,
 	&dev_attr_gt_boost_freq_mhz.attr,
@@ -495,7 +495,7 @@ static const struct attribute *gen6_attrs[] = {
 	NULL,
 };
 
-static const struct attribute *vlv_attrs[] = {
+static const struct attribute * const vlv_attrs[] = {
 	&dev_attr_gt_act_freq_mhz.attr,
 	&dev_attr_gt_cur_freq_mhz.attr,
 	&dev_attr_gt_boost_freq_mhz.attr,
@@ -516,26 +516,21 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 {
 
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct drm_i915_error_state_buf error_str;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
 	struct i915_gpu_state *gpu;
 	ssize_t ret;
 
-	ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
-	if (ret)
-		return ret;
+	gpu = i915_first_error_state(i915);
+	if (gpu) {
+		ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
+		i915_gpu_state_put(gpu);
+	} else {
+		const char *str = "No error state collected\n";
+		size_t len = strlen(str);
 
-	gpu = i915_first_error_state(dev_priv);
-	ret = i915_error_state_to_str(&error_str, gpu);
-	if (ret)
-		goto out;
-
-	ret = count < error_str.bytes ? count : error_str.bytes;
-	memcpy(buf, error_str.buf, ret);
-
-out:
-	i915_gpu_state_put(gpu);
-	i915_error_state_buf_release(&error_str);
+		ret = min_t(size_t, count, len - off);
+		memcpy(buf, str + off, ret);
+	}
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 5858a43e..9726df3 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -44,16 +44,19 @@
 			     __stringify(x), (long)(x))
 
 #if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows(A, B) \
-	__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
+#define add_overflows_t(T, A, B) \
+	__builtin_add_overflow_p((A), (B), (T)0)
 #else
-#define add_overflows(A, B) ({ \
+#define add_overflows_t(T, A, B) ({ \
 	typeof(A) a = (A); \
 	typeof(B) b = (B); \
-	a + b < a; \
+	(T)(a + b) < a; \
 })
 #endif
 
+#define add_overflows(A, B) \
+	add_overflows_t(typeof((A) + (B)), (A), (B))
+
 #define range_overflows(start, size, max) ({ \
 	typeof(start) start__ = (start); \
 	typeof(size) size__ = (size); \
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 01f422d..4dd793b 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -26,6 +26,7 @@
  */
 
 #include <drm/drm_mipi_dsi.h>
+#include <drm/drm_atomic_helper.h>
 #include "intel_dsi.h"
 
 static inline int header_credits_available(struct drm_i915_private *dev_priv,
@@ -107,6 +108,90 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
 	}
 }
 
+static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
+			       u32 len)
+{
+	struct intel_dsi *intel_dsi = host->intel_dsi;
+	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+	int free_credits;
+	int i, j;
+
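+	/* Pack payload bytes LSB-first into dwords, one FIFO credit per write. */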
+	for (i = 0; i < len; i += 4) {
+		u32 tmp = 0;
+
+		free_credits = payload_credits_available(dev_priv, dsi_trans);
+		if (free_credits < 1) {
+			DRM_ERROR("Payload credit not available\n");
+			return false;
+		}
+
+		for (j = 0; j < min_t(u32, len - i, 4); j++)
+			tmp |= *data++ << 8 * j;
+
+		I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+	}
+
+	return true;
+}
+
+static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+			    struct mipi_dsi_packet pkt, bool enable_lpdt)
+{
+	struct intel_dsi *intel_dsi = host->intel_dsi;
+	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+	u32 tmp;
+	int free_credits;
+
+	/* check if header credit available */
+	free_credits = header_credits_available(dev_priv, dsi_trans);
+	if (free_credits < 1) {
+		DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+		return -1;
+	}
+
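+	/* Build the header dword: payload flag, LPDT, VC, data type and word count. */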
+	tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+
+	if (pkt.payload)
+		tmp |= PAYLOAD_PRESENT;
+	else
+		tmp &= ~PAYLOAD_PRESENT;
+
+	tmp &= ~VBLANK_FENCE;
+
+	if (enable_lpdt)
+		tmp |= LP_DATA_TRANSFER;
+
+	tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
+	tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
+	tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
+	tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
+	tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
+	I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+
+	return 0;
+}
+
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+			      struct mipi_dsi_packet pkt)
+{
+	/* payload queue can accept *256 bytes*, check limit */
+	if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
+		DRM_ERROR("payload size exceeds max queue limit\n");
+		return -1;
+	}
+
+	/* load data into command payload queue */
+	if (!add_payld_to_queue(host, pkt.payload,
+				pkt.payload_length)) {
+		DRM_ERROR("adding payload to queue failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
 static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -172,6 +257,45 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
 	}
 }
 
+static void configure_dual_link_mode(struct intel_encoder *encoder,
+				     const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 dss_ctl1;
+
+	dss_ctl1 = I915_READ(DSS_CTL1);
+	dss_ctl1 |= SPLITTER_ENABLE;
+	dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+	dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+
+	if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+		const struct drm_display_mode *adjusted_mode =
+					&pipe_config->base.adjusted_mode;
+		u32 dss_ctl2;
+		u16 hactive = adjusted_mode->crtc_hdisplay;
+		u16 dl_buffer_depth;
+
+		dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE;
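+		/* In front/back mode each link drives half the width plus the overlap. */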
+		dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
+
+		if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
+			DRM_ERROR("DL buffer depth exceeds max value\n");
+
+		dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+		dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+		dss_ctl2 = I915_READ(DSS_CTL2);
+		dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+		dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+		I915_WRITE(DSS_CTL2, dss_ctl2);
+	} else {
+		/* Interleave */
+		dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+	}
+
+	I915_WRITE(DSS_CTL1, dss_ctl1);
+}
+
 static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -412,6 +536,62 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
 	}
 }
 
+static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	mutex_lock(&dev_priv->dpll_lock);
+	tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+	}
+
+	I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	mutex_lock(&dev_priv->dpll_lock);
+	tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+	}
+
+	I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_map_pll(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+	enum port port;
+	u32 val;
+
+	mutex_lock(&dev_priv->dpll_lock);
+
+	val = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_dsi_port(port, intel_dsi->ports) {
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+	}
+	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+	POSTING_READ(DPCLKA_CFGCR0_ICL);
+
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
 static void
 gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 			       const struct intel_crtc_state *pipe_config)
@@ -506,7 +686,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 			I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
 		}
 
-		//TODO: configure DSS_CTL1
+		/* configure stream splitting */
+		configure_dual_link_mode(encoder, pipe_config);
 	}
 
 	for_each_dsi_port(port, intel_dsi->ports) {
@@ -758,6 +939,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
 
 	/* Step (4h, 4i, 4j, 4k): Configure transcoder */
 	gen11_dsi_configure_transcoder(encoder, pipe_config);
+
+	/* Step 4l: Gate DDI clocks */
+	gen11_dsi_gate_clocks(encoder);
 }
 
 static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -799,18 +983,25 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
 	wait_for_cmds_dispatched_to_panel(encoder);
 }
 
-static void __attribute__((unused))
-gen11_dsi_pre_enable(struct intel_encoder *encoder,
-		     const struct intel_crtc_state *pipe_config,
-		     const struct drm_connector_state *conn_state)
+static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+				     const struct intel_crtc_state *pipe_config,
+				     const struct drm_connector_state *conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-
 	/* step2: enable IO power */
 	gen11_dsi_enable_io_power(encoder);
 
 	/* step3: enable DSI PLL */
 	gen11_dsi_program_esc_clk_div(encoder);
+}
+
+static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+				 const struct intel_crtc_state *pipe_config,
+				 const struct drm_connector_state *conn_state)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+	/* step3b */
+	gen11_dsi_map_pll(encoder, pipe_config);
 
 	/* step4: enable DSI port and DPHY */
 	gen11_dsi_enable_port_and_phy(encoder, pipe_config);
@@ -912,6 +1103,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
 	u32 tmp;
 	enum port port;
 
+	gen11_dsi_ungate_clocks(encoder);
 	for_each_dsi_port(port, intel_dsi->ports) {
 		tmp = I915_READ(DDI_BUF_CTL(port));
 		tmp &= ~DDI_BUF_CTL_ENABLE;
@@ -923,6 +1115,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
 			DRM_ERROR("DDI port:%c buffer not idle\n",
 				  port_name(port));
 	}
+	gen11_dsi_gate_clocks(encoder);
 }
 
 static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -945,10 +1138,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
 	}
 }
 
-static void __attribute__((unused)) gen11_dsi_disable(
-			struct intel_encoder *encoder,
-			const struct intel_crtc_state *old_crtc_state,
-			const struct drm_connector_state *old_conn_state)
+static void gen11_dsi_disable(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *old_crtc_state,
+			      const struct drm_connector_state *old_conn_state)
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
@@ -972,10 +1164,289 @@ static void __attribute__((unused)) gen11_dsi_disable(
 	gen11_dsi_disable_io_power(encoder);
 }
 
+static void gen11_dsi_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 pll_id;
+
+	/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
+	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+	pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+	pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+	pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+}
+
+static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
+{
+	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+						   base);
+	struct intel_connector *intel_connector = intel_dsi->attached_connector;
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	const struct drm_display_mode *fixed_mode =
+					intel_connector->panel.fixed_mode;
+	struct drm_display_mode *adjusted_mode =
+					&pipe_config->base.adjusted_mode;
+
+	intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+	intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+	adjusted_mode->flags = 0;
+
+	/* Dual link goes to transcoder DSI '0' */
+	if (intel_dsi->ports == BIT(PORT_B))
+		pipe_config->cpu_transcoder = TRANSCODER_DSI_1;
+	else
+		pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+
+	pipe_config->clock_set = true;
+	pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+	return true;
+}
+
+static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+				       struct intel_crtc_state *crtc_state)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u64 domains = 0;
+	enum port port;
+
+	for_each_dsi_port(port, intel_dsi->ports)
+		if (port == PORT_A)
+			domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
+		else
+			domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
+
+	return domains;
+}
+
+static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+	enum transcoder dsi_trans;
+	bool ret = false;
+
+	if (!intel_display_power_get_if_enabled(dev_priv,
+						encoder->power_domain))
+		return false;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		case TRANS_DDI_EDP_INPUT_A_ON:
+			*pipe = PIPE_A;
+			break;
+		case TRANS_DDI_EDP_INPUT_B_ONOFF:
+			*pipe = PIPE_B;
+			break;
+		case TRANS_DDI_EDP_INPUT_C_ONOFF:
+			*pipe = PIPE_C;
+			break;
+		default:
+			DRM_ERROR("Invalid PIPE input\n");
+			goto out;
+		}
+
+		tmp = I915_READ(PIPECONF(dsi_trans));
+		ret = tmp & PIPECONF_ENABLE;
+	}
+out:
+	intel_display_power_put(dev_priv, encoder->power_domain);
+	return ret;
+}
+
+static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+	intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
+	.destroy = gen11_dsi_encoder_destroy,
+};
+
+static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+	.late_register = intel_connector_register,
+	.early_unregister = intel_connector_unregister,
+	.destroy = intel_connector_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.atomic_get_property = intel_digital_connector_atomic_get_property,
+	.atomic_set_property = intel_digital_connector_atomic_set_property,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
+	.get_modes = intel_dsi_get_modes,
+	.mode_valid = intel_dsi_mode_valid,
+	.atomic_check = intel_digital_connector_atomic_check,
+};
+
+static int gen11_dsi_host_attach(struct mipi_dsi_host *host,
+				 struct mipi_dsi_device *dsi)
+{
+	return 0;
+}
+
+static int gen11_dsi_host_detach(struct mipi_dsi_host *host,
+				 struct mipi_dsi_device *dsi)
+{
+	return 0;
+}
+
+static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
+				       const struct mipi_dsi_msg *msg)
+{
+	struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+	struct mipi_dsi_packet dsi_pkt;
+	ssize_t ret;
+	bool enable_lpdt = false;
+
+	ret = mipi_dsi_create_packet(&dsi_pkt, msg);
+	if (ret < 0)
+		return ret;
+
+	if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+		enable_lpdt = true;
+
+	/* send packet header */
+	ret  = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
+	if (ret < 0)
+		return ret;
+
+	/* only long packets carry a payload */
+	if (mipi_dsi_packet_format_is_long(msg->type)) {
+		ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt);
+		if (ret < 0)
+			return ret;
+	}
+
+	//TODO: add payload receive code if needed
+
+	ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
+
+	return ret;
+}
+
+static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
+	.attach = gen11_dsi_host_attach,
+	.detach = gen11_dsi_host_detach,
+	.transfer = gen11_dsi_host_transfer,
+};
+
 void icl_dsi_init(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = &dev_priv->drm;
+	struct intel_dsi *intel_dsi;
+	struct intel_encoder *encoder;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	struct drm_display_mode *scan, *fixed_mode = NULL;
 	enum port port;
 
 	if (!intel_bios_is_dsi_present(dev_priv, &port))
 		return;
+
+	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+	if (!intel_dsi)
+		return;
+
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector) {
+		kfree(intel_dsi);
+		return;
+	}
+
+	encoder = &intel_dsi->base;
+	intel_dsi->attached_connector = intel_connector;
+	connector = &intel_connector->base;
+
+	/* register DSI encoder with DRM subsystem */
+	drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs,
+			 DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
+
+	encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
+	encoder->pre_enable = gen11_dsi_pre_enable;
+	encoder->disable = gen11_dsi_disable;
+	encoder->port = port;
+	encoder->get_config = gen11_dsi_get_config;
+	encoder->compute_config = gen11_dsi_compute_config;
+	encoder->get_hw_state = gen11_dsi_get_hw_state;
+	encoder->type = INTEL_OUTPUT_DSI;
+	encoder->cloneable = 0;
+	encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+	encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+	encoder->get_power_domains = gen11_dsi_get_power_domains;
+
+	/* register DSI connector with DRM subsystem */
+	drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DSI);
+	drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	/* attach connector to encoder */
+	intel_connector_attach_encoder(intel_connector, encoder);
+
+	/* fill mode info from VBT */
+	mutex_lock(&dev->mode_config.mutex);
+	intel_dsi_vbt_get_modes(intel_dsi);
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+			fixed_mode = drm_mode_duplicate(dev, scan);
+			break;
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (!fixed_mode) {
+		DRM_ERROR("DSI fixed mode info missing\n");
+		goto err;
+	}
+
+	connector->display_info.width_mm = fixed_mode->width_mm;
+	connector->display_info.height_mm = fixed_mode->height_mm;
+	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+	intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+	if (dev_priv->vbt.dsi.config->dual_link)
+		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
+	else
+		intel_dsi->ports = BIT(port);
+
+	intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+	intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		struct intel_dsi_host *host;
+
+		host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port);
+		if (!host)
+			goto err;
+
+		intel_dsi->dsi_hosts[port] = host;
+	}
+
+	if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+		DRM_DEBUG_KMS("no device found\n");
+		goto err;
+	}
+
+	return;
+
+err:
+	drm_encoder_cleanup(&encoder->base);
+	kfree(intel_dsi);
+	kfree(intel_connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index a5a2c8f..8cb02f2 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -184,6 +184,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
 	crtc_state->fifo_changed = false;
 	crtc_state->wm.need_postvbl_update = false;
 	crtc_state->fb_bits = 0;
+	crtc_state->update_planes = 0;
 
 	return &crtc_state->base;
 }
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 905f8ef..0a73e6e 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -139,6 +139,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 	if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
 		crtc_state->nv12_planes |= BIT(intel_plane->id);
 
+	if (state->visible || old_plane_state->base.visible)
+		crtc_state->update_planes |= BIT(intel_plane->id);
+
 	return intel_plane_atomic_calc_changes(old_crtc_state,
 					       &crtc_state->base,
 					       old_plane_state,
@@ -168,27 +171,75 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 						   to_intel_plane_state(new_plane_state));
 }
 
-void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
-				 struct intel_crtc *crtc,
-				 struct intel_crtc_state *old_crtc_state,
-				 struct intel_crtc_state *new_crtc_state)
+static struct intel_plane *
+skl_next_plane_to_commit(struct intel_atomic_state *state,
+			 struct intel_crtc *crtc,
+			 struct skl_ddb_entry entries_y[I915_MAX_PLANES],
+			 struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
+			 unsigned int *update_mask)
 {
-	struct intel_plane_state *new_plane_state;
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct intel_plane_state *plane_state;
 	struct intel_plane *plane;
-	u32 update_mask;
 	int i;
 
-	update_mask = old_crtc_state->active_planes;
-	update_mask |= new_crtc_state->active_planes;
+	if (*update_mask == 0)
+		return NULL;
 
-	for_each_new_intel_plane_in_state(old_state, plane, new_plane_state, i) {
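+	/* Pick a plane whose new ddb allocation no longer overlaps those still in use. */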
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		enum plane_id plane_id = plane->id;
+
 		if (crtc->pipe != plane->pipe ||
-		    !(update_mask & BIT(plane->id)))
+		    !(*update_mask & BIT(plane_id)))
 			continue;
 
+		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+						entries_y,
+						I915_MAX_PLANES, plane_id) ||
+		    skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
+						entries_uv,
+						I915_MAX_PLANES, plane_id))
+			continue;
+
+		*update_mask &= ~BIT(plane_id);
+		entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
+		entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+		return plane;
+	}
+
+	/* should never happen */
+	WARN_ON(1);
+
+	return NULL;
+}
+
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+			       struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct skl_ddb_entry entries_y[I915_MAX_PLANES];
+	struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
+	u32 update_mask = new_crtc_state->update_planes;
+	struct intel_plane *plane;
+
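+	/* Start from the ddb layout currently programmed into the hardware. */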
+	memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
+	       sizeof(old_crtc_state->wm.skl.plane_ddb_y));
+	memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
+	       sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
+
+	while ((plane = skl_next_plane_to_commit(state, crtc,
+						 entries_y, entries_uv,
+						 &update_mask))) {
+		struct intel_plane_state *new_plane_state =
+			intel_atomic_get_new_plane_state(state, plane);
+
 		if (new_plane_state->base.visible) {
 			trace_intel_update_plane(&plane->base, crtc);
-
 			plane->update_plane(plane, new_crtc_state, new_plane_state);
 		} else if (new_plane_state->slave) {
 			struct intel_plane *master =
@@ -204,15 +255,38 @@ void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
 			 * plane_state.
 			 */
 			new_plane_state =
-				intel_atomic_get_new_plane_state(old_state, master);
+				intel_atomic_get_new_plane_state(state, master);
 
 			trace_intel_update_plane(&plane->base, crtc);
-
 			plane->update_slave(plane, new_crtc_state, new_plane_state);
 		} else {
 			trace_intel_disable_plane(&plane->base, crtc);
+			plane->disable_plane(plane, new_crtc_state);
+		}
+	}
+}
 
-			plane->disable_plane(plane, crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+				struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	u32 update_mask = new_crtc_state->update_planes;
+	struct intel_plane_state *new_plane_state;
+	struct intel_plane *plane;
+	int i;
+
+	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+		if (crtc->pipe != plane->pipe ||
+		    !(update_mask & BIT(plane->id)))
+			continue;
+
+		if (new_plane_state->base.visible) {
+			trace_intel_update_plane(&plane->base, crtc);
+			plane->update_plane(plane, new_crtc_state, new_plane_state);
+		} else {
+			trace_intel_disable_plane(&plane->base, crtc);
+			plane->disable_plane(plane, new_crtc_state);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0694aa8..6d3e026 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1752,7 +1752,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
 	const struct bdb_header *bdb;
 	u8 __iomem *bios = NULL;
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0) {
+	if (!HAS_DISPLAY(dev_priv)) {
 		DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 84bf8d8..447c525 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,11 +27,7 @@
 
 #include "i915_drv.h"
 
-#ifdef CONFIG_SMP
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
-#else
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
-#endif
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
 
 static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
 {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ad11540..f3e1d6a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -28,6 +28,7 @@
 #include <drm/drm_scdc_helper.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
+#include "intel_dsi.h"
 
 struct ddi_buf_trans {
 	u32 trans1;	/* balance leg enable, de-emph level */
@@ -1363,8 +1364,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 	return dco_freq / (p0 * p1 * p2 * 5);
 }
 
-static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
-			       enum intel_dpll_id pll_id)
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+			enum intel_dpll_id pll_id)
 {
 	uint32_t cfgcr0, cfgcr1;
 	uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -2154,6 +2155,12 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	    intel_port_is_tc(dev_priv, encoder->port))
 		domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
 
+	/*
+	 * VDSC power is needed when DSC is enabled
+	 */
+	if (crtc_state->dsc_params.compression_enable)
+		domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
+
 	return domains;
 }
 
@@ -2785,77 +2792,54 @@ uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
-			   struct intel_crtc_state *crtc_state,
-			   struct drm_atomic_state *old_state)
+static void icl_map_plls_to_ports(struct intel_encoder *encoder,
+				  const struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct drm_connector_state *conn_state;
-	struct drm_connector *conn;
-	int i;
+	enum port port = encoder->port;
+	u32 val;
 
-	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
-		struct intel_encoder *encoder =
-			to_intel_encoder(conn_state->best_encoder);
-		enum port port;
-		uint32_t val;
+	mutex_lock(&dev_priv->dpll_lock);
 
-		if (conn_state->crtc != crtc)
-			continue;
+	val = I915_READ(DPCLKA_CFGCR0_ICL);
+	WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
 
-		port = encoder->port;
-		mutex_lock(&dev_priv->dpll_lock);
-
-		val = I915_READ(DPCLKA_CFGCR0_ICL);
-		WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
-
-		if (intel_port_is_combophy(dev_priv, port)) {
-			val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
-			val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
-			I915_WRITE(DPCLKA_CFGCR0_ICL, val);
-			POSTING_READ(DPCLKA_CFGCR0_ICL);
-		}
-
-		val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+	if (intel_port_is_combophy(dev_priv, port)) {
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
 		I915_WRITE(DPCLKA_CFGCR0_ICL, val);
-
-		mutex_unlock(&dev_priv->dpll_lock);
+		POSTING_READ(DPCLKA_CFGCR0_ICL);
 	}
+
+	val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+	mutex_unlock(&dev_priv->dpll_lock);
 }
 
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
-			     struct intel_crtc_state *crtc_state,
-			     struct drm_atomic_state *old_state)
+static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct drm_connector_state *old_conn_state;
-	struct drm_connector *conn;
-	int i;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
+	u32 val;
 
-	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
-		struct intel_encoder *encoder =
-			to_intel_encoder(old_conn_state->best_encoder);
-		enum port port;
+	mutex_lock(&dev_priv->dpll_lock);
 
-		if (old_conn_state->crtc != crtc)
-			continue;
+	val = I915_READ(DPCLKA_CFGCR0_ICL);
+	val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
 
-		port = encoder->port;
-		mutex_lock(&dev_priv->dpll_lock);
-		I915_WRITE(DPCLKA_CFGCR0_ICL,
-			   I915_READ(DPCLKA_CFGCR0_ICL) |
-			   icl_dpclka_cfgcr0_clk_off(dev_priv, port));
-		mutex_unlock(&dev_priv->dpll_lock);
-	}
+	mutex_unlock(&dev_priv->dpll_lock);
 }
 
 void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 val;
-	enum port port = encoder->port;
-	bool clk_enabled;
+	enum port port;
+	u32 port_mask;
+	bool ddi_clk_needed;
 
 	/*
 	 * In case of DP MST, we sanitize the primary encoder only, not the
@@ -2864,9 +2848,6 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
 	if (encoder->type == INTEL_OUTPUT_DP_MST)
 		return;
 
-	val = I915_READ(DPCLKA_CFGCR0_ICL);
-	clk_enabled = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, port));
-
 	if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
 		u8 pipe_mask;
 		bool is_mst;
@@ -2880,20 +2861,52 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
 			return;
 	}
 
-	if (clk_enabled == !!encoder->base.crtc)
-		return;
+	port_mask = BIT(encoder->port);
+	ddi_clk_needed = encoder->base.crtc;
 
-	/*
-	 * Punt on the case now where clock is disabled, but the encoder is
-	 * enabled, something else is really broken then.
-	 */
-	if (WARN_ON(!clk_enabled))
-		return;
+	if (encoder->type == INTEL_OUTPUT_DSI) {
+		struct intel_encoder *other_encoder;
 
-	DRM_NOTE("Port %c is disabled but it has a mapped PLL, unmap it\n",
-		 port_name(port));
-	val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
-	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+		port_mask = intel_dsi_encoder_ports(encoder);
+		/*
+		 * Sanity check that we haven't incorrectly registered another
+		 * encoder using any of the ports of this DSI encoder.
+		 */
+		for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+			if (other_encoder == encoder)
+				continue;
+
+			if (WARN_ON(port_mask & BIT(other_encoder->port)))
+				return;
+		}
+		/*
+		 * DSI ports should have their DDI clock ungated when disabled
+		 * and gated when enabled.
+		 */
+		ddi_clk_needed = !encoder->base.crtc;
+	}
+
+	val = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_port_masked(port, port_mask) {
+		bool ddi_clk_ungated = !(val &
+					 icl_dpclka_cfgcr0_clk_off(dev_priv,
+								   port));
+
+		if (ddi_clk_needed == ddi_clk_ungated)
+			continue;
+
+		/*
+		 * Punt for now on the case where the clock is gated but would
+		 * be needed by the port; something else is really broken then.
+		 */
+		if (WARN_ON(ddi_clk_needed))
+			continue;
+
+		DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+			 port_name(port));
+		val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+		I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+	}
 }
 
 static void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -3096,6 +3109,53 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
 	I915_WRITE(MG_DP_MODE(port, 1), ln1);
 }
 
+static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
+					const struct intel_crtc_state *crtc_state)
+{
+	if (!crtc_state->fec_enable)
+		return;
+
+	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
+		DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+}
+
+static void intel_ddi_enable_fec(struct intel_encoder *encoder,
+				 const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
+	u32 val;
+
+	if (!crtc_state->fec_enable)
+		return;
+
+	val = I915_READ(DP_TP_CTL(port));
+	val |= DP_TP_CTL_FEC_ENABLE;
+	I915_WRITE(DP_TP_CTL(port), val);
+
+	if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+				    DP_TP_STATUS_FEC_ENABLE_LIVE,
+				    DP_TP_STATUS_FEC_ENABLE_LIVE,
+				    1))
+		DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+}
+
+static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
+					const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
+	u32 val;
+
+	if (!crtc_state->fec_enable)
+		return;
+
+	val = I915_READ(DP_TP_CTL(port));
+	val &= ~DP_TP_CTL_FEC_ENABLE;
+	I915_WRITE(DP_TP_CTL(port), val);
+	POSTING_READ(DP_TP_CTL(port));
+}
+
 static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 				    const struct intel_crtc_state *crtc_state,
 				    const struct drm_connector_state *conn_state)
@@ -3134,14 +3194,21 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	intel_ddi_init_dp_buf_reg(encoder);
 	if (!is_mst)
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
+					      true);
+	intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
 	intel_dp_start_link_train(intel_dp);
 	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
 		intel_dp_stop_link_train(intel_dp);
 
+	intel_ddi_enable_fec(encoder, crtc_state);
+
 	icl_enable_phy_clock_gating(dig_port);
 
 	if (!is_mst)
 		intel_ddi_enable_pipe_clock(crtc_state);
+
+	intel_dsc_enable(encoder, crtc_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -3208,6 +3275,9 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 
 	WARN_ON(crtc_state->has_pch_encoder);
 
+	if (INTEL_GEN(dev_priv) >= 11)
+		icl_map_plls_to_ports(encoder, crtc_state);
+
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -3228,7 +3298,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 	}
 }
 
-static void intel_disable_ddi_buf(struct intel_encoder *encoder)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder,
+				  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
@@ -3247,6 +3318,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder)
 	val |= DP_TP_CTL_LINK_TRAIN_PAT1;
 	I915_WRITE(DP_TP_CTL(port), val);
 
+	/* Disable FEC in DP Sink */
+	intel_ddi_disable_fec_state(encoder, crtc_state);
+
 	if (wait)
 		intel_wait_ddi_buf_idle(dev_priv, port);
 }
@@ -3270,7 +3344,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 	}
 
-	intel_disable_ddi_buf(encoder);
+	intel_disable_ddi_buf(encoder, old_crtc_state);
 
 	intel_edp_panel_vdd_on(intel_dp);
 	intel_edp_panel_off(intel_dp);
@@ -3293,7 +3367,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
 
 	intel_ddi_disable_pipe_clock(old_crtc_state);
 
-	intel_disable_ddi_buf(encoder);
+	intel_disable_ddi_buf(encoder, old_crtc_state);
 
 	intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
 
@@ -3306,6 +3380,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
 				   const struct intel_crtc_state *old_crtc_state,
 				   const struct drm_connector_state *old_conn_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
 	/*
 	 * When called from DP MST code:
 	 * - old_conn_state will be NULL
@@ -3325,6 +3401,9 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
 	else
 		intel_ddi_post_disable_dp(encoder,
 					  old_crtc_state, old_conn_state);
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		icl_unmap_plls_to_ports(encoder);
 }
 
 void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3344,7 +3423,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
 	val &= ~FDI_RX_ENABLE;
 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 
-	intel_disable_ddi_buf(encoder);
+	intel_disable_ddi_buf(encoder, old_crtc_state);
 	intel_ddi_clk_disable(encoder);
 
 	val = I915_READ(FDI_RX_MISC(PIPE_A));
@@ -3491,6 +3570,9 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
 	intel_edp_drrs_disable(intel_dp, old_crtc_state);
 	intel_psr_disable(intel_dp, old_crtc_state);
 	intel_edp_backlight_off(old_conn_state);
+	/* Disable the decompression in DP Sink */
+	intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
+					      false);
 }
 
 static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
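
Taken together, the intel_ddi.c hunks establish a fixed ordering for bringing up a DP link with compression: sink-side DSC and FEC readiness are programmed over DPCD before link training, source-side FEC is enabled right after training (set DP_TP_CTL_FEC_ENABLE, then poll DP_TP_STATUS_FEC_ENABLE_LIVE), and the VDSC engine comes up last. A runnable sketch of that ordering with hypothetical stub hooks, not the real i915 entry points:

#include <stdio.h>

/* Hypothetical stubs standing in for the driver hooks in the hunks above. */
static void sink_set_decompression(int on) { printf("DPCD DP_DSC_ENABLE = %d\n", on); }
static void sink_set_fec_ready(void)       { printf("DPCD DP_FEC_CONFIGURATION = FEC_READY\n"); }
static void start_link_training(void)      { printf("link training\n"); }
static void source_enable_fec(void)        { printf("DP_TP_CTL |= FEC_ENABLE; wait for FEC_ENABLE_LIVE\n"); }
static void enable_pipe_clock(void)        { printf("pipe clock on\n"); }
static void enable_dsc(void)               { printf("VDSC engine(s) on\n"); }

/*
 * The ordering intel_ddi_pre_enable_dp() follows after this patch:
 * sink readiness is programmed before training, source FEC right
 * after training completes, and DSC last once the pipe clock runs.
 */
int main(void)
{
	sink_set_decompression(1);
	sink_set_fec_ready();
	start_link_training();
	source_enable_fec();
	enable_pipe_clock();
	enable_dsc();
	return 0;
}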
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index ceecb5b..1e56319 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -77,6 +77,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
 #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
 #undef PRINT_FLAG
+
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
+	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
+#undef PRINT_FLAG
 }
 
 static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
@@ -782,7 +786,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
 	if (i915_modparams.disable_display) {
 		DRM_INFO("Display disabled (module parameter)\n");
 		info->num_pipes = 0;
-	} else if (info->num_pipes > 0 &&
+	} else if (HAS_DISPLAY(dev_priv) &&
 		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
 		   HAS_PCH_SPLIT(dev_priv)) {
 		u32 fuse_strap = I915_READ(FUSE_STRAP);
@@ -807,7 +811,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
 			DRM_INFO("PipeC fused off\n");
 			info->num_pipes -= 1;
 		}
-	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+	} else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
 		u32 dfsm = I915_READ(SKL_DFSM);
 		u8 disabled_mask = 0;
 		bool invalid;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 88f9721..1caf24e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -89,35 +89,38 @@ enum intel_ppgtt {
 	func(is_alpha_support); \
 	/* Keep has_* in alphabetical order */ \
 	func(has_64bit_reloc); \
-	func(has_csr); \
-	func(has_ddi); \
-	func(has_dp_mst); \
 	func(has_reset_engine); \
-	func(has_fbc); \
 	func(has_fpga_dbg); \
-	func(has_gmch_display); \
 	func(has_guc); \
 	func(has_guc_ct); \
-	func(has_hotplug); \
 	func(has_l3_dpf); \
 	func(has_llc); \
 	func(has_logical_ring_contexts); \
 	func(has_logical_ring_elsq); \
 	func(has_logical_ring_preemption); \
-	func(has_overlay); \
 	func(has_pooled_eu); \
-	func(has_psr); \
 	func(has_rc6); \
 	func(has_rc6p); \
 	func(has_runtime_pm); \
 	func(has_snoop); \
 	func(has_coherent_ggtt); \
 	func(unfenced_needs_alignment); \
+	func(hws_needs_physical);
+
+#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
+	/* Keep in alphabetical order */ \
 	func(cursor_needs_physical); \
-	func(hws_needs_physical); \
+	func(has_csr); \
+	func(has_ddi); \
+	func(has_dp_mst); \
+	func(has_fbc); \
+	func(has_gmch_display); \
+	func(has_hotplug); \
+	func(has_ipc); \
+	func(has_overlay); \
+	func(has_psr); \
 	func(overlay_needs_physical); \
-	func(supports_tv); \
-	func(has_ipc);
+	func(supports_tv);
 
 #define GEN_MAX_SLICES		(6) /* CNL upper bound */
 #define GEN_MAX_SUBSLICES	(8) /* ICL upper bound */
@@ -172,6 +175,13 @@ struct intel_device_info {
 #define DEFINE_FLAG(name) u8 name:1
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 #undef DEFINE_FLAG
+
+	struct {
+#define DEFINE_FLAG(name) u8 name:1
+		DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+	} display;
+
 	u16 ddb_size; /* in blocks */
 
 	/* Register offsets for the various display pipes and transcoders */
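
The split into DEV_INFO_FOR_EACH_FLAG and DEV_INFO_DISPLAY_FOR_EACH_FLAG relies on the X-macro idiom: one flag list is expanded twice, once into 1-bit bitfields and once into print statements, so the two can never drift apart. A self-contained sketch of the idiom with a hypothetical flag list:

#include <stdio.h>

/* Hypothetical flag list in the same X-macro style as
 * DEV_INFO_DISPLAY_FOR_EACH_FLAG; the names are examples only. */
#define DISPLAY_FLAGS(func) \
	func(has_fbc)       \
	func(has_psr)       \
	func(has_dp_mst)

struct display_info {
	/* Expand the list into 1-bit bitfields, as intel_device_info does. */
#define DEFINE_FLAG(name) unsigned char name:1;
	DISPLAY_FLAGS(DEFINE_FLAG)
#undef DEFINE_FLAG
};

static void dump_flags(const struct display_info *info)
{
	/* Expand the same list again, this time into printf calls. */
#define PRINT_FLAG(name) printf("%s: %s\n", #name, info->name ? "yes" : "no");
	DISPLAY_FLAGS(PRINT_FLAG)
#undef PRINT_FLAG
}

int main(void)
{
	struct display_info info = { .has_fbc = 1, .has_dp_mst = 1 };

	dump_flags(&info);
	return 0;
}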
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 812ec5a..07c8618 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2341,10 +2341,26 @@ static int intel_fb_offset_to_xy(int *x, int *y,
 				 int color_plane)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	unsigned int height;
 
 	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
-	    fb->offsets[color_plane] % intel_tile_size(dev_priv))
+	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
+			      fb->offsets[color_plane], color_plane);
 		return -EINVAL;
+	}
+
+	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+	height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+	/* Catch potential overflows early */
+	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+			    fb->offsets[color_plane])) {
+		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
+			      fb->offsets[color_plane], fb->pitches[color_plane],
+			      color_plane);
+		return -ERANGE;
+	}
 
 	*x = 0;
 	*y = 0;
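
The new check rejects framebuffers whose aligned height times pitch, plus the plane offset, would wrap a u32. add_overflows_t() and mul_u32_u32() are kernel helpers; a standalone sketch of the same test done with a plain 64-bit widen:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Reject the framebuffer if height * pitch + offset cannot be
 * represented in a u32; widening to 64 bits makes the check exact.
 */
static bool fb_range_overflows(uint32_t height, uint32_t pitch, uint32_t offset)
{
	uint64_t end = (uint64_t)height * pitch + offset;

	return end > UINT32_MAX;
}

int main(void)
{
	printf("%d\n", fb_range_overflows(1080, 7680, 0));   /* 0: fits easily */
	printf("%d\n", fb_range_overflows(65536, 65536, 0)); /* 1: 2^32 wraps */
	return 0;
}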
@@ -2767,7 +2783,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 		intel_pre_disable_primary_noatomic(&crtc->base);
 
 	trace_intel_disable_plane(&plane->base, crtc);
-	plane->disable_plane(plane, crtc);
+	plane->disable_plane(plane, crtc_state);
 }
 
 static void
@@ -3315,7 +3331,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
 	u32 linear_offset;
 	u32 dspcntr = plane_state->ctl;
-	i915_reg_t reg = DSPCNTR(i9xx_plane);
 	int x = plane_state->color_plane[0].x;
 	int y = plane_state->color_plane[0].y;
 	unsigned long irqflags;
@@ -3330,47 +3345,51 @@ static void i9xx_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
+	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+
 	if (INTEL_GEN(dev_priv) < 4) {
 		/* pipesrc and dspsize control the size that is scaled from,
 		 * which should always be the user's requested size.
 		 */
+		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
 		I915_WRITE_FW(DSPSIZE(i9xx_plane),
 			      ((crtc_state->pipe_src_h - 1) << 16) |
 			      (crtc_state->pipe_src_w - 1));
-		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
 	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
 		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
 			      ((crtc_state->pipe_src_h - 1) << 16) |
 			      (crtc_state->pipe_src_w - 1));
-		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
 		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
 	}
 
-	I915_WRITE_FW(reg, dspcntr);
-
-	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		I915_WRITE_FW(DSPSURF(i9xx_plane),
-			      intel_plane_ggtt_offset(plane_state) +
-			      dspaddr_offset);
 		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
 	} else if (INTEL_GEN(dev_priv) >= 4) {
+		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
+		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+	}
+
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
+	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+	if (INTEL_GEN(dev_priv) >= 4)
 		I915_WRITE_FW(DSPSURF(i9xx_plane),
 			      intel_plane_ggtt_offset(plane_state) +
 			      dspaddr_offset);
-		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
-		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
-	} else {
+	else
 		I915_WRITE_FW(DSPADDR(i9xx_plane),
 			      intel_plane_ggtt_offset(plane_state) +
 			      dspaddr_offset);
-	}
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void i9xx_disable_plane(struct intel_plane *plane,
-			       struct intel_crtc *crtc)
+			       const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -3456,6 +3475,21 @@ static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
 	}
 }
 
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+					  int color_plane, unsigned int rotation)
+{
+	/*
+	 * The stride is expressed in units of 64 byte chunks for linear
+	 * buffers, or in numbers of tiles for tiled buffers.
+	 */
+	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+		return 64;
+	else if (drm_rotation_90_or_270(rotation))
+		return intel_tile_height(fb, color_plane);
+	else
+		return intel_tile_width_bytes(fb, color_plane);
+}
+
 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
 		     int color_plane)
 {
@@ -3466,16 +3500,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
 	if (color_plane >= fb->format->num_planes)
 		return 0;
 
-	/*
-	 * The stride is either expressed as a multiple of 64 bytes chunks for
-	 * linear buffers or in number of tiles for tiled buffers.
-	 */
-	if (drm_rotation_90_or_270(rotation))
-		stride /= intel_tile_height(fb, color_plane);
-	else
-		stride /= intel_fb_stride_alignment(fb, color_plane);
-
-	return stride;
+	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
 }
 
 static u32 skl_plane_ctl_format(uint32_t pixel_format)
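
skl_plane_stride_mult() captures the unit PLANE_STRIDE is programmed in: 64-byte chunks for linear buffers, the tile width in bytes for tiled buffers, and the tile height for 90/270 degree rotation. A small worked sketch of the byte-pitch-to-register conversion, using hypothetical fixed tile geometry rather than the real per-format helpers:

#include <stdint.h>
#include <stdio.h>

enum tiling { LINEAR, X_TILED, Y_TILED_ROT90 };

/*
 * Unit the hardware expects the stride in, mirroring
 * skl_plane_stride_mult() with hypothetical fixed tile geometry
 * (X-tile: 512 bytes wide; Y-tile: 32 rows tall when rotated).
 */
static unsigned int stride_mult(enum tiling t)
{
	switch (t) {
	case LINEAR:        return 64;   /* 64-byte chunks */
	case X_TILED:       return 512;  /* tile width in bytes */
	case Y_TILED_ROT90: return 32;   /* tile height in lines */
	}
	return 1;
}

int main(void)
{
	uint32_t pitch = 16384; /* byte pitch of color plane 0 */

	/* 16384-byte pitch -> 256 linear chunks, or 32 X-tiles. */
	printf("linear:  %u\n", pitch / stride_mult(LINEAR));
	printf("x-tiled: %u\n", pitch / stride_mult(X_TILED));
	return 0;
}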
@@ -5403,23 +5428,32 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 		intel_update_watermarks(crtc);
 }
 
-static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask)
+static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+				      struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	unsigned int update_mask = new_crtc_state->update_planes;
+	const struct intel_plane_state *old_plane_state;
 	struct intel_plane *plane;
 	unsigned fb_bits = 0;
+	int i;
 
 	intel_crtc_dpms_overlay_disable(crtc);
 
-	for_each_intel_plane_on_crtc(dev, crtc, plane) {
-		if (plane_mask & BIT(plane->id)) {
-			plane->disable_plane(plane, crtc);
+	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+		if (crtc->pipe != plane->pipe ||
+		    !(update_mask & BIT(plane->id)))
+			continue;
 
+		plane->disable_plane(plane, new_crtc_state);
+
+		if (old_plane_state->base.visible)
 			fb_bits |= plane->frontbuffer_bit;
-		}
 	}
 
-	intel_frontbuffer_flip(to_i915(dev), fb_bits);
+	intel_frontbuffer_flip(dev_priv, fb_bits);
 }
 
 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -5692,9 +5726,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	if (pipe_config->shared_dpll)
 		intel_enable_shared_dpll(pipe_config);
 
-	if (INTEL_GEN(dev_priv) >= 11)
-		icl_map_plls_to_ports(crtc, pipe_config, old_state);
-
 	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc_has_dp_encoder(pipe_config))
@@ -5889,6 +5920,8 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_ddi_disable_transcoder_func(old_crtc_state);
 
+	intel_dsc_disable(old_crtc_state);
+
 	if (INTEL_GEN(dev_priv) >= 9)
 		skylake_scaler_disable(intel_crtc);
 	else
@@ -5896,9 +5929,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
 
 	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
-	if (INTEL_GEN(dev_priv) >= 11)
-		icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
-
 	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
 }
 
@@ -6724,7 +6754,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
 }
 
 void
-intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
 		       int pixel_clock, int link_clock,
 		       struct intel_link_m_n *m_n,
 		       bool constant_n)
@@ -8939,7 +8969,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->width = ((val >> 0) & 0x1fff) + 1;
 
 	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
-	stride_mult = intel_fb_stride_alignment(fb, 0);
+	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
@@ -9303,10 +9333,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 				      struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_atomic_state *state =
 		to_intel_atomic_state(crtc_state->base.state);
 
-	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
+	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+	    IS_ICELAKE(dev_priv)) {
 		struct intel_encoder *encoder =
 			intel_get_crtc_new_encoder(state, crtc_state);
 
@@ -9444,11 +9476,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
+	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+	unsigned long enabled_panel_transcoders = 0;
+	enum transcoder panel_transcoder;
 	u32 tmp;
 
+	if (IS_ICELAKE(dev_priv))
+		panel_transcoder_mask |=
+			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
 	/*
 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
-	 * transcoder handled below.
+	 * and DSI transcoders handled below.
 	 */
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 
@@ -9456,29 +9495,49 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
 	 * consistency and less surprising code; it's in always on power).
 	 */
-	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
-	if (tmp & TRANS_DDI_FUNC_ENABLE) {
-		enum pipe trans_edp_pipe;
+	for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+		enum pipe trans_pipe;
+
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
+			continue;
+
+		/*
+		 * Log all enabled ones, only use the first one.
+		 *
+		 * FIXME: This won't work for two separate DSI displays.
+		 */
+		enabled_panel_transcoders |= BIT(panel_transcoder);
+		if (enabled_panel_transcoders != BIT(panel_transcoder))
+			continue;
+
 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
 		default:
-			WARN(1, "unknown pipe linked to edp transcoder\n");
+			WARN(1, "unknown pipe linked to transcoder %s\n",
+			     transcoder_name(panel_transcoder));
 			/* fall through */
 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
 		case TRANS_DDI_EDP_INPUT_A_ON:
-			trans_edp_pipe = PIPE_A;
+			trans_pipe = PIPE_A;
 			break;
 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
-			trans_edp_pipe = PIPE_B;
+			trans_pipe = PIPE_B;
 			break;
 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
-			trans_edp_pipe = PIPE_C;
+			trans_pipe = PIPE_C;
 			break;
 		}
 
-		if (trans_edp_pipe == crtc->pipe)
-			pipe_config->cpu_transcoder = TRANSCODER_EDP;
+		if (trans_pipe == crtc->pipe)
+			pipe_config->cpu_transcoder = panel_transcoder;
 	}
 
+	/*
+	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+	 */
+	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+		enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+
 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;
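
The WARN_ON above encodes the legal sets of simultaneously enabled panel transcoders: none, eDP alone, DSI0, DSI1, or DSI0+DSI1; in other words, if eDP is in the set it must be the only member. A standalone sketch of that predicate (bit assignments are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical bit assignments; only the combination logic matters. */
#define EDP  (1u << 0)
#define DSI0 (1u << 1)
#define DSI1 (1u << 2)

/* Valid iff eDP is absent, or eDP is the sole enabled transcoder. */
static bool combo_valid(unsigned int enabled)
{
	return !(enabled & EDP) || enabled == EDP;
}

int main(void)
{
	printf("%d\n", combo_valid(0));           /* 1 */
	printf("%d\n", combo_valid(EDP));         /* 1 */
	printf("%d\n", combo_valid(DSI0 | DSI1)); /* 1 */
	printf("%d\n", combo_valid(EDP | DSI0));  /* 0: triggers the WARN */
	return 0;
}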
@@ -9611,7 +9670,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	if (!active)
 		goto out;
 
-	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+	    IS_ICELAKE(dev_priv)) {
 		haswell_get_ddi_port_state(crtc, pipe_config);
 		intel_get_pipe_timings(crtc, pipe_config);
 	}
@@ -9667,7 +9727,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
 	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	u32 base;
 
-	if (INTEL_INFO(dev_priv)->cursor_needs_physical)
+	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
 		base = obj->phys_handle->busaddr;
 	else
 		base = intel_plane_ggtt_offset(plane_state);
@@ -9894,9 +9954,9 @@ static void i845_update_cursor(struct intel_plane *plane,
 }
 
 static void i845_disable_cursor(struct intel_plane *plane,
-				struct intel_crtc *crtc)
+				const struct intel_crtc_state *crtc_state)
 {
-	i845_update_cursor(plane, NULL, NULL);
+	i845_update_cursor(plane, crtc_state, NULL);
 }
 
 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -10087,8 +10147,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 	 * On some platforms writing CURCNTR first will also
 	 * cause CURPOS to be armed by the CURBASE write.
 	 * Without the CURCNTR write the CURPOS write would
-	 * arm itself. Thus we always start the full update
-	 * with a CURCNTR write.
+	 * arm itself. Thus we always update CURCNTR before
+	 * CURPOS.
 	 *
 	 * On other platforms CURPOS always requires the
 	 * CURBASE write to arm the update. Additionally
@@ -10098,15 +10158,20 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 	 * cursor that doesn't appear to move, or even change
 	 * shape. Thus we always write CURBASE.
 	 *
-	 * CURCNTR and CUR_FBC_CTL are always
-	 * armed by the CURBASE write only.
+	 * The other registers are armed by the CURBASE write
+	 * except when the plane is getting enabled at which time
+	 * the CURCNTR write arms the update.
 	 */
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		skl_write_cursor_wm(plane, crtc_state);
+
 	if (plane->cursor.base != base ||
 	    plane->cursor.size != fbc_ctl ||
 	    plane->cursor.cntl != cntl) {
-		I915_WRITE_FW(CURCNTR(pipe), cntl);
 		if (HAS_CUR_FBC(dev_priv))
 			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+		I915_WRITE_FW(CURCNTR(pipe), cntl);
 		I915_WRITE_FW(CURPOS(pipe), pos);
 		I915_WRITE_FW(CURBASE(pipe), base);
 
@@ -10122,9 +10187,9 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 }
 
 static void i9xx_disable_cursor(struct intel_plane *plane,
-				struct intel_crtc *crtc)
+				const struct intel_crtc_state *crtc_state)
 {
-	i9xx_update_cursor(plane, NULL, NULL);
+	i9xx_update_cursor(plane, crtc_state, NULL);
 }
 
 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -10835,8 +10900,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 			continue;
 
 		plane_state->linked_plane = NULL;
-		if (plane_state->slave && !plane_state->base.visible)
+		if (plane_state->slave && !plane_state->base.visible) {
 			crtc_state->active_planes &= ~BIT(plane->id);
+			crtc_state->update_planes |= BIT(plane->id);
+		}
 
 		plane_state->slave = false;
 	}
@@ -10877,6 +10944,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 		linked_state->slave = true;
 		linked_state->linked_plane = plane;
 		crtc_state->active_planes |= BIT(linked->id);
+		crtc_state->update_planes |= BIT(linked->id);
 		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
 	}
 
@@ -11887,6 +11955,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	struct skl_pipe_wm hw_wm, *sw_wm;
 	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+	struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
+	struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const enum pipe pipe = intel_crtc->pipe;
 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -11897,6 +11967,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
 	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
 
+	skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+
 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
 
@@ -11939,8 +12011,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		}
 
 		/* DDB */
-		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
-		sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+		hw_ddb_entry = &hw_ddb_y[plane];
+		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
 			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -11989,8 +12061,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		}
 
 		/* DDB */
-		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
-		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+		hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
 			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12668,7 +12740,6 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state);
 	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
 	bool modeset = needs_modeset(new_crtc_state);
 	struct intel_plane_state *new_plane_state =
@@ -12691,8 +12762,10 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 
 	intel_begin_crtc_commit(crtc, old_crtc_state);
 
-	intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc,
-				    old_intel_cstate, pipe_config);
+	if (INTEL_GEN(dev_priv) >= 9)
+		skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+	else
+		i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
 
 	intel_finish_crtc_commit(crtc, old_crtc_state);
 }
@@ -12885,7 +12958,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
 
 		if (old_crtc_state->active) {
-			intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes);
+			intel_crtc_disable_planes(intel_state, intel_crtc);
 
 			/*
 			 * We need to disable pipe CRC before disabling the pipe,
@@ -13240,7 +13313,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 	struct i915_vma *vma;
 
 	if (plane->id == PLANE_CURSOR &&
-	    INTEL_INFO(dev_priv)->cursor_needs_physical) {
+	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
 		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 		const int align = intel_cursor_alignment(dev_priv);
 		int err;
@@ -13735,7 +13808,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 					  to_intel_plane_state(plane->state));
 	} else {
 		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
-		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+		intel_plane->disable_plane(intel_plane, crtc_state);
 	}
 
 	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@@ -14186,7 +14259,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 
 	intel_pps_init(dev_priv);
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
+	if (!HAS_DISPLAY(dev_priv))
 		return;
 
 	/*
@@ -14451,7 +14524,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct drm_framebuffer *fb = &intel_fb->base;
-	struct drm_format_name_buf format_name;
 	u32 pitch_limit;
 	unsigned int tiling, stride;
 	int ret = -EINVAL;
@@ -14482,39 +14554,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		}
 	}
 
-	/* Passed in modifier sanity checking. */
-	switch (mode_cmd->modifier[0]) {
-	case I915_FORMAT_MOD_Y_TILED_CCS:
-	case I915_FORMAT_MOD_Yf_TILED_CCS:
-		switch (mode_cmd->pixel_format) {
-		case DRM_FORMAT_XBGR8888:
-		case DRM_FORMAT_ABGR8888:
-		case DRM_FORMAT_XRGB8888:
-		case DRM_FORMAT_ARGB8888:
-			break;
-		default:
-			DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
-			goto err;
-		}
-		/* fall through */
-	case I915_FORMAT_MOD_Yf_TILED:
-		if (mode_cmd->pixel_format == DRM_FORMAT_C8) {
-			DRM_DEBUG_KMS("Indexed format does not support Yf tiling\n");
-			goto err;
-		}
-		/* fall through */
-	case I915_FORMAT_MOD_Y_TILED:
-		if (INTEL_GEN(dev_priv) < 9) {
-			DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
-				      mode_cmd->modifier[0]);
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_MOD_LINEAR:
-	case I915_FORMAT_MOD_X_TILED:
-		break;
-	default:
-		DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
+	if (!drm_any_plane_has_format(&dev_priv->drm,
+				      mode_cmd->pixel_format,
+				      mode_cmd->modifier[0])) {
+		struct drm_format_name_buf format_name;
+
+		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
+			      drm_get_format_name(mode_cmd->pixel_format,
+						  &format_name),
 			      mode_cmd->modifier[0]);
 		goto err;
 	}
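
The hand-rolled modifier and pixel-format switches are replaced here by a single query against what the planes actually advertise; drm_any_plane_has_format() accepts a framebuffer if at least one plane supports both the format and the modifier. A self-contained sketch of that idea over hypothetical per-plane capability tables:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-plane format/modifier tables. */
struct plane_caps {
	const uint32_t *formats;
	int nformats;
	const uint64_t *modifiers;
	int nmodifiers;
};

/*
 * Same idea as drm_any_plane_has_format(): a fb is acceptable if at
 * least one plane advertises both the pixel format and the modifier.
 */
static bool any_plane_has_format(const struct plane_caps *planes, int n,
				 uint32_t format, uint64_t modifier)
{
	for (int p = 0; p < n; p++) {
		bool fmt_ok = false, mod_ok = false;

		for (int i = 0; i < planes[p].nformats; i++)
			fmt_ok |= planes[p].formats[i] == format;
		for (int i = 0; i < planes[p].nmodifiers; i++)
			mod_ok |= planes[p].modifiers[i] == modifier;
		if (fmt_ok && mod_ok)
			return true;
	}
	return false;
}

int main(void)
{
	const uint32_t fmts[] = { 0x34325258 /* XRGB8888 fourcc */ };
	const uint64_t mods[] = { 0 /* DRM_FORMAT_MOD_LINEAR */ };
	struct plane_caps planes[] = { { fmts, 1, mods, 1 } };

	printf("%d\n", any_plane_has_format(planes, 1, 0x34325258, 0)); /* 1 */
	printf("%d\n", any_plane_has_format(planes, 1, 0x34325258, 1)); /* 0 */
	return 0;
}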
@@ -14549,69 +14596,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		goto err;
 	}
 
-	/* Reject formats not supported by any plane early. */
-	switch (mode_cmd->pixel_format) {
-	case DRM_FORMAT_C8:
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_ARGB8888:
-		break;
-	case DRM_FORMAT_XRGB1555:
-		if (INTEL_GEN(dev_priv) > 3) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_ABGR8888:
-		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
-		    INTEL_GEN(dev_priv) < 9) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-		if (INTEL_GEN(dev_priv) < 4) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_ABGR2101010:
-		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_YVYU:
-	case DRM_FORMAT_VYUY:
-		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_NV12:
-		if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
-		    IS_BROXTON(dev_priv)) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format,
-							  &format_name));
-			goto err;
-		}
-		break;
-	default:
-		DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-			      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-		goto err;
-	}
-
 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
 	if (mode_cmd->offsets[0] != 0)
 		goto err;
@@ -16066,7 +16050,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 	};
 	int i;
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
+	if (!HAS_DISPLAY(dev_priv))
 		return NULL;
 
 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 5f2955b..4262452 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -242,6 +242,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_TRANSCODER_EDP_VDSC,
 	POWER_DOMAIN_TRANSCODER_DSI_A,
 	POWER_DOMAIN_TRANSCODER_DSI_C,
 	POWER_DOMAIN_PORT_DDI_A_LANES,
@@ -398,6 +399,14 @@ struct intel_link_m_n {
 	for_each_power_well_reverse(__dev_priv, __power_well)		        \
 		for_each_if((__power_well)->desc->domains & (__domain_mask))
 
+#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+		     ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+		      (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
+	     (__i)++) \
+		for_each_if(plane)
+
 #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
 	for ((__i) = 0; \
 	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -423,10 +432,18 @@ struct intel_link_m_n {
 	     (__i)++) \
 		for_each_if(plane)
 
-void intel_link_compute_m_n(int bpp, int nlanes,
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->base.dev->mode_config.num_crtc && \
+		     ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+		      (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+		      (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+	     (__i)++) \
+		for_each_if(crtc)
+
+void intel_link_compute_m_n(u16 bpp, int nlanes,
 			    int pixel_clock, int link_clock,
 			    struct intel_link_m_n *m_n,
 			    bool constant_n);
-
 bool is_ccs_modifier(u64 modifier);
 #endif
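
These iterator macros all use the same comma-operator trick: the loop condition loads the cursor variables on every iteration and still evaluates to the bounds check, so the loop body sees fresh pointers without any extra statement. A standalone miniature of the pattern (types and fields are hypothetical):

#include <stdio.h>

/* Hypothetical miniature of the state arrays the macros walk. */
struct obj_slot { const char *ptr, *old_state, *new_state; };

struct state {
	struct obj_slot crtcs[2];
	int num_crtc;
};

/*
 * Same shape as for_each_oldnew_intel_crtc_in_state(): the comma
 * expression assigns the cursors, then yields 1 so the && reduces to
 * the bounds check alone.
 */
#define for_each_oldnew_crtc(__s, crtc, old, new, __i)                 \
	for ((__i) = 0;                                                \
	     (__i) < (__s)->num_crtc &&                                \
		     ((crtc) = (__s)->crtcs[__i].ptr,                  \
		      (old) = (__s)->crtcs[__i].old_state,             \
		      (new) = (__s)->crtcs[__i].new_state, 1);         \
	     (__i)++)

int main(void)
{
	struct state s = {
		.crtcs = { { "crtc A", "old A", "new A" },
			   { "crtc B", "old B", "new B" } },
		.num_crtc = 2,
	};
	const char *crtc, *old, *new;
	int i;

	for_each_oldnew_crtc(&s, crtc, old, new, i)
		printf("%s: %s -> %s\n", crtc, old, new);
	return 0;
}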
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 7699f9b..fdd2cbc 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -47,6 +47,8 @@
 
 /* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
 #define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
+#define DP_DSC_MIN_SUPPORTED_BPC		8
+#define DP_DSC_MAX_SUPPORTED_BPC		10
 
 /* DP DSC throughput values used for slice count calculations KPixels/s */
 #define DP_DSC_PEAK_PIXEL_RATE			2720000
@@ -543,7 +545,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
 			dsc_slice_count =
 				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
 								true);
-		} else {
+		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
 			dsc_max_output_bpp =
 				intel_dp_dsc_get_output_bpp(max_link_clock,
 							    max_lanes,
@@ -1708,6 +1710,41 @@ struct link_config_limits {
 	int min_bpp, max_bpp;
 };
 
+static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+					 const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return INTEL_GEN(dev_priv) >= 11 &&
+		pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+				  const struct intel_crtc_state *pipe_config)
+{
+	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
+		drm_dp_sink_supports_fec(intel_dp->fec_capable);
+}
+
+static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
+					 const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return INTEL_GEN(dev_priv) >= 10 &&
+		pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+				  const struct intel_crtc_state *pipe_config)
+{
+	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+		return false;
+
+	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
+}
+
 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
 				struct intel_crtc_state *pipe_config)
 {
@@ -1842,14 +1879,122 @@ intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
 	return false;
 }
 
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
+{
+	int i, num_bpc;
+	u8 dsc_bpc[3] = {0};
+
+	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+						       dsc_bpc);
+	for (i = 0; i < num_bpc; i++) {
+		if (dsc_max_bpc >= dsc_bpc[i])
+			return dsc_bpc[i] * 3;
+	}
+
+	return 0;
+}
+
+static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+					struct intel_crtc_state *pipe_config,
+					struct drm_connector_state *conn_state,
+					struct link_config_limits *limits)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	u8 dsc_max_bpc;
+	int pipe_bpp;
+
+	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
+		return false;
+
+	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
+			    conn_state->max_requested_bpc);
+
+	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
+	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+		return false;
+	}
+
+	/*
+	 * For now enable DSC for max bpp, max link rate, max lane count.
+	 * Optimize this later for the minimum possible link rate/lane count
+	 * with DSC enabled for the requested mode.
+	 */
+	pipe_config->pipe_bpp = pipe_bpp;
+	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
+	pipe_config->lane_count = limits->max_lane_count;
+
+	if (intel_dp_is_edp(intel_dp)) {
+		pipe_config->dsc_params.compressed_bpp =
+			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
+			      pipe_config->pipe_bpp);
+		pipe_config->dsc_params.slice_count =
+			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+							true);
+	} else {
+		u16 dsc_max_output_bpp;
+		u8 dsc_dp_slice_count;
+
+		dsc_max_output_bpp =
+			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+						    pipe_config->lane_count,
+						    adjusted_mode->crtc_clock,
+						    adjusted_mode->crtc_hdisplay);
+		dsc_dp_slice_count =
+			intel_dp_dsc_get_slice_count(intel_dp,
+						     adjusted_mode->crtc_clock,
+						     adjusted_mode->crtc_hdisplay);
+		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+			return false;
+		}
+		pipe_config->dsc_params.compressed_bpp = min_t(u16,
+							       dsc_max_output_bpp >> 4,
+							       pipe_config->pipe_bpp);
+		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+	}
+	/*
+	 * The VDSC engine operates at 1 pixel per clock, so if the peak
+	 * pixel rate is greater than the maximum CDCLK frequency and the
+	 * slice count is greater than 1, we need to use 2 VDSC instances.
+	 */
+	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
+		if (pipe_config->dsc_params.slice_count > 1) {
+			pipe_config->dsc_params.dsc_split = true;
+		} else {
+			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+			return false;
+		}
+	}
+	if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) {
+		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
+			      "Compressed BPP = %d\n",
+			      pipe_config->pipe_bpp,
+			      pipe_config->dsc_params.compressed_bpp);
+		return false;
+	}
+	pipe_config->dsc_params.compression_enable = true;
+	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
+		      "Compressed Bpp = %d Slice Count = %d\n",
+		      pipe_config->pipe_bpp,
+		      pipe_config->dsc_params.compressed_bpp,
+		      pipe_config->dsc_params.slice_count);
+
+	return true;
+}
+
 static bool
 intel_dp_compute_link_config(struct intel_encoder *encoder,
-			     struct intel_crtc_state *pipe_config)
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state)
 {
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct link_config_limits limits;
 	int common_len;
+	bool ret;
 
 	common_len = intel_dp_common_len_rate_limit(intel_dp,
 						    intel_dp->max_link_rate);
@@ -1888,7 +2033,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 		      intel_dp->common_rates[limits.max_clock],
 		      limits.max_bpp, adjusted_mode->crtc_clock);
 
-	if (intel_dp_is_edp(intel_dp)) {
+	if (intel_dp_is_edp(intel_dp))
 		/*
 		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
 		 * section A.1: "It is recommended that the minimum number of
@@ -1898,26 +2043,42 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 		 * Note that we use the max clock and lane count for eDP 1.3 and
 		 * earlier, and fast vs. wide is irrelevant.
 		 */
-		if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config,
-						       &limits))
-			return false;
-	} else {
+		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
+							&limits);
+	else
 		/* Optimize for slow and wide. */
-		if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config,
-						       &limits))
+		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
+							&limits);
+
+	/* enable compression if the mode doesn't fit available BW */
+	if (!ret) {
+		if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
+						 conn_state, &limits))
 			return false;
 	}
 
-	DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
-		      pipe_config->lane_count, pipe_config->port_clock,
-		      pipe_config->pipe_bpp);
+	if (pipe_config->dsc_params.compression_enable) {
+		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+			      pipe_config->lane_count, pipe_config->port_clock,
+			      pipe_config->pipe_bpp,
+			      pipe_config->dsc_params.compressed_bpp);
 
-	DRM_DEBUG_KMS("DP link rate required %i available %i\n",
-		      intel_dp_link_required(adjusted_mode->crtc_clock,
-					     pipe_config->pipe_bpp),
-		      intel_dp_max_data_rate(pipe_config->port_clock,
-					     pipe_config->lane_count));
+		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+			      intel_dp_link_required(adjusted_mode->crtc_clock,
+						     pipe_config->dsc_params.compressed_bpp),
+			      intel_dp_max_data_rate(pipe_config->port_clock,
+						     pipe_config->lane_count));
+	} else {
+		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
+			      pipe_config->lane_count, pipe_config->port_clock,
+			      pipe_config->pipe_bpp);
 
+		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+			      intel_dp_link_required(adjusted_mode->crtc_clock,
+						     pipe_config->pipe_bpp),
+			      intel_dp_max_data_rate(pipe_config->port_clock,
+						     pipe_config->lane_count));
+	}
 	return true;
 }
 
@@ -1983,7 +2144,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 		return false;
 
-	if (!intel_dp_compute_link_config(encoder, pipe_config))
+	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+				  intel_dp_supports_fec(intel_dp, pipe_config);
+
+	if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
 		return false;
 
 	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
@@ -2001,11 +2165,20 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
 	}
 
-	intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
-			       adjusted_mode->crtc_clock,
-			       pipe_config->port_clock,
-			       &pipe_config->dp_m_n,
-			       constant_n);
+	if (!pipe_config->dsc_params.compression_enable)
+		intel_link_compute_m_n(pipe_config->pipe_bpp,
+				       pipe_config->lane_count,
+				       adjusted_mode->crtc_clock,
+				       pipe_config->port_clock,
+				       &pipe_config->dp_m_n,
+				       constant_n);
+	else
+		intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
+				       pipe_config->lane_count,
+				       adjusted_mode->crtc_clock,
+				       pipe_config->port_clock,
+				       &pipe_config->dp_m_n,
+				       constant_n);
 
 	if (intel_connector->panel.downclock_mode != NULL &&
 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2702,6 +2875,22 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
 		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
 }
 
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+					   const struct intel_crtc_state *crtc_state,
+					   bool enable)
+{
+	int ret;
+
+	if (!crtc_state->dsc_params.compression_enable)
+		return;
+
+	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
+				 enable ? DP_DECOMPRESSION_EN : 0);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
+			      enable ? "enable" : "disable");
+}
+
 /* If the sink supports it, try to set the power state appropriately */
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 {
@@ -3837,15 +4026,14 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
 		DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
 			      (int)sizeof(intel_dp->dsc_dpcd),
 			      intel_dp->dsc_dpcd);
-		/* FEC is supported only on DP 1.4 */
-		if (!intel_dp_is_edp(intel_dp)) {
-			if (drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
-					      &intel_dp->fec_capable) < 0)
-				DRM_ERROR("Failed to read FEC DPCD register\n");
 
-		DRM_DEBUG_KMS("FEC CAPABILITY: %x\n",
-			      intel_dp->fec_capable);
-		}
+		/* FEC is supported only on DP 1.4 */
+		if (!intel_dp_is_edp(intel_dp) &&
+		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+				      &intel_dp->fec_capable) < 0)
+			DRM_ERROR("Failed to read FEC DPCD register\n");
+
+		DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
 	}
 }
 
@@ -3936,8 +4124,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 static bool
 intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
-	u8 sink_count;
-
 	if (!intel_dp_read_dpcd(intel_dp))
 		return false;
 
@@ -3947,25 +4133,35 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 		intel_dp_set_common_rates(intel_dp);
 	}
 
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
-		return false;
-
 	/*
-	 * Sink count can change between short pulse hpd hence
-	 * a member variable in intel_dp will track any changes
-	 * between short pulse interrupts.
+	 * Some eDP panels do not set a valid value for sink count, which is
+	 * why we don't bother reading it here or in intel_edp_init_dpcd().
 	 */
-	intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+	if (!intel_dp_is_edp(intel_dp)) {
+		u8 count;
+		ssize_t r;
 
-	/*
-	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
-	 * a dongle is present but no display. Unless we require to know
-	 * if a dongle is present or not, we don't need to update
-	 * downstream port information. So, an early return here saves
-	 * time from performing other operations which are not required.
-	 */
-	if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
-		return false;
+		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
+		if (r < 1)
+			return false;
+
+		/*
+		 * Sink count can change between short pulse hpd hence
+		 * a member variable in intel_dp will track any changes
+		 * between short pulse interrupts.
+		 */
+		intel_dp->sink_count = DP_GET_SINK_COUNT(count);
+
+		/*
+		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+		 * a dongle is present but no display. Unless we require to know
+		 * if a dongle is present or not, we don't need to update
+		 * downstream port information. So, an early return here saves
+		 * time from performing other operations which are not required.
+		 */
+		if (!intel_dp->sink_count)
+			return false;
+	}
 
 	if (!drm_dp_is_branch(intel_dp->dpcd))
 		return true; /* native DP sink */
@@ -4375,6 +4571,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
 	if (!intel_dp->link_trained)
 		return false;
 
+	/*
+	 * While the PSR source HW is enabled it controls the main link,
+	 * sending frames and enabling/disabling it, so a retrain attempted
+	 * here would fail: the link may or may not be on, or training
+	 * patterns could get mixed with frame data. Also, when exiting PSR
+	 * the HW retrains the link anyway, fixing any link status error.
+	 */
+	if (intel_psr_enabled(intel_dp))
+		return false;
+
 	if (!intel_dp_get_link_status(intel_dp, link_status))
 		return false;
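
One detail worth spelling out in the DSC path above: the sink reports its maximum compressed output bpp over DPCD in units of 1/16 bpp, which is why intel_dp_dsc_compute_config() shifts the value right by four before clamping it against the uncompressed pipe bpp (3 bits-per-component values per pixel). A small worked sketch of that fixed-point conversion (values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/*
 * Convert a DPCD output-bpp value (1/16 bpp units) to integer bpp,
 * then clamp against the uncompressed pipe bpp, mirroring the min_t()
 * calls above.
 */
static uint16_t compressed_bpp(uint16_t dpcd_bpp_1_16th, int pipe_bpp)
{
	uint16_t bpp = dpcd_bpp_1_16th >> 4;	/* 1/16 bpp -> integer bpp */

	return bpp < pipe_bpp ? bpp : pipe_bpp;
}

int main(void)
{
	/* Sink value 192 means 192/16 = 12 bpp, vs 8 bpc input (24 bpp). */
	printf("%u\n", (unsigned)compressed_bpp(192, 8 * 3));	/* 12 */
	/* Sink allows more than the input: clamp to pipe_bpp. */
	printf("%u\n", (unsigned)compressed_bpp(512, 8 * 3));	/* 24 */
	return 0;
}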
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 901e150..d513ca8 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2523,7 +2523,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 
 	if (intel_port_is_tc(dev_priv, encoder->port))
 		ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
-	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
 		ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
 	else
 		ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a7d9ac9..f94a04b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -706,6 +706,8 @@ struct intel_crtc_wm_state {
 			/* gen9+ only needs 1-step wm programming */
 			struct skl_pipe_wm optimal;
 			struct skl_ddb_entry ddb;
+			struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+			struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
 		} skl;
 
 		struct {
@@ -926,6 +928,9 @@ struct intel_crtc_state {
 	u8 active_planes;
 	u8 nv12_planes;
 
+	/* bitmask of planes that will be updated during the commit */
+	u8 update_planes;
+
 	/* HDMI scrambling status */
 	bool hdmi_scrambling;
 
@@ -937,6 +942,18 @@ struct intel_crtc_state {
 
 	/* Output down scaling is done in LSPCON device */
 	bool lspcon_downsampling;
+
+	/* Display Stream Compression state */
+	struct {
+		bool compression_enable;
+		bool dsc_split;
+		u16 compressed_bpp;
+		u8 slice_count;
+	} dsc_params;
+	struct drm_dsc_config dp_dsc_cfg;
+
+	/* Forward Error Correction state */
+	bool fec_enable;
 };
 
 struct intel_crtc {
@@ -1013,7 +1030,7 @@ struct intel_plane {
 			     const struct intel_crtc_state *crtc_state,
 			     const struct intel_plane_state *plane_state);
 	void (*disable_plane)(struct intel_plane *plane,
-			      struct intel_crtc *crtc);
+			      const struct intel_crtc_state *crtc_state);
 	bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
 	int (*check_plane)(struct intel_crtc_state *crtc_state,
 			   struct intel_plane_state *plane_state);
@@ -1517,13 +1534,9 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
 				 u8 voltage_swing);
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
 				     bool enable);
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
-			   struct intel_crtc_state *crtc_state,
-			   struct drm_atomic_state *old_state);
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
-			     struct intel_crtc_state *crtc_state,
-			     struct drm_atomic_state *old_state);
 void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+			enum intel_dpll_id pll_id);
 
 unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
 				   int color_plane, unsigned int height);
@@ -1788,6 +1801,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 int intel_dp_retrain_link(struct intel_encoder *encoder,
 			  struct drm_modeset_acquire_ctx *ctx);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+					   const struct intel_crtc_state *crtc_state,
+					   bool enable);
 void intel_dp_encoder_reset(struct drm_encoder *encoder);
 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
@@ -1843,6 +1859,12 @@ uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
 uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
 				     int mode_hdisplay);
 
+/* intel_vdsc.c */
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+				struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
+
 static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 {
 	return ~((1 << lane_count) - 1) & 0xf;
@@ -2047,6 +2069,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
 void intel_psr_short_pulse(struct intel_dp *intel_dp);
 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
 			    u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
 
 /* intel_quirks.c */
 void intel_init_quirks(struct drm_i915_private *dev_priv);
@@ -2181,6 +2204,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev);
 void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+			       struct skl_ddb_entry *ddb_y,
+			       struct skl_ddb_entry *ddb_uv);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
 void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
@@ -2195,6 +2221,10 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
 				 const struct skl_ddb_entry entries[],
 				 int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+			const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+			 const struct intel_crtc_state *crtc_state);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 				  struct intel_crtc_state *cstate);
@@ -2287,10 +2317,10 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
 void intel_plane_destroy_state(struct drm_plane *plane,
 			       struct drm_plane_state *state);
 extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
-				 struct intel_crtc *crtc,
-				 struct intel_crtc_state *old_crtc_state,
-				 struct intel_crtc_state *new_crtc_state);
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+			       struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+				struct intel_crtc *crtc);
 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
 					struct intel_crtc_state *crtc_state,
 					const struct intel_plane_state *old_plane_state,
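
The new update_planes field above is a per-CRTC bitmask keyed by plane id:
skl_ddb_add_affected_planes() and skl_wm_add_affected_planes() (in the
intel_pm.c hunks below) set a bit whenever a plane's DDB or watermarks
change, and the commit path then walks only the flagged planes. A minimal
stand-alone sketch of that pattern (enum values hypothetical):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_CURSOR, I915_MAX_PLANES };

int main(void)
{
	uint8_t update_planes = 0;
	int id;

	/* Flag the planes whose state changed in this commit. */
	update_planes |= BIT(PLANE_PRIMARY);
	update_planes |= BIT(PLANE_CURSOR);

	/* The commit only touches flagged planes. */
	for (id = 0; id < I915_MAX_PLANES; id++)
		if (update_planes & BIT(id))
			printf("updating plane %d\n", id);
	return 0;
}
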
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index ee93137..d968f1f 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -146,6 +146,11 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 	return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
 }
 
+static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
+{
+	return enc_to_intel_dsi(&encoder->base)->ports;
+}
+
 /* intel_dsi.c */
 int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
 int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index a72de81..a1a8b37 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -103,6 +103,18 @@ static struct gpio_map vlv_gpio_table[] = {
 #define CHV_GPIO_PAD_CFG1(f, i)		(0x4400 + (f) * 0x400 + (i) * 8 + 4)
 #define  CHV_GPIO_CFGLOCK		(1 << 31)
 
+/* ICL DSI Display GPIO Pins */
+#define  ICL_GPIO_DDSP_HPD_A		0
+#define  ICL_GPIO_L_VDDEN_1		1
+#define  ICL_GPIO_L_BKLTEN_1		2
+#define  ICL_GPIO_DDPA_CTRLCLK_1	3
+#define  ICL_GPIO_DDPA_CTRLDATA_1	4
+#define  ICL_GPIO_DDSP_HPD_B		5
+#define  ICL_GPIO_L_VDDEN_2		6
+#define  ICL_GPIO_L_BKLTEN_2		7
+#define  ICL_GPIO_DDPA_CTRLCLK_2	8
+#define  ICL_GPIO_DDPA_CTRLDATA_2	9
+
 static inline enum port intel_dsi_seq_port_to_port(u8 port)
 {
 	return port ? PORT_C : PORT_A;
@@ -324,6 +336,12 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
 	gpiod_set_value(gpio_desc, value);
 }
 
+static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+			  u8 gpio_source, u8 gpio_index, bool value)
+{
+	DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+}
+
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
@@ -347,7 +365,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	/* pull up/down */
 	value = *data++ & 1;
 
-	if (IS_VALLEYVIEW(dev_priv))
+	if (IS_ICELAKE(dev_priv))
+		icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+	else if (IS_VALLEYVIEW(dev_priv))
 		vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
 	else if (IS_CHERRYVIEW(dev_priv))
 		chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 759c0fd..af28734 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -493,46 +493,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_cmd_parser(engine);
 }
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
-				unsigned int size)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int ret;
-
-	WARN_ON(engine->scratch);
-
-	obj = i915_gem_object_create_stolen(engine->i915, size);
-	if (!obj)
-		obj = i915_gem_object_create_internal(engine->i915, size);
-	if (IS_ERR(obj)) {
-		DRM_ERROR("Failed to allocate scratch page\n");
-		return PTR_ERR(obj);
-	}
-
-	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto err_unref;
-	}
-
-	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
-	if (ret)
-		goto err_unref;
-
-	engine->scratch = vma;
-	return 0;
-
-err_unref:
-	i915_gem_object_put(obj);
-	return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
-	i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
 	if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -707,8 +667,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
 
-	intel_engine_cleanup_scratch(engine);
-
 	cleanup_status_page(engine);
 
 	intel_engine_fini_breadcrumbs(engine);
@@ -723,6 +681,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	__intel_context_unpin(i915->kernel_context, engine);
 
 	i915_timeline_fini(&engine->timeline);
+
+	intel_wa_list_free(&engine->ctx_wa_list);
+	intel_wa_list_free(&engine->wa_list);
+	intel_wa_list_free(&engine->whitelist);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
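
With the per-engine scratch VMA gone, scratch users address a single
device-wide page through i915_scratch_offset() plus a fixed byte offset
(the intel_lrc.c hunks below use +256 for the L3SQCREG4 save slot and
+2 * CACHELINE_BYTES for PIPE_CONTROL QW writes). A rough sketch of the
convention, with the base address hypothetical:

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64

/* Hypothetical stand-in for the GGTT offset of the shared scratch page. */
static uint32_t i915_scratch_offset(void)
{
	return 0x1000;
}

int main(void)
{
	/* Fixed slots carved out of the one scratch page: */
	printf("L3SQCREG4 save slot:  0x%x\n", i915_scratch_offset() + 256);
	printf("PIPE_CONTROL QW slot: 0x%x\n",
	       i915_scratch_offset() + 2 * CACHELINE_BYTES);
	return 0;
}
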
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 14cbaf4..f23570c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1309,7 +1309,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 	fbc->active = false;
 
 	if (need_fbc_vtd_wa(dev_priv))
-		mkwrite_device_info(dev_priv)->has_fbc = false;
+		mkwrite_device_info(dev_priv)->display.has_fbc = false;
 
 	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
 	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 2480c7d..fb5bb5b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -672,7 +672,7 @@ int intel_fbdev_init(struct drm_device *dev)
 	struct intel_fbdev *ifbdev;
 	int ret;
 
-	if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
+	if (WARN_ON(!HAS_DISPLAY(dev_priv)))
 		return -ENODEV;
 
 	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
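
Both this hunk and the intel_i2c.c one below replace open-coded num_pipes
checks; presumably HAS_DISPLAY() wraps the same test, along the lines of:

#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
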
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e2c6a2b..07e803a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -115,6 +115,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
 	switch (type) {
 	case DP_SDP_VSC:
 		return VIDEO_DIP_ENABLE_VSC_HSW;
+	case DP_SDP_PPS:
+		return VDIP_ENABLE_PPS;
 	case HDMI_INFOFRAME_TYPE_AVI:
 		return VIDEO_DIP_ENABLE_AVI_HSW;
 	case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,6 +138,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
 	switch (type) {
 	case DP_SDP_VSC:
 		return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+	case DP_SDP_PPS:
+		return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
 	case HDMI_INFOFRAME_TYPE_AVI:
 		return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
 	case HDMI_INFOFRAME_TYPE_SPD:
@@ -148,6 +152,18 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
 	}
 }
 
+static int hsw_dip_data_size(unsigned int type)
+{
+	switch (type) {
+	case DP_SDP_VSC:
+		return VIDEO_DIP_VSC_DATA_SIZE;
+	case DP_SDP_PPS:
+		return VIDEO_DIP_PPS_DATA_SIZE;
+	default:
+		return VIDEO_DIP_DATA_SIZE;
+	}
+}
+
 static void g4x_write_infoframe(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
 				unsigned int type,
@@ -382,11 +398,12 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
-	int data_size = type == DP_SDP_VSC ?
-		VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
+	int data_size;
 	int i;
 	u32 val = I915_READ(ctl_reg);
 
+	data_size = hsw_dip_data_size(type);
+
 	val &= ~hsw_infoframe_enable(type);
 	I915_WRITE(ctl_reg, val);
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 33d87ab..802d039 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -817,7 +817,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
 	unsigned int pin;
 	int ret;
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
+	if (!HAS_DISPLAY(dev_priv))
 		return 0;
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 08fd9b1..d7fa301 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -767,6 +767,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 
 static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 {
+	const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
+
 	/*
 	 * After a reset, the HW starts writing into CSB entry [0]. We
 	 * therefore have to set our HEAD pointer back one entry so that
@@ -776,8 +778,8 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 	 * inline comparison of our cached head position against the last HW
 	 * write works even before the first interrupt.
 	 */
-	execlists->csb_head = execlists->csb_write_reset;
-	WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
+	execlists->csb_head = reset_value;
+	WRITE_ONCE(*execlists->csb_write, reset_value);
 }
 
 static void nop_submission_tasklet(unsigned long data)
@@ -818,8 +820,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!rq->global_seqno);
-		if (!i915_request_completed(rq))
-			dma_fence_set_error(&rq->fence, -EIO);
+
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+			continue;
+
+		dma_fence_set_error(&rq->fence, -EIO);
 	}
 
 	/* Flush the queued requests to the timeline list (for retiring). */
@@ -839,6 +844,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
 
+	intel_write_status_page(engine,
+				I915_GEM_HWS_INDEX,
+				intel_engine_last_submit(engine));
+
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue_priority = INT_MIN;
@@ -1279,9 +1288,10 @@ static int execlists_request_alloc(struct i915_request *request)
 static u32 *
 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 {
+	/* NB no one else is allowed to scribble over scratch + 256! */
 	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_ggtt_offset(engine->scratch) + 256;
+	*batch++ = i915_scratch_offset(engine->i915) + 256;
 	*batch++ = 0;
 
 	*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1295,7 +1305,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 
 	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_ggtt_offset(engine->scratch) + 256;
+	*batch++ = i915_scratch_offset(engine->i915) + 256;
 	*batch++ = 0;
 
 	return batch;
@@ -1332,7 +1342,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 				       PIPE_CONTROL_GLOBAL_GTT_IVB |
 				       PIPE_CONTROL_CS_STALL |
 				       PIPE_CONTROL_QW_WRITE,
-				       i915_ggtt_offset(engine->scratch) +
+				       i915_scratch_offset(engine->i915) +
 				       2 * CACHELINE_BYTES);
 
 	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1401,18 +1411,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 
 	batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
 
-	/* WaClearSlmSpaceAtContextSwitch:kbl */
-	/* Actual scratch location is at 128 bytes offset */
-	if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
-		batch = gen8_emit_pipe_control(batch,
-					       PIPE_CONTROL_FLUSH_L3 |
-					       PIPE_CONTROL_GLOBAL_GTT_IVB |
-					       PIPE_CONTROL_CS_STALL |
-					       PIPE_CONTROL_QW_WRITE,
-					       i915_ggtt_offset(engine->scratch)
-					       + 2 * CACHELINE_BYTES);
-	}
-
 	/* WaMediaPoolStateCmdInWABB:bxt,glk */
 	if (HAS_POOLED_EU(engine->i915)) {
 		/*
@@ -1629,6 +1627,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
+	intel_engine_apply_workarounds(engine);
+
 	intel_mocs_init_engine(engine);
 
 	intel_engine_reset_breadcrumbs(engine);
@@ -1653,7 +1653,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	intel_whitelist_workarounds_apply(engine);
+	intel_engine_apply_whitelist(engine);
 
 	/* We need to disable the AsyncFlip performance optimisations in order
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -1676,7 +1676,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	intel_whitelist_workarounds_apply(engine);
+	intel_engine_apply_whitelist(engine);
 
 	return 0;
 }
@@ -1974,7 +1974,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
 {
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
-		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+		i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 *cs, flags = 0;
 	int len;
@@ -2088,7 +2088,7 @@ static int gen8_init_rcs_context(struct i915_request *rq)
 {
 	int ret;
 
-	ret = intel_ctx_workarounds_emit(rq);
+	ret = intel_engine_emit_ctx_wa(rq);
 	if (ret)
 		return ret;
 
@@ -2229,12 +2229,6 @@ logical_ring_setup(struct intel_engine_cs *engine)
 	logical_ring_default_irqs(engine);
 }
 
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
-	/* Older GVT emulation depends upon intercepting CSB mmio */
-	return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
-}
-
 static int logical_ring_init(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
@@ -2264,24 +2258,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 			upper_32_bits(ce->lrc_desc);
 	}
 
-	execlists->csb_read =
-		i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
-	if (csb_force_mmio(i915)) {
-		execlists->csb_status = (u32 __force *)
-			(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+	execlists->csb_status =
+		&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
 
-		execlists->csb_write = (u32 __force *)execlists->csb_read;
-		execlists->csb_write_reset =
-			_MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
-				      GEN8_CSB_ENTRIES - 1);
-	} else {
-		execlists->csb_status =
-			&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+	execlists->csb_write =
+		&engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
 
-		execlists->csb_write =
-			&engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
-		execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
-	}
 	reset_csb_pointers(execlists);
 
 	return 0;
@@ -2311,10 +2293,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	ret = intel_engine_create_scratch(engine, PAGE_SIZE);
-	if (ret)
-		goto err_cleanup_common;
-
 	ret = intel_init_workaround_bb(engine);
 	if (ret) {
 		/*
@@ -2326,11 +2304,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 			  ret);
 	}
 
-	return 0;
+	intel_engine_init_whitelist(engine);
+	intel_engine_init_workarounds(engine);
 
-err_cleanup_common:
-	intel_engine_cleanup_common(engine);
-	return ret;
+	return 0;
 }
 
 int logical_xcs_ring_init(struct intel_engine_cs *engine)
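
The reset_csb_pointers() change above is easiest to see with a toy ring:
after a reset the HW writes CSB entry [0], so initializing both the cached
head and the write pointer to GEN8_CSB_ENTRIES - 1 lets the normal
head != write walk pick that entry up. A stand-alone sketch (entry count
and values illustrative):

#include <stdio.h>

#define CSB_ENTRIES 6

int main(void)
{
	unsigned int head = CSB_ENTRIES - 1;	/* reset value */
	unsigned int write;
	unsigned int csb[CSB_ENTRIES] = { 0xdead0001 };

	write = 0;	/* HW has since written CSB[0] */

	while (head != write) {
		head = (head + 1) % CSB_ENTRIES;
		printf("process CSB[%u] = 0x%x\n", head, csb[head]);
	}
	return 0;
}
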
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index d84b6d2..4aa68ff 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -87,12 +87,12 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
 {
 }
 
-void intel_opregion_resume(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
 {
 }
 
-void intel_opregion_suspend(struct drm_i915_private *dev_priv,
-			    pci_power_t state)
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+					  pci_power_t state)
 {
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 897a791..a26b4ed 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3951,68 +3951,68 @@ static void
 skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 			   const enum pipe pipe,
 			   const enum plane_id plane_id,
-			   struct skl_ddb_allocation *ddb /* out */)
+			   struct skl_ddb_entry *ddb_y,
+			   struct skl_ddb_entry *ddb_uv)
 {
-	u32 val, val2 = 0;
-	int fourcc, pixel_format;
+	u32 val, val2;
+	u32 fourcc = 0;
 
 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
 	if (plane_id == PLANE_CURSOR) {
 		val = I915_READ(CUR_BUF_CFG(pipe));
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val);
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
 		return;
 	}
 
 	val = I915_READ(PLANE_CTL(pipe, plane_id));
 
 	/* No DDB allocated for disabled planes */
-	if (!(val & PLANE_CTL_ENABLE))
-		return;
+	if (val & PLANE_CTL_ENABLE)
+		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+					      val & PLANE_CTL_ORDER_RGBX,
+					      val & PLANE_CTL_ALPHA_MASK);
 
-	pixel_format = val & PLANE_CTL_FORMAT_MASK;
-	fourcc = skl_format_to_fourcc(pixel_format,
-				      val & PLANE_CTL_ORDER_RGBX,
-				      val & PLANE_CTL_ALPHA_MASK);
-
-	val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-	if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
+	if (INTEL_GEN(dev_priv) >= 11) {
+		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+	} else {
+		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
 		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
 
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val2);
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->uv_plane[pipe][plane_id], val);
-	} else {
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val);
+		if (fourcc == DRM_FORMAT_NV12)
+			swap(val, val2);
+
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
 	}
 }
 
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+			       struct skl_ddb_entry *ddb_y,
+			       struct skl_ddb_entry *ddb_uv)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum intel_display_power_domain power_domain;
+	enum pipe pipe = crtc->pipe;
+	enum plane_id plane_id;
+
+	power_domain = POWER_DOMAIN_PIPE(pipe);
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+		return;
+
+	for_each_plane_id_on_crtc(crtc, plane_id)
+		skl_ddb_get_hw_plane_state(dev_priv, pipe,
+					   plane_id,
+					   &ddb_y[plane_id],
+					   &ddb_uv[plane_id]);
+
+	intel_display_power_put(dev_priv, power_domain);
+}
+
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */)
 {
-	struct intel_crtc *crtc;
-
-	memset(ddb, 0, sizeof(*ddb));
-
 	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		enum intel_display_power_domain power_domain;
-		enum plane_id plane_id;
-		enum pipe pipe = crtc->pipe;
-
-		power_domain = POWER_DOMAIN_PIPE(pipe);
-		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
-			continue;
-
-		for_each_plane_id_on_crtc(crtc, plane_id)
-			skl_ddb_get_hw_plane_state(dev_priv, pipe,
-						   plane_id, ddb);
-
-		intel_display_power_put(dev_priv, power_domain);
-	}
 }
 
 /*
@@ -4410,7 +4410,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	struct drm_crtc *crtc = cstate->base.crtc;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
 	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
 	uint16_t alloc_size, start;
 	uint16_t minimum[I915_MAX_PLANES] = {};
@@ -4423,8 +4422,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	uint16_t total_min_blocks = 0;
 
 	/* Clear the partitioning for disabled planes. */
-	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-	memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+	memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+	memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
 
 	if (WARN_ON(!state))
 		return 0;
@@ -4471,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	}
 
 	alloc_size -= total_min_blocks;
-	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
-	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
 
 	/*
 	 * 2. Distribute the remaining space in proportion to the amount of
@@ -4503,8 +4502,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
 		/* Leave disabled planes at (0,0) */
 		if (data_rate) {
-			ddb->plane[pipe][plane_id].start = start;
-			ddb->plane[pipe][plane_id].end = start + plane_blocks;
+			cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+			cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
 		}
 
 		start += plane_blocks;
@@ -4519,8 +4518,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
 
 		if (uv_data_rate) {
-			ddb->uv_plane[pipe][plane_id].start = start;
-			ddb->uv_plane[pipe][plane_id].end =
+			cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+			cstate->wm.skl.plane_ddb_uv[plane_id].end =
 				start + uv_plane_blocks;
 		}
 
@@ -4617,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
 }
 
 static int
-skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
-			    const struct intel_crtc_state *cstate,
+skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
 			    const struct intel_plane_state *intel_pstate,
-			    struct skl_wm_params *wp, int plane_id)
+			    struct skl_wm_params *wp, int color_plane)
 {
 	struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_plane_state *pstate = &intel_pstate->base;
 	const struct drm_framebuffer *fb = pstate->fb;
 	uint32_t interm_pbpl;
@@ -4630,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 		to_intel_atomic_state(cstate->base.state);
 	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
 
-	if (!intel_wm_plane_visible(cstate, intel_pstate))
-		return 0;
-
 	/* only NV12 format has two planes */
-	if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+	if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
 		DRM_DEBUG_KMS("Non NV12 format have single plane\n");
 		return -EINVAL;
 	}
@@ -4659,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 		wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	}
 
-	if (plane_id == 1 && wp->is_planar)
+	if (color_plane == 1 && wp->is_planar)
 		wp->width /= 2;
 
-	wp->cpp = fb->format->cpp[plane_id];
+	wp->cpp = fb->format->cpp[color_plane];
 	wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
 							     intel_pstate);
 
@@ -4724,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
-				const struct intel_crtc_state *cstate,
+static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
 				const struct intel_plane_state *intel_pstate,
 				uint16_t ddb_allocation,
 				int level,
@@ -4733,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				const struct skl_wm_level *result_prev,
 				struct skl_wm_level *result /* out */)
 {
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_pstate->base.plane->dev);
 	const struct drm_plane_state *pstate = &intel_pstate->base;
 	uint32_t latency = dev_priv->wm.skl_latency[level];
 	uint_fixed_16_16_t method1, method2;
@@ -4743,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
 	uint32_t min_disp_buf_needed;
 
-	if (latency == 0 ||
-	    !intel_wm_plane_visible(cstate, intel_pstate)) {
-		result->plane_en = false;
-		return 0;
-	}
+	if (latency == 0)
+		return level == 0 ? -EINVAL : 0;
 
 	/* Display WA #1141: kbl,cfl */
 	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4844,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	if ((level > 0 && res_lines > 31) ||
 	    res_blocks >= ddb_allocation ||
 	    min_disp_buf_needed >= ddb_allocation) {
-		result->plane_en = false;
-
 		/*
 		 * If there are no valid level 0 watermarks, then we can't
 		 * support this display configuration.
@@ -4872,27 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 }
 
 static int
-skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
-		      struct skl_ddb_allocation *ddb,
-		      const struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *cstate,
 		      const struct intel_plane_state *intel_pstate,
 		      uint16_t ddb_blocks,
 		      const struct skl_wm_params *wm_params,
-		      struct skl_plane_wm *wm,
 		      struct skl_wm_level *levels)
 {
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_pstate->base.plane->dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
 	struct skl_wm_level *result_prev = &levels[0];
 	int ret;
 
-	if (WARN_ON(!intel_pstate->base.fb))
-		return -EINVAL;
-
 	for (level = 0; level <= max_level; level++) {
 		struct skl_wm_level *result = &levels[level];
 
-		ret = skl_compute_plane_wm(dev_priv,
-					   cstate,
+		ret = skl_compute_plane_wm(cstate,
 					   intel_pstate,
 					   ddb_blocks,
 					   level,
@@ -4905,9 +4892,6 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 		result_prev = result;
 	}
 
-	if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
-		wm->is_planar = true;
-
 	return 0;
 }
 
@@ -4935,10 +4919,9 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
 }
 
 static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
-				      struct skl_wm_params *wp,
-				      struct skl_wm_level *wm_l0,
-				      uint16_t ddb_allocation,
-				      struct skl_wm_level *trans_wm /* out */)
+				      const struct skl_wm_params *wp,
+				      struct skl_plane_wm *wm,
+				      uint16_t ddb_allocation)
 {
 	struct drm_device *dev = cstate->base.crtc->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4946,16 +4929,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
 	const uint16_t trans_amount = 10; /* This is configurable amount */
 	uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
 
-	if (!cstate->base.active)
-		goto exit;
-
 	/* Transition WM are not recommended by HW team for GEN9 */
 	if (INTEL_GEN(dev_priv) <= 9)
-		goto exit;
+		return;
 
 	/* Transition WM don't make any sense if ipc is disabled */
 	if (!dev_priv->ipc_enabled)
-		goto exit;
+		return;
 
 	trans_min = 14;
 	if (INTEL_GEN(dev_priv) >= 11)
@@ -4973,7 +4953,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
 	 * Result Blocks is Result Blocks minus 1 and it should work for the
 	 * current platforms.
 	 */
-	wm0_sel_res_b = wm_l0->plane_res_b - 1;
+	wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
 
 	if (wp->y_tiled) {
 		trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
@@ -4992,107 +4972,129 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
 	res_blocks += 1;
 
 	if (res_blocks < ddb_allocation) {
-		trans_wm->plane_res_b = res_blocks;
-		trans_wm->plane_en = true;
-		return;
+		wm->trans_wm.plane_res_b = res_blocks;
+		wm->trans_wm.plane_en = true;
 	}
-
-exit:
-	trans_wm->plane_en = false;
 }
 
-static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
-				       struct skl_pipe_wm *pipe_wm,
-				       enum plane_id plane_id,
-				       const struct intel_crtc_state *cstate,
-				       const struct intel_plane_state *pstate,
-				       int color_plane)
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+				     const struct intel_plane_state *plane_state,
+				     enum plane_id plane_id, int color_plane)
 {
-	struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
-	struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-	enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
+	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+	u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
 	struct skl_wm_params wm_params;
-	uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
 	int ret;
 
-	ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
+	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
 					  &wm_params, color_plane);
 	if (ret)
 		return ret;
 
-	ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
-				    ddb_blocks, &wm_params, wm, wm->wm);
-
+	ret = skl_compute_wm_levels(crtc_state, plane_state,
+				    ddb_blocks, &wm_params, wm->wm);
 	if (ret)
 		return ret;
 
-	skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
-				  ddb_blocks, &wm->trans_wm);
+	skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
 
 	return 0;
 }
 
-static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
-				     struct skl_pipe_wm *pipe_wm,
-				     const struct intel_crtc_state *cstate,
-				     const struct intel_plane_state *pstate)
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+				 const struct intel_plane_state *plane_state,
+				 enum plane_id plane_id)
 {
-	enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
-
-	return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
-}
-
-static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
-				     struct skl_pipe_wm *pipe_wm,
-				     const struct intel_crtc_state *cstate,
-				     const struct intel_plane_state *pstate)
-{
-	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	enum plane_id plane_id = plane->id;
-	struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+	u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
 	struct skl_wm_params wm_params;
-	enum pipe pipe = plane->pipe;
-	uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
 	int ret;
 
-	ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
-	if (ret)
-		return ret;
+	wm->is_planar = true;
 
 	/* uv plane watermarks must also be validated for NV12/Planar */
-	ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
-
-	ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
+	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+					  &wm_params, 1);
 	if (ret)
 		return ret;
 
-	return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
-				     ddb_blocks, &wm_params, wm, wm->uv_wm);
+	ret = skl_compute_wm_levels(crtc_state, plane_state,
+				    ddb_blocks, &wm_params, wm->uv_wm);
+	if (ret)
+		return ret;
+
+	return 0;
 }
 
-static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
-				     struct skl_pipe_wm *pipe_wm,
-				     const struct intel_crtc_state *cstate,
-				     const struct intel_plane_state *pstate)
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+			      struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state)
 {
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	enum plane_id plane_id = plane->id;
 	int ret;
-	enum plane_id y_plane_id = pstate->linked_plane->id;
-	enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
 
-	ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
-					  cstate, pstate, 0);
+	if (!intel_wm_plane_visible(crtc_state, plane_state))
+		return 0;
+
+	ret = skl_build_plane_wm_single(crtc_state, plane_state,
+					plane_id, 0);
 	if (ret)
 		return ret;
 
-	return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
-					   cstate, pstate, 1);
+	if (fb->format->is_yuv && fb->format->num_planes > 1) {
+		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+					    plane_id);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+			      struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state)
+{
+	enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+	int ret;
+
+	/* Watermarks calculated in master */
+	if (plane_state->slave)
+		return 0;
+
+	if (plane_state->linked_plane) {
+		const struct drm_framebuffer *fb = plane_state->base.fb;
+		enum plane_id y_plane_id = plane_state->linked_plane->id;
+
+		WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
+		WARN_ON(!fb->format->is_yuv ||
+			fb->format->num_planes == 1);
+
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
+						y_plane_id, 0);
+		if (ret)
+			return ret;
+
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
+						plane_id, 1);
+		if (ret)
+			return ret;
+	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
+						plane_id, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
-			     struct skl_ddb_allocation *ddb,
 			     struct skl_pipe_wm *pipe_wm)
 {
+	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
 	struct drm_crtc_state *crtc_state = &cstate->base;
 	struct drm_plane *plane;
 	const struct drm_plane_state *pstate;
@@ -5108,18 +5110,12 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 		const struct intel_plane_state *intel_pstate =
 						to_intel_plane_state(pstate);
 
-		/* Watermarks calculated in master */
-		if (intel_pstate->slave)
-			continue;
-
-		if (intel_pstate->linked_plane)
-			ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
-		else if (intel_pstate->base.fb &&
-			 intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
-			ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+		if (INTEL_GEN(dev_priv) >= 11)
+			ret = icl_build_plane_wm(pipe_wm,
+						 cstate, intel_pstate);
 		else
-			ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
-
+			ret = skl_build_plane_wm(pipe_wm,
+						 cstate, intel_pstate);
 		if (ret)
 			return ret;
 	}
@@ -5134,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 				const struct skl_ddb_entry *entry)
 {
 	if (entry->end)
-		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+		I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
 	else
-		I915_WRITE(reg, 0);
+		I915_WRITE_FW(reg, 0);
 }
 
 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5151,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
 		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
 	}
 
-	I915_WRITE(reg, val);
+	I915_WRITE_FW(reg, val);
 }
 
-static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
-			       const struct skl_plane_wm *wm,
-			       const struct skl_ddb_allocation *ddb,
-			       enum plane_id plane_id)
+void skl_write_plane_wm(struct intel_plane *plane,
+			const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
-	enum pipe pipe = intel_crtc->pipe;
+	enum plane_id plane_id = plane->id;
+	enum pipe pipe = plane->pipe;
+	const struct skl_plane_wm *wm =
+		&crtc_state->wm.skl.optimal.planes[plane_id];
+	const struct skl_ddb_entry *ddb_y =
+		&crtc_state->wm.skl.plane_ddb_y[plane_id];
+	const struct skl_ddb_entry *ddb_uv =
+		&crtc_state->wm.skl.plane_ddb_uv[plane_id];
 
 	for (level = 0; level <= max_level; level++) {
 		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5172,29 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
 			   &wm->trans_wm);
 
-	if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
-		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-				    &ddb->uv_plane[pipe][plane_id]);
+	if (INTEL_GEN(dev_priv) >= 11) {
 		skl_ddb_entry_write(dev_priv,
-				    PLANE_NV12_BUF_CFG(pipe, plane_id),
-				    &ddb->plane[pipe][plane_id]);
-	} else {
-		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-				    &ddb->plane[pipe][plane_id]);
-		if (INTEL_GEN(dev_priv) < 11)
-			I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+		return;
 	}
+
+	if (wm->is_planar)
+		swap(ddb_y, ddb_uv);
+
+	skl_ddb_entry_write(dev_priv,
+			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+	skl_ddb_entry_write(dev_priv,
+			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
 }
 
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
-				const struct skl_plane_wm *wm,
-				const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+			 const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
-	enum pipe pipe = intel_crtc->pipe;
+	enum plane_id plane_id = plane->id;
+	enum pipe pipe = plane->pipe;
+	const struct skl_plane_wm *wm =
+		&crtc_state->wm.skl.optimal.planes[plane_id];
+	const struct skl_ddb_entry *ddb =
+		&crtc_state->wm.skl.plane_ddb_y[plane_id];
 
 	for (level = 0; level <= max_level; level++) {
 		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5202,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
 	}
 	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
 
-	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-			    &ddb->plane[pipe][PLANE_CURSOR]);
+	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
 }
 
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
 			 const struct skl_wm_level *l2)
 {
-	if (l1->plane_en != l2->plane_en)
-		return false;
+	return l1->plane_en == l2->plane_en &&
+		l1->plane_res_l == l2->plane_res_l &&
+		l1->plane_res_b == l2->plane_res_b;
+}
 
-	/* If both planes aren't enabled, the rest shouldn't matter */
-	if (!l1->plane_en)
-		return true;
+static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+				const struct skl_plane_wm *wm1,
+				const struct skl_plane_wm *wm2)
+{
+	int level, max_level = ilk_wm_max_level(dev_priv);
 
-	return (l1->plane_res_l == l2->plane_res_l &&
-		l1->plane_res_b == l2->plane_res_b);
+	for (level = 0; level <= max_level; level++) {
+		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
+		    !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+			return false;
+	}
+
+	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
 }
 
 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5244,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
 			      const struct skl_pipe_wm *old_pipe_wm,
 			      struct skl_pipe_wm *pipe_wm, /* out */
-			      struct skl_ddb_allocation *ddb, /* out */
 			      bool *changed /* out */)
 {
 	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
 	int ret;
 
-	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+	ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
 	if (ret)
 		return ret;
 
@@ -5276,42 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
 }
 
 static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+			    struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_atomic_state *state = cstate->base.state;
-	struct drm_device *dev = state->dev;
-	struct drm_crtc *crtc = cstate->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
-	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-	struct drm_plane *plane;
-	enum pipe pipe = intel_crtc->pipe;
+	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane;
 
-	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
-		struct drm_plane_state *plane_state;
-		struct intel_plane *linked;
-		enum plane_id plane_id = to_intel_plane(plane)->id;
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		struct intel_plane_state *plane_state;
+		enum plane_id plane_id = plane->id;
 
-		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
-					&new_ddb->plane[pipe][plane_id]) &&
-		    skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
-					&new_ddb->uv_plane[pipe][plane_id]))
+		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
 			continue;
 
-		plane_state = drm_atomic_get_plane_state(state, plane);
+		plane_state = intel_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state))
 			return PTR_ERR(plane_state);
 
-		/* Make sure linked plane is updated too */
-		linked = to_intel_plane_state(plane_state)->linked_plane;
-		if (!linked)
-			continue;
-
-		plane_state = drm_atomic_get_plane_state(state, &linked->base);
-		if (IS_ERR(plane_state))
-			return PTR_ERR(plane_state);
+		new_crtc_state->update_planes |= BIT(plane_id);
 	}
 
 	return 0;
@@ -5323,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
 	const struct drm_i915_private *dev_priv = to_i915(state->dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+	struct intel_crtc_state *old_crtc_state;
+	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc *crtc;
-	struct intel_crtc_state *cstate;
 	int ret, i;
 
 	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
 
-	for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
-		ret = skl_allocate_pipe_ddb(cstate, ddb);
+	for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
 		if (ret)
 			return ret;
 
-		ret = skl_ddb_add_affected_planes(cstate);
+		ret = skl_ddb_add_affected_planes(old_crtc_state,
+						  new_crtc_state);
 		if (ret)
 			return ret;
 	}
@@ -5343,36 +5342,29 @@ skl_compute_ddb(struct drm_atomic_state *state)
 }
 
 static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
 {
-	const struct drm_device *dev = state->dev;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
-	const struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(state);
-	const struct drm_crtc *crtc;
-	const struct drm_crtc_state *cstate;
-	const struct intel_plane *intel_plane;
-	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
-	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_crtc_state *old_crtc_state;
+	const struct intel_crtc_state *new_crtc_state;
+	struct intel_plane *plane;
+	struct intel_crtc *crtc;
 	int i;
 
-	for_each_new_crtc_in_state(state, crtc, cstate, i) {
-		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		enum pipe pipe = intel_crtc->pipe;
-
-		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-			enum plane_id plane_id = intel_plane->id;
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+			enum plane_id plane_id = plane->id;
 			const struct skl_ddb_entry *old, *new;
 
-			old = &old_ddb->plane[pipe][plane_id];
-			new = &new_ddb->plane[pipe][plane_id];
+			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
 
 			if (skl_ddb_entry_equal(old, new))
 				continue;
 
 			DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
-				      intel_plane->base.base.id,
-				      intel_plane->base.name,
+				      plane->base.base.id, plane->base.name,
 				      old->start, old->end,
 				      new->start, new->end);
 		}
@@ -5469,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
 	return 0;
 }
 
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout, as
+ * the default value of the watermark registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+				      struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct intel_plane *plane;
+
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		struct intel_plane_state *plane_state;
+		enum plane_id plane_id = plane->id;
+
+		/*
+		 * Force a full wm update for every plane on modeset.
+		 * Required because the reset value of the wm registers
+		 * is non-zero, whereas we want all disabled planes to
+		 * have zero watermarks. So if we turn off the relevant
+		 * power well the hardware state will go out of sync
+		 * with the software state.
+		 */
+		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+		    skl_plane_wm_equals(dev_priv,
+					&old_crtc_state->wm.skl.optimal.planes[plane_id],
+					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
+			continue;
+
+		plane_state = intel_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+
+		new_crtc_state->update_planes |= BIT(plane_id);
+	}
+
+	return 0;
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -5508,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
 			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
 
 		pipe_wm = &intel_cstate->wm.skl.optimal;
-		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
-					 &results->ddb, &changed);
+		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+		if (ret)
+			return ret;
+
+		ret = skl_wm_add_affected_planes(intel_state,
+						 to_intel_crtc(crtc));
 		if (ret)
 			return ret;
 
@@ -5523,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
 		intel_cstate->update_wm_pre = true;
 	}
 
-	skl_print_wm_changes(state);
+	skl_print_wm_changes(intel_state);
 
 	return 0;
 }
@@ -5534,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
 	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
-	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
 	enum pipe pipe = crtc->pipe;
-	enum plane_id plane_id;
 
 	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
 		return;
 
 	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
-	for_each_plane_id_on_crtc(crtc, plane_id) {
-		if (plane_id != PLANE_CURSOR)
-			skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
-					   ddb, plane_id);
-		else
-			skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
-					    ddb);
-	}
 }
 
 static void skl_initial_wm(struct intel_atomic_state *state,
@@ -5560,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_ddb_values *results = &state->wm_results;
-	struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
-	enum pipe pipe = intel_crtc->pipe;
 
 	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
 		return;
@@ -5571,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 	if (cstate->base.active_changed)
 		skl_atomic_update_crtc_wm(state, cstate);
 
-	memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
-	       sizeof(hw_vals->ddb.uv_plane[pipe]));
-	memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
-	       sizeof(hw_vals->ddb.plane[pipe]));
-
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
@@ -5726,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 	if (dev_priv->active_crtcs) {
 		/* Fully recompute DDB on first atomic commit */
 		dev_priv->wm.distrust_bios_wm = true;
-	} else {
-		/*
-		 * Easy/common case; just sanitize DDB now if everything off
-		 * Keep dbuf slice info intact
-		 */
-		memset(ddb->plane, 0, sizeof(ddb->plane));
-		memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
 	}
 }
 
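
skl_ddb_entry_write() above packs a [start, end) block range into one
register as (end - 1) << 16 | start, with 0 meaning "no allocation"; the
readout side reverses this. A stand-alone sketch of the round trip, with
the field width assumed:

#include <stdint.h>
#include <stdio.h>

struct skl_ddb_entry {
	uint16_t start, end;	/* end is exclusive */
};

static uint32_t ddb_entry_pack(const struct skl_ddb_entry *e)
{
	if (!e->end)
		return 0;	/* disabled plane: no allocation */
	return (uint32_t)(e->end - 1) << 16 | e->start;
}

static void ddb_entry_unpack(uint32_t val, struct skl_ddb_entry *e)
{
	e->start = val & 0x7ff;		/* assumed field width */
	e->end = (val >> 16) & 0x7ff;
	if (e->end)			/* 0 means no allocation */
		e->end += 1;
}

int main(void)
{
	struct skl_ddb_entry in = { .start = 0, .end = 160 }, out;
	uint32_t reg = ddb_entry_pack(&in);

	ddb_entry_unpack(reg, &out);
	printf("reg=0x%08x -> [%u, %u)\n", reg, out.start, out.end);
	return 0;
}
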
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 54fa17a..419e563 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -75,6 +75,10 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
 	if (i915_modparams.enable_psr == -1)
 		return false;
 
+	/* Cannot enable DSC and PSR2 simultaneously */
+	WARN_ON(crtc_state->dsc_params.compression_enable &&
+		crtc_state->has_psr2);
+
 	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 	case I915_PSR_DEBUG_FORCE_PSR1:
 		return false;
@@ -169,6 +173,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 	u32 transcoders = BIT(TRANSCODER_EDP);
 	enum transcoder cpu_transcoder;
 	ktime_t time_ns =  ktime_get();
+	u32 mask = 0;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		transcoders |= BIT(TRANSCODER_A) |
@@ -178,10 +183,22 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
 		int shift = edp_psr_shift(cpu_transcoder);
 
-		/* FIXME: Exit PSR and link train manually when this happens. */
-		if (psr_iir & EDP_PSR_ERROR(shift))
-			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
-				      transcoder_name(cpu_transcoder));
+		if (psr_iir & EDP_PSR_ERROR(shift)) {
+			DRM_WARN("[transcoder %s] PSR aux error\n",
+				 transcoder_name(cpu_transcoder));
+
+			dev_priv->psr.irq_aux_error = true;
+
+			/*
+			 * If this interrupt is not masked it will keep
+			 * firing so fast that it prevents the scheduled
+			 * work from running.
+			 * Also, after a PSR error we don't want to arm PSR
+			 * again, so there is no need to unmask the interrupt
+			 * or clear irq_aux_error.
+			 */
+			mask |= EDP_PSR_ERROR(shift);
+		}
 
 		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
 			dev_priv->psr.last_entry_attempt = time_ns;
@@ -203,6 +220,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 			}
 		}
 	}
+
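+	/*
+	 * Fold the new error bits into the existing interrupt mask with a
+	 * read-modify-write so already-masked sources stay masked.
+	 */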
+	if (mask) {
+		mask |= I915_READ(EDP_PSR_IMR);
+		I915_WRITE(EDP_PSR_IMR, mask);
+
+		schedule_work(&dev_priv->psr.work);
+	}
 }
 
 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
@@ -482,6 +506,16 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	if (!dev_priv->psr.sink_psr2_support)
 		return false;
 
+	/*
+	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
+	 * resolution requires DSC to be enabled, priority is given to DSC
+	 * over PSR2.
+	 */
+	if (crtc_state->dsc_params.compression_enable) {
+		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+		return false;
+	}
+
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
 		psr_max_h = 4096;
 		psr_max_v = 2304;
@@ -527,10 +561,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 		return;
 	}
 
-	if (IS_HASWELL(dev_priv) &&
-	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
-		      S3D_ENABLE) {
-		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+	if (dev_priv->psr.sink_not_reliable) {
+		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
 		return;
 	}
 
@@ -687,6 +719,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 	dev_priv->psr.prepared = true;
+	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
 	if (psr_global_enabled(dev_priv->psr.debug))
 		intel_psr_enable_locked(dev_priv, crtc_state);
@@ -933,6 +966,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
+static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+{
+	struct i915_psr *psr = &dev_priv->psr;
+
+	intel_psr_disable_locked(psr->dp);
+	psr->sink_not_reliable = true;
+	/* let's make sure that the sink is awake */
+	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
 static void intel_psr_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
@@ -943,6 +986,9 @@ static void intel_psr_work(struct work_struct *work)
 	if (!dev_priv->psr.enabled)
 		goto unlock;
 
+	if (READ_ONCE(dev_priv->psr.irq_aux_error))
+		intel_psr_handle_irq(dev_priv);
+
 	/*
 	 * We have to make sure PSR is ready for re-enable
 	 * otherwise it stays disabled until the next full enable/disable cycle.
@@ -981,9 +1027,6 @@ static void intel_psr_work(struct work_struct *work)
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_crtc *crtc;
-	enum pipe pipe;
-
 	if (!CAN_PSR(dev_priv))
 		return;
 
@@ -996,10 +1039,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
-	pipe = to_intel_crtc(crtc)->pipe;
-
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
 
 	if (frontbuffer_bits)
@@ -1024,9 +1064,6 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_crtc *crtc;
-	enum pipe pipe;
-
 	if (!CAN_PSR(dev_priv))
 		return;
 
@@ -1039,10 +1076,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
-	pipe = to_intel_crtc(crtc)->pipe;
-
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
 	/* By definition flush = invalidate + flush */
@@ -1056,7 +1090,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 		 * but it makes more sense to write to the currently
 		 * active pipe.
 		 */
-		I915_WRITE(CURSURFLIVE(pipe), 0);
+		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
 	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1073,6 +1107,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
  */
 void intel_psr_init(struct drm_i915_private *dev_priv)
 {
+	u32 val;
+
 	if (!HAS_PSR(dev_priv))
 		return;
 
@@ -1086,6 +1122,22 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
 			i915_modparams.enable_psr = 0;
 
+	/*
+	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+	 * will still keep the error set even after the reset done in the
+	 * irq_preinstall and irq_uninstall hooks.
+	 * Enabling PSR in this situation causes the screen to freeze the
+	 * first time the PSR HW tries to activate, so let's keep PSR
+	 * disabled to avoid any rendering problems.
+	 */
+	val = I915_READ(EDP_PSR_IIR);
+	val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
+	if (val) {
+		DRM_DEBUG_KMS("PSR interrupt error set\n");
+		dev_priv->psr.sink_not_reliable = true;
+		return;
+	}
+
 	/* Set link_standby x link_off defaults */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		/* HSW and BDW require workarounds that we don't implement. */
@@ -1123,6 +1175,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
 		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
 		intel_psr_disable_locked(intel_dp);
+		psr->sink_not_reliable = true;
 	}
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
@@ -1140,10 +1193,27 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 	if (val & ~errors)
 		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
 			  val & ~errors);
-	if (val & errors)
+	if (val & errors) {
 		intel_psr_disable_locked(intel_dp);
+		psr->sink_not_reliable = true;
+	}
 	/* clear status register */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
 exit:
 	mutex_unlock(&psr->lock);
 }
+
+bool intel_psr_enabled(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	bool ret;
+
+	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+		return false;
+
+	mutex_lock(&dev_priv->psr.lock);
+	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
+	mutex_unlock(&dev_priv->psr.lock);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 87eebc1..c5eb26a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -150,8 +150,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE) {
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = i915_ggtt_offset(rq->engine->scratch) |
-			PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
 		*cs++ = 0;
 
@@ -159,8 +158,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 			*cs++ = MI_FLUSH;
 
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = i915_ggtt_offset(rq->engine->scratch) |
-			PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
 		*cs++ = 0;
 	}
@@ -212,8 +210,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
-	u32 scratch_addr =
-		i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
 	u32 *cs;
 
 	cs = intel_ring_begin(rq, 6);
@@ -246,8 +243,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
 static int
 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-	u32 scratch_addr =
-		i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
 	u32 *cs, flags = 0;
 	int ret;
 
@@ -316,8 +312,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 static int
 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-	u32 scratch_addr =
-		i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
 	u32 *cs, flags = 0;
 
 	/*
@@ -529,6 +524,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
 
 	intel_engine_reset_breadcrumbs(engine);
 
+	if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
+		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+		if (HAS_VEBOX(dev_priv))
+			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+	}
+
 	/* Enforce ordering by reading HEAD register back */
 	I915_READ_HEAD(engine);
 
@@ -546,10 +548,11 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	/* Check that the ring offsets point within the ring! */
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
-
 	intel_ring_update_space(ring);
+
+	/* First wake the ring up as an empty/idle ring (head == tail) */
 	I915_WRITE_HEAD(engine, ring->head);
-	I915_WRITE_TAIL(engine, ring->tail);
+	I915_WRITE_TAIL(engine, ring->head);
 	(void)I915_READ_TAIL(engine);
 
 	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
@@ -574,6 +577,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) > 2)
 		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 
+	/* Now awake, let it get started */
+	if (ring->tail != ring->head) {
+		I915_WRITE_TAIL(engine, ring->tail);
+		(void)I915_READ_TAIL(engine);
+	}
+
 	/* Papering over lost _interrupts_ immediately following the restart */
 	intel_engine_wakeup(engine);
 out:
@@ -642,7 +651,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
 {
 	int ret;
 
-	ret = intel_ctx_workarounds_emit(rq);
+	ret = intel_engine_emit_ctx_wa(rq);
 	if (ret != 0)
 		return ret;
 
@@ -660,8 +669,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	intel_whitelist_workarounds_apply(engine);
-
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
 	if (IS_GEN(dev_priv, 4, 6))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
@@ -743,9 +750,18 @@ static void cancel_requests(struct intel_engine_cs *engine)
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(request, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!request->global_seqno);
-		if (!i915_request_completed(request))
-			dma_fence_set_error(&request->fence, -EIO);
+
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			     &request->fence.flags))
+			continue;
+
+		dma_fence_set_error(&request->fence, -EIO);
 	}
+
+	intel_write_status_page(engine,
+				I915_GEM_HWS_INDEX,
+				intel_engine_last_submit(engine));
+
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -973,7 +989,7 @@ i965_emit_bb_start(struct i915_request *rq,
 }
 
 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
@@ -981,7 +997,9 @@ i830_emit_bb_start(struct i915_request *rq,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+	u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+	GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
 
 	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
@@ -1438,7 +1456,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct i915_timeline *timeline;
 	struct intel_ring *ring;
-	unsigned int size;
 	int err;
 
 	intel_engine_setup_common(engine);
@@ -1463,21 +1480,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	GEM_BUG_ON(engine->buffer);
 	engine->buffer = ring;
 
-	size = PAGE_SIZE;
-	if (HAS_BROKEN_CS_TLB(engine->i915))
-		size = I830_WA_SIZE;
-	err = intel_engine_create_scratch(engine, size);
+	err = intel_engine_init_common(engine);
 	if (err)
 		goto err_unpin;
 
-	err = intel_engine_init_common(engine);
-	if (err)
-		goto err_scratch;
-
 	return 0;
 
-err_scratch:
-	intel_engine_cleanup_scratch(engine);
 err_unpin:
 	intel_ring_unpin(ring);
 err_ring:
@@ -1551,7 +1559,7 @@ static int flush_pd_dir(struct i915_request *rq)
 	/* Stall until the page table load is complete */
 	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
-	*cs++ = i915_ggtt_offset(engine->scratch);
+	*cs++ = i915_scratch_offset(rq->i915);
 	*cs++ = MI_NOOP;
 
 	intel_ring_advance(rq, cs);
@@ -1660,7 +1668,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 			/* Insert a delay before the next switch! */
 			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 			*cs++ = i915_mmio_reg_offset(last_reg);
-			*cs++ = i915_ggtt_offset(engine->scratch);
+			*cs++ = i915_scratch_offset(rq->i915);
 			*cs++ = MI_NOOP;
 		}
 		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8a2270b..72edaa7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -15,6 +15,7 @@
 #include "i915_selftest.h"
 #include "i915_timeline.h"
 #include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
 
 struct drm_printer;
 struct i915_sched_attr;
@@ -313,13 +314,6 @@ struct intel_engine_execlists {
 	struct rb_root_cached queue;
 
 	/**
-	 * @csb_read: control register for Context Switch buffer
-	 *
-	 * Note this register is always in mmio.
-	 */
-	u32 __iomem *csb_read;
-
-	/**
 	 * @csb_write: control register for Context Switch buffer
 	 *
 	 * Note this register may be either mmio or HWSP shadow.
@@ -339,15 +333,6 @@ struct intel_engine_execlists {
 	u32 preempt_complete_status;
 
 	/**
-	 * @csb_write_reset: reset value for CSB write pointer
-	 *
-	 * As the CSB write pointer maybe either in HWSP or as a field
-	 * inside an mmio register, we want to reprogram it slightly
-	 * differently to avoid later confusion.
-	 */
-	u32 csb_write_reset;
-
-	/**
 	 * @csb_head: context status buffer head
 	 */
 	u8 csb_head;
@@ -451,7 +436,9 @@ struct intel_engine_cs {
 
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
-	struct i915_vma *scratch;
+	struct i915_wa_list ctx_wa_list;
+	struct i915_wa_list wa_list;
+	struct i915_wa_list whitelist;
 
 	u32             irq_keep_mask; /* always keep these interrupts */
 	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -908,10 +895,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
-				unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1c2de9b..4350a52 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -76,6 +76,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
 		return "TRANSCODER_C";
 	case POWER_DOMAIN_TRANSCODER_EDP:
 		return "TRANSCODER_EDP";
+	case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+		return "TRANSCODER_EDP_VDSC";
 	case POWER_DOMAIN_TRANSCODER_DSI_A:
 		return "TRANSCODER_DSI_A";
 	case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -2028,9 +2030,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	 */
 #define ICL_PW_2_POWER_DOMAINS (			\
 	ICL_PW_3_POWER_DOMAINS |			\
+	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |		\
 	BIT_ULL(POWER_DOMAIN_INIT))
 	/*
-	 * - eDP/DSI VDSC
 	 * - KVMR (HW control)
 	 */
 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index abe1938..d2e003d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -373,14 +373,12 @@ skl_program_scaler(struct intel_plane *plane,
 #define  BOFF(x)          (((x) & 0xffff) << 16)
 
 static void
-icl_program_input_csc_coeff(const struct intel_crtc_state *crtc_state,
-			    const struct intel_plane_state *plane_state)
+icl_program_input_csc(struct intel_plane *plane,
+		      const struct intel_crtc_state *crtc_state,
+		      const struct intel_plane_state *plane_state)
 {
-	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	enum pipe pipe = crtc->pipe;
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
 	enum plane_id plane_id = plane->id;
 
 	static const u16 input_csc_matrix[][9] = {
@@ -508,28 +506,12 @@ skl_program_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
-			      plane_state->color_ctl);
-
-	if (fb->format->is_yuv && icl_is_hdr_plane(plane))
-		icl_program_input_csc_coeff(crtc_state, plane_state);
-
-	I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
-	I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
-	I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
-
-	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
 	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+	I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
 	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
 	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
 		      (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
 
-	if (INTEL_GEN(dev_priv) < 11)
-		I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
-			      (plane_state->color_plane[1].y << 16) |
-			       plane_state->color_plane[1].x);
-
 	if (icl_is_hdr_plane(plane)) {
 		u32 cus_ctl = 0;
 
@@ -551,15 +533,38 @@ skl_program_plane(struct intel_plane *plane,
 		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
 	}
 
-	if (!slave && plane_state->scaler_id >= 0)
-		skl_program_scaler(plane, crtc_state, plane_state);
+	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+			      plane_state->color_ctl);
 
-	I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+	if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+		icl_program_input_csc(plane, crtc_state, plane_state);
 
+	skl_write_plane_wm(plane, crtc_state);
+
+	I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+	I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
+	I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+
+	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+
+	if (INTEL_GEN(dev_priv) < 11)
+		I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+			      (plane_state->color_plane[1].y << 16) |
+			      plane_state->color_plane[1].x);
+
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
 	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
 	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
 		      intel_plane_ggtt_offset(plane_state) + surf_addr);
 
+	if (!slave && plane_state->scaler_id >= 0)
+		skl_program_scaler(plane, crtc_state, plane_state);
+
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
@@ -589,7 +594,8 @@ icl_update_slave(struct intel_plane *plane,
 }
 
 static void
-skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+skl_disable_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum plane_id plane_id = plane->id;
@@ -598,6 +604,8 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
+	skl_write_plane_wm(plane, crtc_state);
+
 	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
 	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
 
@@ -819,35 +827,41 @@ vlv_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	vlv_update_clrc(plane_state);
+	I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
+		      plane_state->color_plane[0].stride);
+	I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+	I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+	I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
 
 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
 		chv_update_csc(plane_state);
 
 	if (key->flags) {
 		I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
-		I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
 		I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+		I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
 	}
-	I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
-		      plane_state->color_plane[0].stride);
-	I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
 
-	I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
 	I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+	I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
 
-	I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
-
-	I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
 	I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
 	I915_WRITE_FW(SPSURF(pipe, plane_id),
 		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 
+	vlv_update_clrc(plane_state);
+
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
-vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+vlv_disable_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
@@ -980,27 +994,32 @@ ivb_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	if (key->flags) {
-		I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
-		I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
-		I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
-	}
-
 	I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
 	I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+	I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+	if (IS_IVYBRIDGE(dev_priv))
+		I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+
+	if (key->flags) {
+		I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
+		I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
+		I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
+	}
 
 	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
 	 * register */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
 	} else {
-		I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
 		I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+		I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
 	}
 
-	I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
-	if (IS_IVYBRIDGE(dev_priv))
-		I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
 	I915_WRITE_FW(SPRCTL(pipe), sprctl);
 	I915_WRITE_FW(SPRSURF(pipe),
 		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
@@ -1009,7 +1028,8 @@ ivb_update_plane(struct intel_plane *plane,
 }
 
 static void
-ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+ivb_disable_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
@@ -1018,7 +1038,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
 	I915_WRITE_FW(SPRCTL(pipe), 0);
-	/* Can't leave the scaler enabled... */
+	/* Disable the scaler */
 	if (IS_IVYBRIDGE(dev_priv))
 		I915_WRITE_FW(SPRSCALE(pipe), 0);
 	I915_WRITE_FW(SPRSURF(pipe), 0);
@@ -1148,20 +1168,25 @@ g4x_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	if (key->flags) {
-		I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
-		I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
-		I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
-	}
-
 	I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
 	I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
-	I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
-	I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
-
 	I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
 	I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+
+	if (key->flags) {
+		I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
+		I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
+		I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
+	}
+
+	I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+	I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
+
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
 	I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
 	I915_WRITE_FW(DVSSURF(pipe),
 		      intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
@@ -1170,7 +1195,8 @@ g4x_update_plane(struct intel_plane *plane,
 }
 
 static void
-g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+g4x_disable_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
new file mode 100644
index 0000000..c56ba0e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Author: Gaurav K Singh <gaurav.k.singh@intel.com>
+ *         Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+enum ROW_INDEX_BPP {
+	ROW_INDEX_6BPP = 0,
+	ROW_INDEX_8BPP,
+	ROW_INDEX_10BPP,
+	ROW_INDEX_12BPP,
+	ROW_INDEX_15BPP,
+	MAX_ROW_INDEX
+};
+
+enum COLUMN_INDEX_BPC {
+	COLUMN_INDEX_8BPC = 0,
+	COLUMN_INDEX_10BPC,
+	COLUMN_INDEX_12BPC,
+	COLUMN_INDEX_14BPC,
+	COLUMN_INDEX_16BPC,
+	MAX_COLUMN_INDEX
+};
+
+#define DSC_SUPPORTED_VERSION_MIN		1
+
+/* From DSC v1.11 spec: rc_parameter_set syntax element values that are typically constant */
+static u16 rc_buf_thresh[] = {
+	896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
+	7744, 7872, 8000, 8064
+};
+
+struct rc_parameters {
+	u16 initial_xmit_delay;
+	u8 first_line_bpg_offset;
+	u16 initial_offset;
+	u8 flatness_min_qp;
+	u8 flatness_max_qp;
+	u8 rc_quant_incr_limit0;
+	u8 rc_quant_incr_limit1;
+	struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+};
+
+/*
+ * Selected Rate Control Related Parameter Recommended Values
+ * from DSC_v1.11 spec & C Model release: DSC_model_20161212
+ */
+static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+{
+	/* 6BPP/8BPC */
+	{ 768, 15, 6144, 3, 13, 11, 11, {
+		{ 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 },
+		{ 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 },
+		{ 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 },
+		{ 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 }
+		}
+	},
+	/* 6BPP/10BPC */
+	{ 768, 15, 6144, 7, 17, 15, 15, {
+		{ 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 },
+		{ 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 },
+		{ 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 },
+		{ 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 },
+		{ 17, 18, -12 }
+		}
+	},
+	/* 6BPP/12BPC */
+	{ 768, 15, 6144, 11, 21, 19, 19, {
+		{ 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 },
+		{ 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 },
+		{ 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 },
+		{ 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 },
+		{ 21, 22, -12 }
+		}
+	},
+	/* 6BPP/14BPC */
+	{ 768, 15, 6144, 15, 25, 23, 27, {
+		{ 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 },
+		{ 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 },
+		{ 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 },
+		{ 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 },
+		{ 25, 26, -12 }
+		}
+	},
+	/* 6BPP/16BPC */
+	{ 768, 15, 6144, 19, 29, 27, 27, {
+		{ 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 },
+		{ 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 },
+		{ 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 },
+		{ 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 },
+		{ 29, 30, -12 }
+		}
+	},
+},
+{
+	/* 8BPP/8BPC */
+	{ 512, 12, 6144, 3, 12, 11, 11, {
+		{ 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+		{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+		{ 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 },
+		{ 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+		}
+	},
+	/* 8BPP/10BPC */
+	{ 512, 12, 6144, 7, 16, 15, 15, {
+		{ 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
+		{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+		{ 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+		{ 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+		}
+	},
+	/* 8BPP/12BPC */
+	{ 512, 12, 6144, 11, 20, 19, 19, {
+		{ 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
+		{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+		{ 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+		{ 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+		{ 21, 23, -12 }
+		}
+	},
+	/* 8BPP/14BPC */
+	{ 512, 12, 6144, 15, 24, 23, 23, {
+		{ 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
+		{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+		{ 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
+		{ 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
+		{ 24, 25, -12 }
+		}
+	},
+	/* 8BPP/16BPC */
+	{ 512, 12, 6144, 19, 28, 27, 27, {
+		{ 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
+		{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+		{ 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
+		{ 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
+		{ 28, 29, -12 }
+		}
+	},
+},
+{
+	/* 10BPP/8BPC */
+	{ 410, 15, 5632, 3, 12, 11, 11, {
+		{ 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
+		{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+		{ 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
+		{ 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
+		}
+	},
+	/* 10BPP/10BPC */
+	{ 410, 15, 5632, 7, 16, 15, 15, {
+		{ 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
+		{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+		{ 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
+		{ 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
+		}
+	},
+	/* 10BPP/12BPC */
+	{ 410, 15, 5632, 11, 20, 19, 19, {
+		{ 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
+		{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+		{ 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
+		{ 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
+		{ 19, 20, -12 }
+		}
+	},
+	/* 10BPP/14BPC */
+	{ 410, 15, 5632, 15, 24, 23, 23, {
+		{ 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
+		{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+		{ 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
+		{ 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
+		{ 23, 24, -12 }
+		}
+	},
+	/* 10BPP/16BPC */
+	{ 410, 15, 5632, 19, 28, 27, 27, {
+		{ 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
+		{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+		{ 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
+		{ 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
+		{ 27, 28, -12 }
+		}
+	},
+},
+{
+	/* 12BPP/8BPC */
+	{ 341, 15, 2048, 3, 12, 11, 11, {
+		{ 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+		{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+		{ 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 },
+		{ 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+		}
+	},
+	/* 12BPP/10BPC */
+	{ 341, 15, 2048, 7, 16, 15, 15, {
+		{ 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
+		{ 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+		{ 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+		{ 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+		}
+	},
+	/* 12BPP/12BPC */
+	{ 341, 15, 2048, 11, 20, 19, 19, {
+		{ 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
+		{ 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+		{ 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+		{ 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+		{ 21, 23, -12 }
+		}
+	},
+	/* 12BPP/14BPC */
+	{ 341, 15, 2048, 15, 24, 23, 23, {
+		{ 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
+		{ 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+		{ 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
+		{ 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
+		{ 22, 23, -12 }
+		}
+	},
+	/* 12BPP/16BPC */
+	{ 341, 15, 2048, 19, 28, 27, 27, {
+		{ 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
+		{ 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+		{ 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
+		{ 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
+		{ 26, 27, -12 }
+		}
+	},
+},
+{
+	/* 15BPP/8BPC */
+	{ 273, 15, 2048, 3, 12, 11, 11, {
+		{ 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
+		{ 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
+		{ 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
+		{ 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
+		}
+	},
+	/* 15BPP/10BPC */
+	{ 273, 15, 2048, 7, 16, 15, 15, {
+		{ 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
+		{ 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
+		{ 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
+		{ 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
+		}
+	},
+	/* 15BPP/12BPC */
+	{ 273, 15, 2048, 11, 20, 19, 19, {
+		{ 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
+		{ 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
+		{ 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
+		{ 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
+		{ 16, 17, -12 }
+		}
+	},
+	/* 15BPP/14BPC */
+	{ 273, 15, 2048, 15, 24, 23, 23, {
+		{ 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
+		{ 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
+		{ 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
+		{ 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
+		{ 20, 21, -12 }
+		}
+	},
+	/* 15BPP/16BPC */
+	{ 273, 15, 2048, 19, 28, 27, 27, {
+		{ 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
+		{ 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
+		{ 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
+		{ 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
+		{ 24, 25, -12 }
+		}
+	}
+}
+
+};
+
+static int get_row_index_for_rc_params(u16 compressed_bpp)
+{
+	switch (compressed_bpp) {
+	case 6:
+		return ROW_INDEX_6BPP;
+	case 8:
+		return ROW_INDEX_8BPP;
+	case 10:
+		return ROW_INDEX_10BPP;
+	case 12:
+		return ROW_INDEX_12BPP;
+	case 15:
+		return ROW_INDEX_15BPP;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int get_column_index_for_rc_params(u8 bits_per_component)
+{
+	switch (bits_per_component) {
+	case 8:
+		return COLUMN_INDEX_8BPC;
+	case 10:
+		return COLUMN_INDEX_10BPC;
+	case 12:
+		return COLUMN_INDEX_12BPC;
+	case 14:
+		return COLUMN_INDEX_14BPC;
+	case 16:
+		return COLUMN_INDEX_16BPC;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+	unsigned long groups_per_line = 0;
+	unsigned long groups_total = 0;
+	unsigned long num_extra_mux_bits = 0;
+	unsigned long slice_bits = 0;
+	unsigned long hrd_delay = 0;
+	unsigned long final_scale = 0;
+	unsigned long rbs_min = 0;
+
+	/* Number of groups used to code each line of a slice */
+	groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+				       DSC_RC_PIXELS_PER_GROUP);
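+	/*
+	 * Illustrative example (values not mandated by this patch): a
+	 * 1080 pixel wide slice with DSC_RC_PIXELS_PER_GROUP = 3 gives
+	 * groups_per_line = DIV_ROUND_UP(1080, 3) = 360.
+	 */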
+
+	/* chunksize in Bytes */
+	vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+						  vdsc_cfg->bits_per_pixel,
+						  (8 * 16));
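+	/*
+	 * Continuing the illustrative example: at 8 bpp (bits_per_pixel =
+	 * 128 in 1/16 bpp units) a 1080 pixel slice gives
+	 * DIV_ROUND_UP(1080 * 128, 128) = 1080 bytes per chunk.
+	 */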
+
+	if (vdsc_cfg->convert_rgb)
+		num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+					  (4 * vdsc_cfg->bits_per_component + 4)
+					  - 2);
+	else
+		num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+			(4 * vdsc_cfg->bits_per_component + 4) +
+			2 * (4 * vdsc_cfg->bits_per_component) - 2;
+	/* Number of bits in one Slice */
+	slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
+	while ((num_extra_mux_bits > 0) &&
+	       ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+		num_extra_mux_bits--;
+
+	if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+		vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+	/* scale_decrement_interval calculation according to DSC spec 1.11 */
+	if (vdsc_cfg->initial_scale_value > 8)
+		vdsc_cfg->scale_decrement_interval = groups_per_line /
+			(vdsc_cfg->initial_scale_value - 8);
+	else
+		vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
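+	/*
+	 * E.g. initial_scale_value = 32 (4.0 in U3.3) and 360 groups per
+	 * line give scale_decrement_interval = 360 / (32 - 8) = 15
+	 * (illustrative values).
+	 */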
+
+	vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+		(vdsc_cfg->initial_xmit_delay *
+		 vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+	if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+		DRM_DEBUG_KMS("FinalOfs must be < RcModelSize for this InitialXmitDelay\n");
+		return -ERANGE;
+	}
+
+	final_scale = (vdsc_cfg->rc_model_size * 8) /
+		(vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+	if (vdsc_cfg->slice_height > 1)
+		/*
+		 * NflBpgOffset is 16 bit value with 11 fractional bits
+		 * hence we multiply by 2^11 for preserving the
+		 * fractional part
+		 */
+		vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+							(vdsc_cfg->slice_height - 1));
+	else
+		vdsc_cfg->nfl_bpg_offset = 0;
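+	/*
+	 * E.g. first_line_bpg_offset = 12 (the 8bpp/8bpc value from the
+	 * table above) and slice_height = 8 give
+	 * nfl_bpg_offset = DIV_ROUND_UP(12 << 11, 7) = 3511.
+	 */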
+
+	/* 2^16 - 1 */
+	if (vdsc_cfg->nfl_bpg_offset > 65535) {
+		DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+		return -ERANGE;
+	}
+
+	/* Number of groups used to code the entire slice */
+	groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+	/* slice_bpg_offset is 16 bit value with 11 fractional bits */
+	vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+						    vdsc_cfg->initial_offset +
+						    num_extra_mux_bits) << 11),
+						  groups_total);
+
+	if (final_scale > 9) {
+		/*
+		 * ScaleIncrementInterval = finaloffset /
+		 *	((NflBpgOffset + SliceBpgOffset) * 8 * (finalscale - 1.125));
+		 * as (NflBpgOffset + SliceBpgOffset) carries an 11-bit fractional
+		 * value, we need to divide the stored values by 2^11.
+		 */
+		vdsc_cfg->scale_increment_interval =
+				(vdsc_cfg->final_offset * (1 << 11)) /
+				((vdsc_cfg->nfl_bpg_offset +
+				vdsc_cfg->slice_bpg_offset) *
+				(final_scale - 9));
+	} else {
+		/*
+		 * If finalScaleValue is less than or equal to 9, a value of 0 should
+		 * be used to disable the scale increment at the end of the slice
+		 */
+		vdsc_cfg->scale_increment_interval = 0;
+	}
+
+	if (vdsc_cfg->scale_increment_interval > 65535) {
+		DRM_DEBUG_KMS("ScaleIncrementInterval is too large for this slice height\n");
+		return -ERANGE;
+	}
+
+	/*
+	 * DSC spec mentions that bits_per_pixel specifies the target
+	 * bits/pixel (bpp) rate that is used by the encoder,
+	 * in steps of 1/16 of a bit per pixel
+	 */
+	rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+		DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+			     vdsc_cfg->bits_per_pixel, 16) +
+		groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+	hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+	vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+	vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
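+	/*
+	 * E.g. at 8 bpp (bits_per_pixel = 128 in 1/16 bpp units) this
+	 * reduces to hrd_delay = DIV_ROUND_UP(rbs_min * 16, 128) =
+	 * DIV_ROUND_UP(rbs_min, 8) (illustrative).
+	 */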
+
+	return 0;
+}
+
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+				struct intel_crtc_state *pipe_config)
+{
+	struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
+	u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+	u8 i = 0;
+	int row_index = 0;
+	int column_index = 0;
+	u8 line_buf_depth = 0;
+
+	vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
+	vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+	vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
+					     pipe_config->dsc_params.slice_count);
+	/*
+	 * Slice Height of 8 works for all currently available panels. So start
+	 * with that if pic_height is an integral multiple of 8.
+	 * Eventually add logic to try multiple slice heights.
+	 */
+	if (vdsc_cfg->pic_height % 8 == 0)
+		vdsc_cfg->slice_height = 8;
+	else if (vdsc_cfg->pic_height % 4 == 0)
+		vdsc_cfg->slice_height = 4;
+	else
+		vdsc_cfg->slice_height = 2;
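+	/*
+	 * E.g. a 2160 line mode picks slice_height = 8, while a 2156 line
+	 * mode (divisible by 4 but not by 8) falls back to 4
+	 * (illustrative modes).
+	 */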
+
+	/* Values filled from DSC Sink DPCD */
+	vdsc_cfg->dsc_version_major =
+		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+	vdsc_cfg->dsc_version_minor =
+		min(DSC_SUPPORTED_VERSION_MIN,
+		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+		DP_DSC_RGB;
+
+	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+	if (!line_buf_depth) {
+		DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+		return -EINVAL;
+	}
+	if (vdsc_cfg->dsc_version_minor == 2)
+		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+	else
+		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+	/* Gen 11 does not support YCbCr */
+	vdsc_cfg->enable422 = false;
+	/* Gen 11 does not support VBR */
+	vdsc_cfg->vbr_enable = false;
+	vdsc_cfg->block_pred_enable =
+			intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+	/* Gen 11 only supports integral values of bpp */
+	vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+	vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
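+	/*
+	 * E.g. compressed_bpp = 8 is stored as 8 << 4 = 128 (U6.4, i.e.
+	 * 1/16 bpp units) and pipe_bpp = 24 yields bits_per_component = 8.
+	 */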
+
+	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+		/*
+		 * six 0s are appended to the lsb of each threshold value
+		 * internally in h/w.
+		 * Only 8 bits are allowed for programming RcBufThreshold
+		 */
+		vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6;
+	}
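+	/*
+	 * E.g. the first threshold above, 896, is programmed as
+	 * 896 >> 6 = 14 (0x0E).
+	 */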
+
+	/*
+	 * For 6bpp, RC Buffer threshold 12 and 13 need a different value
+	 * as per C Model
+	 */
+	if (compressed_bpp == 6) {
+		vdsc_cfg->rc_buf_thresh[12] = 0x7C;
+		vdsc_cfg->rc_buf_thresh[13] = 0x7D;
+	}
+
+	row_index = get_row_index_for_rc_params(compressed_bpp);
+	column_index =
+		get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
+
+	if (row_index < 0 || column_index < 0)
+		return -EINVAL;
+
+	vdsc_cfg->first_line_bpg_offset =
+		rc_params[row_index][column_index].first_line_bpg_offset;
+	vdsc_cfg->initial_xmit_delay =
+		rc_params[row_index][column_index].initial_xmit_delay;
+	vdsc_cfg->initial_offset =
+		rc_params[row_index][column_index].initial_offset;
+	vdsc_cfg->flatness_min_qp =
+		rc_params[row_index][column_index].flatness_min_qp;
+	vdsc_cfg->flatness_max_qp =
+		rc_params[row_index][column_index].flatness_max_qp;
+	vdsc_cfg->rc_quant_incr_limit0 =
+		rc_params[row_index][column_index].rc_quant_incr_limit0;
+	vdsc_cfg->rc_quant_incr_limit1 =
+		rc_params[row_index][column_index].rc_quant_incr_limit1;
+
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+		vdsc_cfg->rc_range_params[i].range_min_qp =
+			rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+		vdsc_cfg->rc_range_params[i].range_max_qp =
+			rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+		/*
+		 * Range BPG Offset uses 2's complement and is only 6 bits
+		 * wide, so mask it down to 6 bits.
+		 */
+		vdsc_cfg->rc_range_params[i].range_bpg_offset =
+			rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+			DSC_RANGE_BPG_OFFSET_MASK;
+	}
+
+	/*
+	 * BitsPerComponent value determines mux_word_size:
+	 * When BitsPerComponent is 12bpc, muxWordSize will be equal to 64 bits
+	 * When BitsPerComponent is 8 or 10bpc, muxWordSize will be equal to
+	 * 48 bits
+	 */
+	if (vdsc_cfg->bits_per_component == 8 ||
+	    vdsc_cfg->bits_per_component == 10)
+		vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+	else if (vdsc_cfg->bits_per_component == 12)
+		vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+	/* RC_MODEL_SIZE is a constant across all configurations */
+	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+	/* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
+	vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
+		(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
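+	/*
+	 * E.g. with rc_model_size = 8192 and initial_offset = 6144 (the
+	 * 8bpp/8bpc value from the table above), initial_scale_value =
+	 * (8192 << 3) / (8192 - 6144) = 32, i.e. 4.0 in U3.3.
+	 */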
+
+	return intel_compute_rc_parameters(vdsc_cfg);
+}
+
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
+{
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+	/*
+	 * On ICL, VDSC/joining for the eDP transcoder uses a separate power
+	 * well, PW2, which requires the POWER_DOMAIN_TRANSCODER_EDP_VDSC
+	 * power domain.
+	 * For any other transcoder, VDSC/joining uses the power well associated
+	 * with the pipe/transcoder in use. Hence another reference on the
+	 * transcoder power domain will suffice.
+	 */
+	if (cpu_transcoder == TRANSCODER_EDP)
+		return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+	else
+		return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+}
+
+static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
+						const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+	enum pipe pipe = crtc->pipe;
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	u32 pps_val = 0;
+	u32 rc_buf_thresh_dword[4];
+	u32 rc_range_params_dword[8];
+	u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+	int i = 0;
+
+	/* Populate PICTURE_PARAMETER_SET_0 registers */
+	pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
+		DSC_VER_MIN_SHIFT |
+		vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
+		vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+	if (vdsc_cfg->block_pred_enable)
+		pps_val |= DSC_BLOCK_PREDICTION;
+	if (vdsc_cfg->convert_rgb)
+		pps_val |= DSC_COLOR_SPACE_CONVERSION;
+	if (vdsc_cfg->enable422)
+		pps_val |= DSC_422_ENABLE;
+	if (vdsc_cfg->vbr_enable)
+		pps_val |= DSC_VBR_ENABLE;
+	DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_1 registers */
+	pps_val = 0;
+	pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
+	DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_2 registers */
+	pps_val = 0;
+	pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
+		DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
+	DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_3 registers */
+	pps_val = 0;
+	pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
+		DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
+	DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_4 registers */
+	pps_val = 0;
+	pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
+		DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
+	DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_5 registers */
+	pps_val = 0;
+	pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
+		DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
+	DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_6 registers */
+	pps_val = 0;
+	pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) |
+		DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
+		DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
+		DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
+	DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_7 registers */
+	pps_val = 0;
+	pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
+		DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
+	DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_8 registers */
+	pps_val = 0;
+	pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
+		DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
+	DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_9 registers */
+	pps_val = 0;
+	pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+		DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
+	DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_10 registers */
+	pps_val = 0;
+	pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) |
+		DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
+		DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
+		DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
+	DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_16 registers */
+	pps_val = 0;
+	pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) |
+		DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) /
+				   vdsc_cfg->slice_width) |
+		DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
+					vdsc_cfg->slice_height);
+	DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+				   pps_val);
+	}
+
+	/* Populate the RC_BUF_THRESH registers */
+	memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
+	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+		rc_buf_thresh_dword[i / 4] |=
+			(u32)(vdsc_cfg->rc_buf_thresh[i] <<
+			      BITS_PER_BYTE * (i % 4));
+		DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+			 rc_buf_thresh_dword[i / 4]);
+	}
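+	/*
+	 * E.g. the first four programmed thresholds, {14, 28, 42, 56} from
+	 * the table above, pack into rc_buf_thresh_dword[0] as 0x382A1C0E
+	 * (thresh[0] in the low byte).
+	 */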
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
+		I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
+		I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
+		I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+		if (crtc_state->dsc_params.dsc_split) {
+			I915_WRITE(DSCC_RC_BUF_THRESH_0,
+				   rc_buf_thresh_dword[0]);
+			I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
+				   rc_buf_thresh_dword[1]);
+			I915_WRITE(DSCC_RC_BUF_THRESH_1,
+				   rc_buf_thresh_dword[2]);
+			I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
+				   rc_buf_thresh_dword[3]);
+		}
+	} else {
+		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
+			   rc_buf_thresh_dword[0]);
+		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+			   rc_buf_thresh_dword[1]);
+		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
+			   rc_buf_thresh_dword[2]);
+		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+			   rc_buf_thresh_dword[3]);
+		if (crtc_state->dsc_params.dsc_split) {
+			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
+				   rc_buf_thresh_dword[0]);
+			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+				   rc_buf_thresh_dword[1]);
+			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
+				   rc_buf_thresh_dword[2]);
+			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+				   rc_buf_thresh_dword[3]);
+		}
+	}
+
+	/* Populate the RC_RANGE_PARAMETERS registers */
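+	/*
+	 * Each RC range packs range_min_qp, range_max_qp and
+	 * range_bpg_offset into a 16-bit field, two ranges per dword,
+	 * again written out as 64-bit low/UDW register pairs.
+	 */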
+	memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+		rc_range_params_dword[i / 2] |=
+			(u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
+				RC_BPG_OFFSET_SHIFT) |
+			       (vdsc_cfg->rc_range_params[i].range_max_qp <<
+				RC_MAX_QP_SHIFT) |
+			       (vdsc_cfg->rc_range_params[i].range_min_qp <<
+				RC_MIN_QP_SHIFT)) << 16 * (i % 2));
+		DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+			 rc_range_params_dword[i / 2]);
+	}
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
+			   rc_range_params_dword[0]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
+			   rc_range_params_dword[1]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
+			   rc_range_params_dword[2]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
+			   rc_range_params_dword[3]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
+			   rc_range_params_dword[4]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
+			   rc_range_params_dword[5]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
+			   rc_range_params_dword[6]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
+			   rc_range_params_dword[7]);
+		if (crtc_state->dsc_params.dsc_split) {
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
+				   rc_range_params_dword[0]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
+				   rc_range_params_dword[1]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
+				   rc_range_params_dword[2]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
+				   rc_range_params_dword[3]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
+				   rc_range_params_dword[4]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
+				   rc_range_params_dword[5]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
+				   rc_range_params_dword[6]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
+				   rc_range_params_dword[7]);
+		}
+	} else {
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+			   rc_range_params_dword[0]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+			   rc_range_params_dword[1]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+			   rc_range_params_dword[2]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+			   rc_range_params_dword[3]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+			   rc_range_params_dword[4]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+			   rc_range_params_dword[5]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+			   rc_range_params_dword[6]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+			   rc_range_params_dword[7]);
+		if (crtc_state->dsc_params.dsc_split) {
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+				   rc_range_params_dword[0]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+				   rc_range_params_dword[1]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+				   rc_range_params_dword[2]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+				   rc_range_params_dword[3]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+				   rc_range_params_dword[4]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+				   rc_range_params_dword[5]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+				   rc_range_params_dword[6]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+				   rc_range_params_dword[7]);
+		}
+	}
+}
+
+static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
+				       const struct intel_crtc_state *crtc_state)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+	struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
+
+	/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
+	drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+
+	/* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
+	drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+
+	intel_dig_port->write_infoframe(encoder, crtc_state,
+					DP_SDP_PPS, &dp_dsc_pps_sdp,
+					sizeof(dp_dsc_pps_sdp));
+}
+
+void intel_dsc_enable(struct intel_encoder *encoder,
+		      const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+	u32 dss_ctl1_val = 0;
+	u32 dss_ctl2_val = 0;
+
+	if (!crtc_state->dsc_params.compression_enable)
+		return;
+
+	/* Enable Power wells for VDSC/joining */
+	intel_display_power_get(dev_priv,
+				intel_dsc_power_domain(crtc_state));
+
+	intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+
+	intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+
+	if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+		dss_ctl1_reg = DSS_CTL1;
+		dss_ctl2_reg = DSS_CTL2;
+	} else {
+		dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+		dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+	}
+	dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+	if (crtc_state->dsc_params.dsc_split) {
+		dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+		dss_ctl1_val |= JOINER_ENABLE;
+	}
+	I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+	I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+}
+
+void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+	u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
+
+	if (!old_crtc_state->dsc_params.compression_enable)
+		return;
+
+	if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+		dss_ctl1_reg = DSS_CTL1;
+		dss_ctl2_reg = DSS_CTL2;
+	} else {
+		dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+		dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+	}
+	dss_ctl1_val = I915_READ(dss_ctl1_reg);
+	if (dss_ctl1_val & JOINER_ENABLE)
+		dss_ctl1_val &= ~JOINER_ENABLE;
+	I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+
+	dss_ctl2_val = I915_READ(dss_ctl2_reg);
+	if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
+	    dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
+		dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
+				  RIGHT_BRANCH_VDSC_ENABLE);
+	I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+
+	/* Disable Power wells for VDSC/joining */
+	intel_display_power_put(dev_priv,
+				intel_dsc_power_domain(old_crtc_state));
+}
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index ca1f78a..4f41e32 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,58 +48,112 @@
  * - Public functions to init or apply the given workaround type.
  */
 
-static void wa_add(struct drm_i915_private *i915,
-		   i915_reg_t reg, const u32 mask, const u32 val)
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
 {
-	struct i915_workarounds *wa = &i915->workarounds;
-	unsigned int start = 0, end = wa->count;
-	unsigned int addr = i915_mmio_reg_offset(reg);
-	struct i915_wa_reg *r;
+	wal->name = name;
+}
+
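+/* The workaround list grows in chunks of 16 entries and is trimmed on finish. */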
+#define WA_LIST_CHUNK (1 << 4)
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+	/* Trim unused entries. */
+	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
+		struct i915_wa *list = kmemdup(wal->list,
+					       wal->count * sizeof(*list),
+					       GFP_KERNEL);
+
+		if (list) {
+			kfree(wal->list);
+			wal->list = list;
+		}
+	}
+
+	if (!wal->count)
+		return;
+
+	DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+			 wal->wa_count, wal->name);
+}
+
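+/*
+ * Insert @wa keeping the list sorted by mmio offset; a later entry for a
+ * register already on the list is merged into the existing one.
+ */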
+static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+	unsigned int addr = i915_mmio_reg_offset(wa->reg);
+	unsigned int start = 0, end = wal->count;
+	const unsigned int grow = WA_LIST_CHUNK;
+	struct i915_wa *wa_;
+
+	GEM_BUG_ON(!is_power_of_2(grow));
+
+	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+		struct i915_wa *list;
+
+		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+				     GFP_KERNEL);
+		if (!list) {
+			DRM_ERROR("No space for workaround init!\n");
+			return;
+		}
+
+		if (wal->list)
+			memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+		wal->list = list;
+	}
 
 	while (start < end) {
 		unsigned int mid = start + (end - start) / 2;
 
-		if (wa->reg[mid].addr < addr) {
+		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
 			start = mid + 1;
-		} else if (wa->reg[mid].addr > addr) {
+		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
 			end = mid;
 		} else {
-			r = &wa->reg[mid];
+			wa_ = &wal->list[mid];
 
-			if ((mask & ~r->mask) == 0) {
+			if ((wa->mask & ~wa_->mask) == 0) {
 				DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
-					  addr, r->mask, r->value);
+					  i915_mmio_reg_offset(wa_->reg),
+					  wa_->mask, wa_->val);
 
-				r->value &= ~mask;
+				wa_->val &= ~wa->mask;
 			}
 
-			r->value |= val;
-			r->mask  |= mask;
+			wal->wa_count++;
+			wa_->val |= wa->val;
+			wa_->mask |= wa->mask;
 			return;
 		}
 	}
 
-	if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
-		DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
-			  addr, mask, val);
-		return;
-	}
+	wal->wa_count++;
+	wa_ = &wal->list[wal->count++];
+	*wa_ = *wa;
 
-	r = &wa->reg[wa->count++];
-	r->addr  = addr;
-	r->value = val;
-	r->mask  = mask;
-
-	while (r-- > wa->reg) {
-		GEM_BUG_ON(r[0].addr == r[1].addr);
-		if (r[1].addr > r[0].addr)
+	while (wa_-- > wal->list) {
+		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
+			   i915_mmio_reg_offset(wa_[1].reg));
+		if (i915_mmio_reg_offset(wa_[1].reg) >
+		    i915_mmio_reg_offset(wa_[0].reg))
 			break;
 
-		swap(r[1], r[0]);
+		swap(wa_[1], wa_[0]);
 	}
 }
 
-#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
+static void
+__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = mask,
+		.val = val
+	};
+
+	_wa_add(wal, &wa);
+}
+
+#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val))
 
 #define WA_SET_BIT_MASKED(addr, mask) \
 	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -110,8 +164,10 @@ static void wa_add(struct drm_i915_private *i915,
 #define WA_SET_FIELD_MASKED(addr, mask, value) \
 	WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
 
-static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
 	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
 	/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -155,17 +211,14 @@ static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
 			    GEN6_WIZ_HASHING_MASK,
 			    GEN6_WIZ_HASHING_16x4);
-
-	return 0;
 }
 
-static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen8_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen8_ctx_workarounds_init(engine);
 
 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -185,31 +238,28 @@ static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  /* WaForceContextSaveRestoreNonCoherent:bdw */
 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
 			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
-	return 0;
+			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 }
 
-static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen8_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen8_ctx_workarounds_init(engine);
 
 	/* WaDisableThreadStallDopClockGating:chv */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
 
 	/* Improve HiZ throughput on CHV. */
 	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
-	return 0;
 }
 
-static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	if (HAS_LLC(dev_priv)) {
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+	if (HAS_LLC(i915)) {
 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
 		 *
 		 * Must match Display Engine. See
@@ -228,7 +278,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
 	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
+	if (!IS_COFFEELAKE(i915))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
@@ -271,9 +321,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  HDC_FORCE_NON_COHERENT);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
-	if (IS_SKYLAKE(dev_priv) ||
-	    IS_KABYLAKE(dev_priv) ||
-	    IS_COFFEELAKE(dev_priv))
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
@@ -300,14 +348,14 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
 
 	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
 		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
-
-	return 0;
 }
 
-static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 	u8 vals[3] = { 0, 0, 0 };
 	unsigned int i;
 
@@ -318,7 +366,7 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
 		 * Only consider slices where one, and only one, subslice has 7
 		 * EUs
 		 */
-		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+		if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
 			continue;
 
 		/*
@@ -327,12 +375,12 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
 		 *
 		 * ->    0 <= ss <= 3;
 		 */
-		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+		ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
 		vals[i] = 3 - ss;
 	}
 
 	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
-		return 0;
+		return;
 
 	/* Tune IZ hashing. See intel_device_info_runtime_init() */
 	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
@@ -342,28 +390,19 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
 			    GEN9_IZ_HASHING(2, vals[2]) |
 			    GEN9_IZ_HASHING(1, vals[1]) |
 			    GEN9_IZ_HASHING(0, vals[0]));
-
-	return 0;
 }
 
-static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
-
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
-
-	return skl_tune_iz_hashing(dev_priv);
+	gen9_ctx_workarounds_init(engine);
+	skl_tune_iz_hashing(engine);
 }
 
-static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen9_ctx_workarounds_init(engine);
 
 	/* WaDisableThreadStallDopClockGating:bxt */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -372,57 +411,41 @@ static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	/* WaToEnableHwFixForPushConstHWBug:bxt */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	return 0;
 }
 
-static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
-
-	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FENCE_DEST_SLM_DISABLE);
+	gen9_ctx_workarounds_init(engine);
 
 	/* WaToEnableHwFixForPushConstHWBug:kbl */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+	if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
 	/* WaDisableSbeCacheDispatchPortSharing:kbl */
 	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
 			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
-	return 0;
 }
 
-static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen9_ctx_workarounds_init(engine);
 
 	/* WaToEnableHwFixForPushConstHWBug:glk */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	return 0;
 }
 
-static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen9_ctx_workarounds_init(engine);
 
 	/* WaToEnableHwFixForPushConstHWBug:cfl */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -431,18 +454,19 @@ static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	/* WaDisableSbeCacheDispatchPortSharing:cfl */
 	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
 			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
-	return 0;
 }
 
-static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
 	/* WaForceContextSaveRestoreNonCoherent:cnl */
 	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
 
 	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
 		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
 
 	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
@@ -450,7 +474,7 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
 	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+	if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
 
@@ -470,16 +494,17 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 
 	/* WaDisableEarlyEOT:cnl */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-
-	return 0;
 }
 
-static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
 	/* Wa_1604370585:icl (pre-prod)
 	 * Formerly known as WaPushConstantDereferenceHoldDisable
 	 */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
 		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
 				  PUSH_CONSTANT_DEREF_DISABLE);
 
@@ -495,7 +520,7 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	/* Wa_2006611047:icl (pre-prod)
 	 * Formerly known as WaDisableImprovedTdlClkGating
 	 */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
 		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
 				  GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
 
@@ -504,70 +529,67 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  GEN11_STATE_CACHE_REDIRECT_TO_CS);
 
 	/* Wa_2006665173:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
 		WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
 				  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
-
-	return 0;
 }
 
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
 {
-	int err = 0;
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	dev_priv->workarounds.count = 0;
+	wa_init_start(wal, "context");
 
-	if (INTEL_GEN(dev_priv) < 8)
-		err = 0;
-	else if (IS_BROADWELL(dev_priv))
-		err = bdw_ctx_workarounds_init(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		err = chv_ctx_workarounds_init(dev_priv);
-	else if (IS_SKYLAKE(dev_priv))
-		err = skl_ctx_workarounds_init(dev_priv);
-	else if (IS_BROXTON(dev_priv))
-		err = bxt_ctx_workarounds_init(dev_priv);
-	else if (IS_KABYLAKE(dev_priv))
-		err = kbl_ctx_workarounds_init(dev_priv);
-	else if (IS_GEMINILAKE(dev_priv))
-		err = glk_ctx_workarounds_init(dev_priv);
-	else if (IS_COFFEELAKE(dev_priv))
-		err = cfl_ctx_workarounds_init(dev_priv);
-	else if (IS_CANNONLAKE(dev_priv))
-		err = cnl_ctx_workarounds_init(dev_priv);
-	else if (IS_ICELAKE(dev_priv))
-		err = icl_ctx_workarounds_init(dev_priv);
+	if (INTEL_GEN(i915) < 8)
+		return;
+	else if (IS_BROADWELL(i915))
+		bdw_ctx_workarounds_init(engine);
+	else if (IS_CHERRYVIEW(i915))
+		chv_ctx_workarounds_init(engine);
+	else if (IS_SKYLAKE(i915))
+		skl_ctx_workarounds_init(engine);
+	else if (IS_BROXTON(i915))
+		bxt_ctx_workarounds_init(engine);
+	else if (IS_KABYLAKE(i915))
+		kbl_ctx_workarounds_init(engine);
+	else if (IS_GEMINILAKE(i915))
+		glk_ctx_workarounds_init(engine);
+	else if (IS_COFFEELAKE(i915))
+		cfl_ctx_workarounds_init(engine);
+	else if (IS_CANNONLAKE(i915))
+		cnl_ctx_workarounds_init(engine);
+	else if (IS_ICELAKE(i915))
+		icl_ctx_workarounds_init(engine);
 	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
-	if (err)
-		return err;
+		MISSING_CASE(INTEL_GEN(i915));
 
-	DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
-			 dev_priv->workarounds.count);
-	return 0;
+	wa_init_finish(wal);
 }
 
-int intel_ctx_workarounds_emit(struct i915_request *rq)
+int intel_engine_emit_ctx_wa(struct i915_request *rq)
 {
-	struct i915_workarounds *w = &rq->i915->workarounds;
+	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+	struct i915_wa *wa;
+	unsigned int i;
 	u32 *cs;
-	int ret, i;
+	int ret;
 
-	if (w->count == 0)
+	if (wal->count == 0)
 		return 0;
 
 	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
-	cs = intel_ring_begin(rq, (w->count * 2 + 2));
+	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
-	for (i = 0; i < w->count; i++) {
-		*cs++ = w->reg[i].addr;
-		*cs++ = w->reg[i].value;
+	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+		*cs++ = i915_mmio_reg_offset(wa->reg);
+		*cs++ = wa->val;
 	}
 	*cs++ = MI_NOOP;
 
@@ -580,160 +602,149 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
 	return 0;
 }
 
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
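+/*
+ * Helpers to record the common workaround flavours: masked-register bit
+ * enables and plain read-modify-write register updates.
+ */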
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = val,
+		.val = _MASKED_BIT_ENABLE(val)
+	};
+
+	_wa_add(wal, &wa);
 }
 
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
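+/* Record an mmio rmw: bits in @mask are cleared before @val is OR'ed in. */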
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+		   u32 val)
 {
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = mask,
+		.val = val
+	};
+
+	_wa_add(wal, &wa);
 }
 
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
-	/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
-		   _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+	wa_write_masked_or(wal, reg, ~0, val);
+}
 
-	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
-		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+	wa_write_masked_or(wal, reg, val, val);
+}
+
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
 	/* WaDisableKillLogic:bxt,skl,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
-		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-			   ECOCHK_DIS_TLB);
+	if (!IS_COFFEELAKE(i915))
+		wa_write_or(wal,
+			    GAM_ECOCHK,
+			    ECOCHK_DIS_TLB);
 
-	if (HAS_LLC(dev_priv)) {
+	if (HAS_LLC(i915)) {
 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
 		 *
 		 * Must match Display Engine. See
 		 * WaCompressedResourceDisplayNewHashMode.
 		 */
-		I915_WRITE(MMCD_MISC_CTRL,
-			   I915_READ(MMCD_MISC_CTRL) |
-			   MMCD_PCLA |
-			   MMCD_HOTSPOT_EN);
+		wa_write_or(wal,
+			    MMCD_MISC_CTRL,
+			    MMCD_PCLA | MMCD_HOTSPOT_EN);
 	}
 
 	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
-	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-		   BDW_DISABLE_HDC_INVALIDATION);
-
-	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
-	if (IS_GEN9_LP(dev_priv)) {
-		u32 val = I915_READ(GEN8_L3SQCREG1);
-
-		val &= ~L3_PRIO_CREDITS_MASK;
-		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-		I915_WRITE(GEN8_L3SQCREG1, val);
-	}
-
-	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
-	I915_WRITE(GEN8_L3SQCREG4,
-		   I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	wa_write_or(wal,
+		    GAM_ECOCHK,
+		    BDW_DISABLE_HDC_INVALIDATION);
 }
 
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaEnableGapsTsvCreditFix:skl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisableGafsUnitClkGating:skl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:skl */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
-		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-			   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+		wa_write_or(wal,
+			    GEN9_GAMT_ECO_REG_RW_IA,
+			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaDisablePooledEuLoadBalancingFix:bxt */
-	I915_WRITE(FF_SLICE_CS_CHICKEN2,
-		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+	gen9_gt_workarounds_init(i915);
 
 	/* WaInPlaceDecompressionHang:bxt */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaEnableGapsTsvCreditFix:kbl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisableDynamicCreditSharing:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+		wa_write_or(wal,
+			    GAMT_CHKN_BIT_REG,
+			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
 
 	/* WaDisableGafsUnitClkGating:kbl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:kbl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaKBLVECSSemaphoreWaitPoll:kbl */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
-		struct intel_engine_cs *engine;
-		unsigned int tmp;
-
-		for_each_engine(engine, dev_priv, tmp) {
-			if (engine->id == RCS)
-				continue;
-
-			I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
-		}
-	}
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	gen9_gt_workarounds_init(i915);
 }
 
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaEnableGapsTsvCreditFix:cfl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisableGafsUnitClkGating:cfl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:cfl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
 static void wa_init_mcr(struct drm_i915_private *dev_priv)
 {
 	const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
-	u32 mcr;
+	struct i915_wa_list *wal = &dev_priv->gt_wa_list;
 	u32 mcr_slice_subslice_mask;
 
 	/*
@@ -770,8 +781,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
 		WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
 	}
 
-	mcr = I915_READ(GEN8_MCR_SELECTOR);
-
 	if (INTEL_GEN(dev_priv) >= 11)
 		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
 					  GEN11_MCR_SUBSLICE_MASK;
@@ -789,186 +798,220 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
 	 * occasions, such as INSTDONE, where this value is dependent
 	 * on s/ss combo, the read should be done with read_subslice_reg.
 	 */
-	mcr &= ~mcr_slice_subslice_mask;
-	mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
-	I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+	wa_write_masked_or(wal,
+			   GEN8_MCR_SELECTOR,
+			   mcr_slice_subslice_mask,
+			   intel_calculate_mcr_s_ss_select(dev_priv));
 }
 
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	wa_init_mcr(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_mcr(i915);
 
 	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+		wa_write_or(wal,
+			    GAMT_CHKN_BIT_REG,
+			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
 
 	/* WaInPlaceDecompressionHang:cnl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaEnablePreemptionGranularityControlByUMD:cnl */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	wa_init_mcr(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* This is not an Wa. Enable for better image quality */
-	I915_WRITE(_3D_CHICKEN3,
-		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+	wa_init_mcr(i915);
 
 	/* WaInPlaceDecompressionHang:icl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaPipelineFlushCoherentLines:icl */
-	I915_WRITE(GEN8_L3SQCREG4,
-		   I915_READ(GEN8_L3SQCREG4) |
-		   GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-	/* Wa_1405543622:icl
-	 * Formerly known as WaGAPZPriorityScheme
-	 */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) |
-		   GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
-	/* Wa_1604223664:icl
-	 * Formerly known as WaL3BankAddressHashing
-	 */
-	I915_WRITE(GEN8_GARBCNTL,
-		   (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
-		   GEN11_HASH_CTRL_EXCL_BIT0);
-	I915_WRITE(GEN11_GLBLINVL,
-		   (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
-		   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
 	/* WaModifyGamTlbPartitioning:icl */
-	I915_WRITE(GEN11_GACB_PERF_CTRL,
-		   (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
-		   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
-	/* Wa_1405733216:icl
-	 * Formerly known as WaDisableCleanEvicts
-	 */
-	I915_WRITE(GEN8_L3SQCREG4,
-		   I915_READ(GEN8_L3SQCREG4) |
-		   GEN11_LQSC_CLEAN_EVICT_DISABLE);
+	wa_write_masked_or(wal,
+			   GEN11_GACB_PERF_CTRL,
+			   GEN11_HASH_CTRL_MASK,
+			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
 
 	/* Wa_1405766107:icl
 	 * Formerly known as WaCL2SFHalfMaxAlloc
 	 */
-	I915_WRITE(GEN11_LSN_UNSLCVC,
-		   I915_READ(GEN11_LSN_UNSLCVC) |
-		   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
-		   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+	wa_write_or(wal,
+		    GEN11_LSN_UNSLCVC,
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
 
 	/* Wa_220166154:icl
 	 * Formerly known as WaDisCtxReload
 	 */
-	I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
-		   I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
-		   GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+	wa_write_or(wal,
+		    GEN8_GAMW_ECO_DEV_RW_IA,
+		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
 
 	/* Wa_1405779004:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
-		I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
-			   I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
-			   MSCUNIT_CLKGATE_DIS);
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+		wa_write_or(wal,
+			    SLICE_UNIT_LEVEL_CLKGATE,
+			    MSCUNIT_CLKGATE_DIS);
 
 	/* Wa_1406680159:icl */
-	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
-		   I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
-		   GWUNIT_CLKGATE_DIS);
-
-	/* Wa_1604302699:icl */
-	I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
-		   I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
-		   GEN11_I2M_WRITE_DISABLE);
+	wa_write_or(wal,
+		    SUBSLICE_UNIT_LEVEL_CLKGATE,
+		    GWUNIT_CLKGATE_DIS);
 
 	/* Wa_1406838659:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
-		I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
-			   I915_READ(INF_UNIT_LEVEL_CLKGATE) |
-			   CGPSF_CLKGATE_DIS);
-
-	/* WaForwardProgressSoftReset:icl */
-	I915_WRITE(GEN10_SCRATCH_LNCF2,
-		   I915_READ(GEN10_SCRATCH_LNCF2) |
-		   PMFLUSHDONE_LNICRSDROP |
-		   PMFLUSH_GAPL3UNBLOCK |
-		   PMFLUSHDONE_LNEBLK);
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+		wa_write_or(wal,
+			    INF_UNIT_LEVEL_CLKGATE,
+			    CGPSF_CLKGATE_DIS);
 
 	/* Wa_1406463099:icl
 	 * Formerly known as WaGamTlbPendError
 	 */
-	I915_WRITE(GAMT_CHKN_BIT_REG,
-		   I915_READ(GAMT_CHKN_BIT_REG) |
-		   GAMT_CHKN_DISABLE_L3_COH_PIPE);
-
-	/* Wa_1406609255:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
-		I915_WRITE(GEN7_SARCHKMD,
-			   I915_READ(GEN7_SARCHKMD) |
-			   GEN7_DISABLE_DEMAND_PREFETCH |
-			   GEN7_DISABLE_SAMPLER_PREFETCH);
+	wa_write_or(wal,
+		    GAMT_CHKN_BIT_REG,
+		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
 {
-	if (INTEL_GEN(dev_priv) < 8)
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_start(wal, "GT");
+
+	if (INTEL_GEN(i915) < 8)
 		return;
-	else if (IS_BROADWELL(dev_priv))
-		bdw_gt_workarounds_apply(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		chv_gt_workarounds_apply(dev_priv);
-	else if (IS_SKYLAKE(dev_priv))
-		skl_gt_workarounds_apply(dev_priv);
-	else if (IS_BROXTON(dev_priv))
-		bxt_gt_workarounds_apply(dev_priv);
-	else if (IS_KABYLAKE(dev_priv))
-		kbl_gt_workarounds_apply(dev_priv);
-	else if (IS_GEMINILAKE(dev_priv))
-		glk_gt_workarounds_apply(dev_priv);
-	else if (IS_COFFEELAKE(dev_priv))
-		cfl_gt_workarounds_apply(dev_priv);
-	else if (IS_CANNONLAKE(dev_priv))
-		cnl_gt_workarounds_apply(dev_priv);
-	else if (IS_ICELAKE(dev_priv))
-		icl_gt_workarounds_apply(dev_priv);
+	else if (IS_BROADWELL(i915))
+		return;
+	else if (IS_CHERRYVIEW(i915))
+		return;
+	else if (IS_SKYLAKE(i915))
+		skl_gt_workarounds_init(i915);
+	else if (IS_BROXTON(i915))
+		bxt_gt_workarounds_init(i915);
+	else if (IS_KABYLAKE(i915))
+		kbl_gt_workarounds_init(i915);
+	else if (IS_GEMINILAKE(i915))
+		glk_gt_workarounds_init(i915);
+	else if (IS_COFFEELAKE(i915))
+		cfl_gt_workarounds_init(i915);
+	else if (IS_CANNONLAKE(i915))
+		cnl_gt_workarounds_init(i915);
+	else if (IS_ICELAKE(i915))
+		icl_gt_workarounds_init(i915);
 	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
+		MISSING_CASE(INTEL_GEN(i915));
+
+	wa_init_finish(wal);
 }
 
-struct whitelist {
-	i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
-	unsigned int count;
-	u32 nopid;
-};
-
-static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
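+/*
+ * Compute the union of forcewake domains needed to read and write every
+ * register on the list, so one forcewake grab can cover them all.
+ */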
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+		   const struct i915_wa_list *wal)
 {
-	if (GEM_DEBUG_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+	enum forcewake_domains fw = 0;
+	struct i915_wa *wa;
+	unsigned int i;
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+		fw |= intel_uncore_forcewake_for_reg(dev_priv,
+						     wa->reg,
+						     FW_REG_READ |
+						     FW_REG_WRITE);
+
+	return fw;
+}
+
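+/*
+ * Apply the whole list under a single forcewake grab, using the raw _FW
+ * accessors as the uncore lock is already held.
+ */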
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+{
+	enum forcewake_domains fw;
+	unsigned long flags;
+	struct i915_wa *wa;
+	unsigned int i;
+
+	if (!wal->count)
 		return;
 
-	w->reg[w->count++] = reg;
+	fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+	intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+		u32 val = I915_READ_FW(wa->reg);
+
+		val &= ~wa->mask;
+		val |= wa->val;
+
+		I915_WRITE_FW(wa->reg, val);
+	}
+
+	intel_uncore_forcewake_put__locked(dev_priv, fw);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
 }
 
-static void bdw_whitelist_build(struct whitelist *w)
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
 {
+	wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
 }
 
-static void chv_whitelist_build(struct whitelist *w)
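+/*
+ * A workaround has been lost if any bit under its mask no longer reads
+ * back with the value that was programmed.
+ */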
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
 {
+	if ((cur ^ wa->val) & wa->mask) {
+		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+			  name, from, i915_mmio_reg_offset(wa->reg), cur,
+			  cur & wa->mask, wa->val, wa->mask);
+
+		return false;
+	}
+
+	return true;
 }
 
-static void gen9_whitelist_build(struct whitelist *w)
+static bool wa_list_verify(struct drm_i915_private *dev_priv,
+			   const struct i915_wa_list *wal,
+			   const char *from)
+{
+	struct i915_wa *wa;
+	unsigned int i;
+	bool ok = true;
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+		ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+
+	return ok;
+}
+
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+				 const char *from)
+{
+	return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
+}
+
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+	struct i915_wa wa = {
+		.reg = reg
+	};
+
+	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+		return;
+
+	_wa_add(wal, &wa);
+}
+
+static void gen9_whitelist_build(struct i915_wa_list *w)
 {
 	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
 	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
@@ -980,7 +1023,7 @@ static void gen9_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN8_HDC_CHICKEN1);
 }
 
-static void skl_whitelist_build(struct whitelist *w)
+static void skl_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 
@@ -988,12 +1031,12 @@ static void skl_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN8_L3SQCREG4);
 }
 
-static void bxt_whitelist_build(struct whitelist *w)
+static void bxt_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 }
 
-static void kbl_whitelist_build(struct whitelist *w)
+static void kbl_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 
@@ -1001,7 +1044,7 @@ static void kbl_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN8_L3SQCREG4);
 }
 
-static void glk_whitelist_build(struct whitelist *w)
+static void glk_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 
@@ -1009,18 +1052,18 @@ static void glk_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
 }
 
-static void cfl_whitelist_build(struct whitelist *w)
+static void cfl_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 }
 
-static void cnl_whitelist_build(struct whitelist *w)
+static void cnl_whitelist_build(struct i915_wa_list *w)
 {
 	/* WaEnablePreemptionGranularityControlByUMD:cnl */
 	whitelist_reg(w, GEN8_CS_CHICKEN1);
 }
 
-static void icl_whitelist_build(struct whitelist *w)
+static void icl_whitelist_build(struct i915_wa_list *w)
 {
 	/* WaAllowUMDToModifyHalfSliceChicken7:icl */
 	whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
@@ -1029,22 +1072,21 @@ static void icl_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN10_SAMPLER_MODE);
 }
 
-static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
-					 struct whitelist *w)
+void intel_engine_init_whitelist(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *w = &engine->whitelist;
 
 	GEM_BUG_ON(engine->id != RCS);
 
-	w->count = 0;
-	w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
+	wa_init_start(w, "whitelist");
 
 	if (INTEL_GEN(i915) < 8)
-		return NULL;
+		return;
 	else if (IS_BROADWELL(i915))
-		bdw_whitelist_build(w);
+		return;
 	else if (IS_CHERRYVIEW(i915))
-		chv_whitelist_build(w);
+		return;
 	else if (IS_SKYLAKE(i915))
 		skl_whitelist_build(w);
 	else if (IS_BROXTON(i915))
@@ -1062,39 +1104,180 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
 	else
 		MISSING_CASE(INTEL_GEN(i915));
 
-	return w;
+	wa_init_finish(w);
 }
 
-static void whitelist_apply(struct intel_engine_cs *engine,
-			    const struct whitelist *w)
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
+	const struct i915_wa_list *wal = &engine->whitelist;
 	const u32 base = engine->mmio_base;
+	struct i915_wa *wa;
 	unsigned int i;
 
-	if (!w)
+	if (!wal->count)
 		return;
 
-	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
-	for (i = 0; i < w->count; i++)
-		I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
-			      i915_mmio_reg_offset(w->reg[i]));
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+		I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+			   i915_mmio_reg_offset(wa->reg));
 
 	/* And clear the rest just in case of garbage */
 	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
-		I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
+		I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+			   i915_mmio_reg_offset(RING_NOPID(base)));
 
-	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
 }
 
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
 {
-	struct whitelist w;
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->wa_list;
 
-	whitelist_apply(engine, whitelist_build(engine, &w));
+	if (IS_ICELAKE(i915)) {
+		/* This is not a Wa. Enable for better image quality */
+		wa_masked_en(wal,
+			     _3D_CHICKEN3,
+			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+		/* WaPipelineFlushCoherentLines:icl */
+		wa_write_or(wal,
+			    GEN8_L3SQCREG4,
+			    GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+		/*
+		 * Wa_1405543622:icl
+		 * Formerly known as WaGAPZPriorityScheme
+		 */
+		wa_write_or(wal,
+			    GEN8_GARBCNTL,
+			    GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+		/*
+		 * Wa_1604223664:icl
+		 * Formerly known as WaL3BankAddressHashing
+		 */
+		wa_write_masked_or(wal,
+				   GEN8_GARBCNTL,
+				   GEN11_HASH_CTRL_EXCL_MASK,
+				   GEN11_HASH_CTRL_EXCL_BIT0);
+		wa_write_masked_or(wal,
+				   GEN11_GLBLINVL,
+				   GEN11_BANK_HASH_ADDR_EXCL_MASK,
+				   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+		/*
+		 * Wa_1405733216:icl
+		 * Formerly known as WaDisableCleanEvicts
+		 */
+		wa_write_or(wal,
+			    GEN8_L3SQCREG4,
+			    GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+		/* WaForwardProgressSoftReset:icl */
+		wa_write_or(wal,
+			    GEN10_SCRATCH_LNCF2,
+			    PMFLUSHDONE_LNICRSDROP |
+			    PMFLUSH_GAPL3UNBLOCK |
+			    PMFLUSHDONE_LNEBLK);
+
+		/* Wa_1406609255:icl (pre-prod) */
+		if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+			wa_write_or(wal,
+				    GEN7_SARCHKMD,
+				    GEN7_DISABLE_DEMAND_PREFETCH |
+				    GEN7_DISABLE_SAMPLER_PREFETCH);
+	}
+
+	if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+		/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+		wa_masked_en(wal,
+			     GEN7_FF_SLICE_CS_CHICKEN1,
+			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+	}
+
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+		wa_write_or(wal,
+			    GEN8_GARBCNTL,
+			    GEN9_GAPS_TSV_CREDIT_DISABLE);
+	}
+
+	if (IS_BROXTON(i915)) {
+		/* WaDisablePooledEuLoadBalancingFix:bxt */
+		wa_masked_en(wal,
+			     FF_SLICE_CS_CHICKEN2,
+			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+	}
+
+	if (IS_GEN9(i915)) {
+		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+		wa_masked_en(wal,
+			     GEN9_CSFE_CHICKEN1_RCS,
+			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+		wa_write_or(wal,
+			    BDW_SCRATCH1,
+			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+		if (IS_GEN9_LP(i915))
+			wa_write_masked_or(wal,
+					   GEN8_L3SQCREG1,
+					   L3_PRIO_CREDITS_MASK,
+					   L3_GENERAL_PRIO_CREDITS(62) |
+					   L3_HIGH_PRIO_CREDITS(2));
+
+		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+		wa_write_or(wal,
+			    GEN8_L3SQCREG4,
+			    GEN8_LQSC_FLUSH_COHERENT_LINES);
+	}
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->wa_list;
+
+	/* WaKBLVECSSemaphoreWaitPoll:kbl */
+	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+		wa_write(wal,
+			 RING_SEMA_WAIT_POLL(engine->mmio_base),
+			 1);
+	}
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct i915_wa_list *wal = &engine->wa_list;
+
+	if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+		return;
+
+	wa_init_start(wal, engine->name);
+
+	if (engine->id == RCS)
+		rcs_engine_wa_init(engine);
+	else
+		xcs_engine_wa_init(engine);
+
+	wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+	wa_list_apply(engine->i915, &engine->wa_list);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+					    const char *from)
+{
+	return wa_list_verify(engine->i915, &engine->wa_list, from);
+}
+
 #include "selftests/intel_workarounds.c"
 #endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623..7c73471 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,39 @@
 #ifndef _I915_WORKAROUNDS_H_
 #define _I915_WORKAROUNDS_H_
 
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
-int intel_ctx_workarounds_emit(struct i915_request *rq);
+#include <linux/slab.h>
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+struct i915_wa {
+	i915_reg_t	  reg;
+	u32		  mask;
+	u32		  val;
+};
 
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+struct i915_wa_list {
+	const char	*name;
+	struct i915_wa	*list;
+	unsigned int	count;
+	unsigned int	wa_count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+	kfree(wal->list);
+	memset(wal, 0, sizeof(*wal));
+}
+
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
+int intel_engine_emit_ctx_wa(struct i915_request *rq);
+
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+				 const char *from);
+
+void intel_engine_init_whitelist(struct intel_engine_cs *engine);
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
 
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
new file mode 100644
index 0000000..208a966
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_reset.h"
+
+#include "../i915_drv.h"
+#include "../intel_ringbuffer.h"
+
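+/*
+ * Claim exclusive ownership of GPU reset for the selftest: set the
+ * backoff bit and every per-engine reset bit, waiting out any reset
+ * already in flight.
+ */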
+void igt_global_reset_lock(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	pr_debug("%s: current gpu_error=%08lx\n",
+		 __func__, i915->gpu_error.flags);
+
+	while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
+		wait_event(i915->gpu_error.reset_queue,
+			   !test_bit(I915_RESET_BACKOFF,
+				     &i915->gpu_error.flags));
+
+	for_each_engine(engine, i915, id) {
+		while (test_and_set_bit(I915_RESET_ENGINE + id,
+					&i915->gpu_error.flags))
+			wait_on_bit(&i915->gpu_error.flags,
+				    I915_RESET_ENGINE + id,
+				    TASK_UNINTERRUPTIBLE);
+	}
+}
+
+void igt_global_reset_unlock(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, i915, id)
+		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+	wake_up_all(&i915->gpu_error.reset_queue);
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
new file mode 100644
index 0000000..5f0234d
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_RESET_H__
+#define __I915_SELFTESTS_IGT_RESET_H__
+
+#include "../i915_drv.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915);
+void igt_global_reset_unlock(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
new file mode 100644
index 0000000..8cd34f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -0,0 +1,199 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_spinner.h"
+
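+/*
+ * Set up the spinner: one internal page serving as a HWS for seqno
+ * reporting and one page for the self-referencing batch, both pinned
+ * and mapped for CPU access.
+ */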
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+{
+	unsigned int mode;
+	void *vaddr;
+	int err;
+
+	GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+	memset(spin, 0, sizeof(*spin));
+	spin->i915 = i915;
+
+	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(spin->hws)) {
+		err = PTR_ERR(spin->hws);
+		goto err;
+	}
+
+	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(spin->obj)) {
+		err = PTR_ERR(spin->obj);
+		goto err_hws;
+	}
+
+	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_obj;
+	}
+	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+	mode = i915_coherent_map_type(i915);
+	vaddr = i915_gem_object_pin_map(spin->obj, mode);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_unpin_hws;
+	}
+	spin->batch = vaddr;
+
+	return 0;
+
+err_unpin_hws:
+	i915_gem_object_unpin_map(spin->hws);
+err_obj:
+	i915_gem_object_put(spin->obj);
+err_hws:
+	i915_gem_object_put(spin->hws);
+err:
+	return err;
+}
+
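+/* Each fence context claims its own u32 slot within the HWS page. */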
+static unsigned int seqno_offset(u64 fence)
+{
+	return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+		       const struct i915_request *rq)
+{
+	return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct igt_spinner *spin,
+			      struct i915_request *rq,
+			      u32 arbitration_command)
+{
+	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+	struct i915_vma *hws, *vma;
+	u32 *batch;
+	int err;
+
+	vma = i915_vma_instance(spin->obj, vm, NULL);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	hws = i915_vma_instance(spin->hws, vm, NULL);
+	if (IS_ERR(hws))
+		return PTR_ERR(hws);
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		return err;
+
+	err = i915_vma_pin(hws, 0, 0, PIN_USER);
+	if (err)
+		goto unpin_vma;
+
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
+	if (!i915_gem_object_has_active_reference(vma->obj)) {
+		i915_gem_object_get(vma->obj);
+		i915_gem_object_set_active_reference(vma->obj);
+	}
+
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
+	if (!i915_gem_object_has_active_reference(hws->obj)) {
+		i915_gem_object_get(hws->obj);
+		i915_gem_object_set_active_reference(hws->obj);
+	}
+
+	batch = spin->batch;
+
+	*batch++ = MI_STORE_DWORD_IMM_GEN4;
+	*batch++ = lower_32_bits(hws_address(hws, rq));
+	*batch++ = upper_32_bits(hws_address(hws, rq));
+	*batch++ = rq->fence.seqno;
+
+	*batch++ = arbitration_command;
+
+	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+	*batch++ = lower_32_bits(vma->node.start);
+	*batch++ = upper_32_bits(vma->node.start);
+	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+	i915_gem_chipset_flush(spin->i915);
+
+	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+unpin_hws:
+	i915_vma_unpin(hws);
+unpin_vma:
+	i915_vma_unpin(vma);
+	return err;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine,
+			   u32 arbitration_command)
+{
+	struct i915_request *rq;
+	int err;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq))
+		return rq;
+
+	err = emit_recurse_batch(spin, rq, arbitration_command);
+	if (err) {
+		i915_request_add(rq);
+		return ERR_PTR(err);
+	}
+
+	return rq;
+}
+
+static u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+	return READ_ONCE(*seqno);
+}
+
+void igt_spinner_end(struct igt_spinner *spin)
+{
+	*spin->batch = MI_BATCH_BUFFER_END;
+	i915_gem_chipset_flush(spin->i915);
+}
+
+void igt_spinner_fini(struct igt_spinner *spin)
+{
+	igt_spinner_end(spin);
+
+	i915_gem_object_unpin_map(spin->obj);
+	i915_gem_object_put(spin->obj);
+
+	i915_gem_object_unpin_map(spin->hws);
+	i915_gem_object_put(spin->hws);
+}
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
+{
+	if (!wait_event_timeout(rq->execute,
+				READ_ONCE(rq->global_seqno),
+				msecs_to_jiffies(10)))
+		return false;
+
+	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+					       rq->fence.seqno),
+			     10) &&
+		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+					    rq->fence.seqno),
+			  1000));
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
new file mode 100644
index 0000000..391777c
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
+#define __I915_SELFTESTS_IGT_SPINNER_H__
+
+#include "../i915_selftest.h"
+
+#include "../i915_drv.h"
+#include "../i915_request.h"
+#include "../intel_ringbuffer.h"
+#include "../i915_gem_context.h"
+
+struct igt_spinner {
+	struct drm_i915_private *i915;
+	struct drm_i915_gem_object *hws;
+	struct drm_i915_gem_object *obj;
+	u32 *batch;
+	void *seqno;
+};
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+void igt_spinner_fini(struct igt_spinner *spin);
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine,
+			   u32 arbitration_command);
+void igt_spinner_end(struct igt_spinner *spin);
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
+
+#endif
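
For reference, a minimal user of the igt_spinner API looks as follows (a
sketch only; ctx, engine and i915 are assumed to come from the surrounding
test, and struct_mutex/runtime-pm handling is elided as in the callers below):

	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, i915); /* pins the HWS and batch pages */
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	i915_request_add(rq);
	if (!igt_wait_for_spinner(&spin, rq))
		err = -ETIMEDOUT; /* batch never started spinning */

	igt_spinner_end(&spin); /* rewrites the loop into MI_BATCH_BUFFER_END */
out:
	igt_spinner_fini(&spin);
	return err;
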
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index defe671..40efbed 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
 #include "../i915_selftest.h"
 #include "i915_random.h"
 #include "igt_flush_test.h"
+#include "igt_reset.h"
 #include "igt_wedge_me.h"
 
 #include "mock_context.h"
@@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg)
 		goto unlock;
 
 	for_each_engine(engine, i915, id) {
+		struct igt_wedge_me w;
 		long timeout;
 
 		if (!intel_engine_can_store_dword(engine))
@@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg)
 
 		i915_request_add(rq);
 
-		timeout = i915_request_wait(rq,
-					    I915_WAIT_LOCKED,
-					    MAX_SCHEDULE_TIMEOUT);
+		timeout = 0;
+		igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+			timeout = i915_request_wait(rq,
+						    I915_WAIT_LOCKED,
+						    MAX_SCHEDULE_TIMEOUT);
+		if (i915_terminally_wedged(&i915->gpu_error))
+			timeout = -EIO;
+
 		i915_request_put(rq);
 
 		if (timeout < 0) {
@@ -348,40 +355,6 @@ static int igt_hang_sanitycheck(void *arg)
 	return err;
 }
 
-static void global_reset_lock(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	pr_debug("%s: current gpu_error=%08lx\n",
-		 __func__, i915->gpu_error.flags);
-
-	while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
-		wait_event(i915->gpu_error.reset_queue,
-			   !test_bit(I915_RESET_BACKOFF,
-				     &i915->gpu_error.flags));
-
-	for_each_engine(engine, i915, id) {
-		while (test_and_set_bit(I915_RESET_ENGINE + id,
-					&i915->gpu_error.flags))
-			wait_on_bit(&i915->gpu_error.flags,
-				    I915_RESET_ENGINE + id,
-				    TASK_UNINTERRUPTIBLE);
-	}
-}
-
-static void global_reset_unlock(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, i915, id)
-		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
-
-	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
-	wake_up_all(&i915->gpu_error.reset_queue);
-}
-
 static int igt_global_reset(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -390,7 +363,7 @@ static int igt_global_reset(void *arg)
 
 	/* Check that we can issue a global GPU reset */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 	set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
 
 	mutex_lock(&i915->drm.struct_mutex);
@@ -405,7 +378,7 @@ static int igt_global_reset(void *arg)
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		err = -EIO;
@@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg)
 
 	/* Check that we detect a stuck waiter and issue a reset */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -988,7 +961,7 @@ static int igt_reset_wait(void *arg)
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
@@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 
 	/* Check that we can recover an unbind stuck on a hanging request */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -1186,7 +1159,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
@@ -1266,7 +1239,7 @@ static int igt_reset_queue(void *arg)
 
 	/* Check that we replay pending requests following a hang */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -1397,7 +1370,7 @@ static int igt_reset_queue(void *arg)
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 94fc0e5..ca461e3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,216 +6,18 @@
 
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
+#include "igt_spinner.h"
 #include "i915_random.h"
 
 #include "mock_context.h"
 
-struct spinner {
-	struct drm_i915_private *i915;
-	struct drm_i915_gem_object *hws;
-	struct drm_i915_gem_object *obj;
-	u32 *batch;
-	void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
-	unsigned int mode;
-	void *vaddr;
-	int err;
-
-	GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
-	memset(spin, 0, sizeof(*spin));
-	spin->i915 = i915;
-
-	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->hws)) {
-		err = PTR_ERR(spin->hws);
-		goto err;
-	}
-
-	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->obj)) {
-		err = PTR_ERR(spin->obj);
-		goto err_hws;
-	}
-
-	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
-	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
-	mode = i915_coherent_map_type(i915);
-	vaddr = i915_gem_object_pin_map(spin->obj, mode);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_unpin_hws;
-	}
-	spin->batch = vaddr;
-
-	return 0;
-
-err_unpin_hws:
-	i915_gem_object_unpin_map(spin->hws);
-err_obj:
-	i915_gem_object_put(spin->obj);
-err_hws:
-	i915_gem_object_put(spin->hws);
-err:
-	return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
-	return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
-		       const struct i915_request *rq)
-{
-	return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
-			      struct i915_request *rq,
-			      u32 arbitration_command)
-{
-	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
-	struct i915_vma *hws, *vma;
-	u32 *batch;
-	int err;
-
-	vma = i915_vma_instance(spin->obj, vm, NULL);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	hws = i915_vma_instance(spin->hws, vm, NULL);
-	if (IS_ERR(hws))
-		return PTR_ERR(hws);
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		return err;
-
-	err = i915_vma_pin(hws, 0, 0, PIN_USER);
-	if (err)
-		goto unpin_vma;
-
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
-	}
-
-	err = i915_vma_move_to_active(hws, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
-
-	batch = spin->batch;
-
-	*batch++ = MI_STORE_DWORD_IMM_GEN4;
-	*batch++ = lower_32_bits(hws_address(hws, rq));
-	*batch++ = upper_32_bits(hws_address(hws, rq));
-	*batch++ = rq->fence.seqno;
-
-	*batch++ = arbitration_command;
-
-	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-	*batch++ = lower_32_bits(vma->node.start);
-	*batch++ = upper_32_bits(vma->node.start);
-	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
-	i915_gem_chipset_flush(spin->i915);
-
-	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
-	i915_vma_unpin(hws);
-unpin_vma:
-	i915_vma_unpin(vma);
-	return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
-		       struct i915_gem_context *ctx,
-		       struct intel_engine_cs *engine,
-		       u32 arbitration_command)
-{
-	struct i915_request *rq;
-	int err;
-
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return rq;
-
-	err = emit_recurse_batch(spin, rq, arbitration_command);
-	if (err) {
-		i915_request_add(rq);
-		return ERR_PTR(err);
-	}
-
-	return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
-	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
-	return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
-	*spin->batch = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
-	spinner_end(spin);
-
-	i915_gem_object_unpin_map(spin->obj);
-	i915_gem_object_put(spin->obj);
-
-	i915_gem_object_unpin_map(spin->hws);
-	i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
-	if (!wait_event_timeout(rq->execute,
-				READ_ONCE(rq->global_seqno),
-				msecs_to_jiffies(10)))
-		return false;
-
-	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
-					       rq->fence.seqno),
-			     10) &&
-		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
-					    rq->fence.seqno),
-			  1000));
-}
-
 static int live_sanitycheck(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	struct spinner spin;
+	struct igt_spinner spin;
 	int err = -ENOMEM;
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -224,7 +26,7 @@ static int live_sanitycheck(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin, i915))
+	if (igt_spinner_init(&spin, i915))
 		goto err_unlock;
 
 	ctx = kernel_context(i915);
@@ -234,14 +36,14 @@ static int live_sanitycheck(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin, rq)) {
+		if (!igt_wait_for_spinner(&spin, rq)) {
 			GEM_TRACE("spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -249,7 +51,7 @@ static int live_sanitycheck(void *arg)
 			goto err_ctx;
 		}
 
-		spinner_end(&spin);
+		igt_spinner_end(&spin);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx;
@@ -260,7 +62,7 @@ static int live_sanitycheck(void *arg)
 err_ctx:
 	kernel_context_close(ctx);
 err_spin:
-	spinner_fini(&spin);
+	igt_spinner_fini(&spin);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -272,7 +74,7 @@ static int live_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -283,10 +85,10 @@ static int live_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -304,15 +106,15 @@ static int live_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -320,16 +122,16 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -337,8 +139,8 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -351,9 +153,9 @@ static int live_preempt(void *arg)
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -365,7 +167,7 @@ static int live_late_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
@@ -377,10 +179,10 @@ static int live_late_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -394,28 +196,29 @@ static int live_late_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			pr_err("First context failed to start\n");
 			goto err_wedged;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_NOOP);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (wait_for_spinner(&spin_hi, rq)) {
+		if (igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("Second context overtook first?\n");
 			goto err_wedged;
 		}
@@ -423,14 +226,14 @@ static int live_late_preempt(void *arg)
 		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
 		engine->schedule(rq, &attr);
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");
 			GEM_TRACE_DUMP();
 			goto err_wedged;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -443,9 +246,9 @@ static int live_late_preempt(void *arg)
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -453,8 +256,8 @@ static int live_late_preempt(void *arg)
 	return err;
 
 err_wedged:
-	spinner_end(&spin_hi);
-	spinner_end(&spin_lo);
+	igt_spinner_end(&spin_hi);
+	igt_spinner_end(&spin_lo);
 	i915_gem_set_wedged(i915);
 	err = -EIO;
 	goto err_ctx_lo;
@@ -464,7 +267,7 @@ static int live_preempt_hang(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -478,10 +281,10 @@ static int live_preempt_hang(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -500,15 +303,15 @@ static int live_preempt_hang(void *arg)
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -516,10 +319,10 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
@@ -544,7 +347,7 @@ static int live_preempt_hang(void *arg)
 
 		engine->execlists.preempt_hang.inject_hang = false;
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -552,8 +355,8 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -566,9 +369,9 @@ static int live_preempt_hang(void *arg)
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index d1a0923..67017d5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,9 @@
 
 #include "../i915_selftest.h"
 
+#include "igt_flush_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
 #include "igt_wedge_me.h"
 #include "mock_context.h"
 
@@ -91,17 +94,23 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	return ERR_PTR(err);
 }
 
-static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+static u32
+get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
 {
-	return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+	i915_reg_t reg = i < engine->whitelist.count ?
+			 engine->whitelist.list[i].reg :
+			 RING_NOPID(engine->mmio_base);
+
+	return i915_mmio_reg_offset(reg);
 }
 
-static void print_results(const struct whitelist *w, const u32 *results)
+static void
+print_results(const struct intel_engine_cs *engine, const u32 *results)
 {
 	unsigned int i;
 
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
-		u32 expected = get_whitelist_reg(w, i);
+		u32 expected = get_whitelist_reg(engine, i);
 		u32 actual = results[i];
 
 		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
@@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results)
 	}
 }
 
-static int check_whitelist(const struct whitelist *w,
-			   struct i915_gem_context *ctx,
+static int check_whitelist(struct i915_gem_context *ctx,
 			   struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *results;
@@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w,
 	}
 
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
-		u32 expected = get_whitelist_reg(w, i);
+		u32 expected = get_whitelist_reg(engine, i);
 		u32 actual = vaddr[i];
 
 		if (expected != actual) {
-			print_results(w, vaddr);
+			print_results(engine, vaddr);
 			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
 			       i, expected, actual);
 
@@ -159,66 +167,107 @@ static int check_whitelist(const struct whitelist *w,
 
 static int do_device_reset(struct intel_engine_cs *engine)
 {
-	i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+	set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
+	i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
 	return 0;
 }
 
 static int do_engine_reset(struct intel_engine_cs *engine)
 {
-	return i915_reset_engine(engine, NULL);
+	return i915_reset_engine(engine, "live_workarounds");
 }
 
-static int switch_to_scratch_context(struct intel_engine_cs *engine)
+static int
+switch_to_scratch_context(struct intel_engine_cs *engine,
+			  struct igt_spinner *spin)
 {
 	struct i915_gem_context *ctx;
 	struct i915_request *rq;
+	int err = 0;
 
 	ctx = kernel_context(engine->i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
 	intel_runtime_pm_get(engine->i915);
-	rq = i915_request_alloc(engine, ctx);
+
+	if (spin)
+		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+	else
+		rq = i915_request_alloc(engine, ctx);
+
 	intel_runtime_pm_put(engine->i915);
 
 	kernel_context_close(ctx);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
+
+	if (IS_ERR(rq)) {
+		spin = NULL;
+		err = PTR_ERR(rq);
+		goto err;
+	}
 
 	i915_request_add(rq);
 
-	return 0;
+	if (spin && !igt_wait_for_spinner(spin, rq)) {
+		pr_err("Spinner failed to start\n");
+		err = -ETIMEDOUT;
+	}
+
+err:
+	if (err && spin)
+		igt_spinner_end(spin);
+
+	return err;
 }
 
 static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 					int (*reset)(struct intel_engine_cs *),
-					const struct whitelist *w,
 					const char *name)
 {
+	struct drm_i915_private *i915 = engine->i915;
+	bool want_spin = reset == do_engine_reset;
 	struct i915_gem_context *ctx;
+	struct igt_spinner spin;
 	int err;
 
-	ctx = kernel_context(engine->i915);
+	pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
+		engine->whitelist.count, name);
+
+	if (want_spin) {
+		err = igt_spinner_init(&spin, i915);
+		if (err)
+			return err;
+	}
+
+	ctx = kernel_context(i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	err = check_whitelist(w, ctx, engine);
+	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Invalid whitelist *before* %s reset!\n", name);
 		goto out;
 	}
 
-	err = switch_to_scratch_context(engine);
+	err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
 	if (err)
 		goto out;
 
+	intel_runtime_pm_get(i915);
 	err = reset(engine);
+	intel_runtime_pm_put(i915);
+
+	if (want_spin) {
+		igt_spinner_end(&spin);
+		igt_spinner_fini(&spin);
+	}
+
 	if (err) {
 		pr_err("%s reset failed\n", name);
 		goto out;
 	}
 
-	err = check_whitelist(w, ctx, engine);
+	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Whitelist not preserved in context across %s reset!\n",
 		       name);
@@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 
 	kernel_context_close(ctx);
 
-	ctx = kernel_context(engine->i915);
+	ctx = kernel_context(i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	err = check_whitelist(w, ctx, engine);
+	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
 		       name);
@@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine = i915->engine[RCS];
-	struct i915_gpu_error *error = &i915->gpu_error;
-	struct whitelist w;
 	int err = 0;
 
 	/* If we reset the gpu, we should not lose the RING_NONPRIV */
 
-	if (!engine)
+	if (!engine || engine->whitelist.count == 0)
 		return 0;
 
-	if (!whitelist_build(engine, &w))
-		return 0;
-
-	pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
-
-	set_bit(I915_RESET_BACKOFF, &error->flags);
-	set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+	igt_global_reset_lock(i915);
 
 	if (intel_has_reset_engine(i915)) {
 		err = check_whitelist_across_reset(engine,
-						   do_engine_reset, &w,
+						   do_engine_reset,
 						   "engine");
 		if (err)
 			goto out;
@@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg)
 
 	if (intel_has_gpu_reset(i915)) {
 		err = check_whitelist_across_reset(engine,
-						   do_device_reset, &w,
+						   do_device_reset,
 						   "device");
 		if (err)
 			goto out;
 	}
 
 out:
-	clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
-	clear_bit(I915_RESET_BACKOFF, &error->flags);
+	igt_global_reset_unlock(i915);
 	return err;
 }
 
+static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	bool ok = true;
+
+	ok &= intel_gt_verify_workarounds(i915, str);
+
+	for_each_engine(engine, i915, id)
+		ok &= intel_engine_verify_workarounds(engine, str);
+
+	return ok;
+}
+
+static int
+live_gpu_reset_gt_engine_workarounds(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gpu_error *error = &i915->gpu_error;
+	bool ok;
+
+	if (!intel_has_gpu_reset(i915))
+		return 0;
+
+	pr_info("Verifying after GPU reset...\n");
+
+	igt_global_reset_lock(i915);
+
+	ok = verify_gt_engine_wa(i915, "before reset");
+	if (!ok)
+		goto out;
+
+	intel_runtime_pm_get(i915);
+	set_bit(I915_RESET_HANDOFF, &error->flags);
+	i915_reset(i915, ALL_ENGINES, "live_workarounds");
+	intel_runtime_pm_put(i915);
+
+	ok = verify_gt_engine_wa(i915, "after reset");
+
+out:
+	igt_global_reset_unlock(i915);
+
+	return ok ? 0 : -ESRCH;
+}
+
+static int
+live_engine_reset_gt_engine_workarounds(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	struct igt_spinner spin;
+	enum intel_engine_id id;
+	struct i915_request *rq;
+	int ret = 0;
+
+	if (!intel_has_reset_engine(i915))
+		return 0;
+
+	ctx = kernel_context(i915);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	igt_global_reset_lock(i915);
+
+	for_each_engine(engine, i915, id) {
+		bool ok;
+
+		pr_info("Verifying after %s reset...\n", engine->name);
+
+		ok = verify_gt_engine_wa(i915, "before reset");
+		if (!ok) {
+			ret = -ESRCH;
+			goto err;
+		}
+
+		intel_runtime_pm_get(i915);
+		i915_reset_engine(engine, "live_workarounds");
+		intel_runtime_pm_put(i915);
+
+		ok = verify_gt_engine_wa(i915, "after idle reset");
+		if (!ok) {
+			ret = -ESRCH;
+			goto err;
+		}
+
+		ret = igt_spinner_init(&spin, i915);
+		if (ret)
+			goto err;
+
+		intel_runtime_pm_get(i915);
+
+		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		if (IS_ERR(rq)) {
+			ret = PTR_ERR(rq);
+			igt_spinner_fini(&spin);
+			intel_runtime_pm_put(i915);
+			goto err;
+		}
+
+		i915_request_add(rq);
+
+		if (!igt_wait_for_spinner(&spin, rq)) {
+			pr_err("Spinner failed to start\n");
+			igt_spinner_fini(&spin);
+			intel_runtime_pm_put(i915);
+			ret = -ETIMEDOUT;
+			goto err;
+		}
+
+		i915_reset_engine(engine, "live_workarounds");
+
+		intel_runtime_pm_put(i915);
+
+		igt_spinner_end(&spin);
+		igt_spinner_fini(&spin);
+
+		ok = verify_gt_engine_wa(i915, "after busy reset");
+		if (!ok) {
+			ret = -ESRCH;
+			goto err;
+		}
+	}
+
+err:
+	igt_global_reset_unlock(i915);
+	kernel_context_close(ctx);
+
+	igt_flush_test(i915, I915_WAIT_LOCKED);
+
+	return ret;
+}
+
 int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_reset_whitelist),
+		SUBTEST(live_gpu_reset_gt_engine_workarounds),
+		SUBTEST(live_engine_reset_gt_engine_workarounds),
 	};
 	int err;
 
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index fed11ec..a3d2ca0 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -946,7 +946,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
 
 	if (mem) {
-		addr = mem->start << PAGE_SHIFT;
+		addr = (u64)mem->start << PAGE_SHIFT;
 		if (mem->mem_type != TTM_PL_SYSTEM) {
 			bo_va->flags |= RADEON_VM_PAGE_VALID;
 		}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 18ebbb0..9d4cd19 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,6 +60,8 @@
 
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
+
 /**
  * drm_sched_rq_init - initialize a given run queue struct
  *
@@ -228,7 +230,7 @@ static void drm_sched_job_finish(struct work_struct *work)
 
 	spin_lock(&sched->job_list_lock);
 	/* remove job from ring_mirror_list */
-	list_del(&s_job->node);
+	list_del_init(&s_job->node);
 	/* queue TDR for next job */
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
@@ -261,40 +263,15 @@ static void drm_sched_job_timedout(struct work_struct *work)
 {
 	struct drm_gpu_scheduler *sched;
 	struct drm_sched_job *job;
-	int r;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
-
-	spin_lock(&sched->job_list_lock);
-	list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
-		struct drm_sched_fence *fence = job->s_fence;
-
-		if (!dma_fence_remove_callback(fence->parent, &fence->cb))
-			goto already_signaled;
-	}
-
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
-	spin_unlock(&sched->job_list_lock);
 
 	if (job)
-		sched->ops->timedout_job(job);
+		job->sched->ops->timedout_job(job);
 
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(job, &sched->ring_mirror_list, node) {
-		struct drm_sched_fence *fence = job->s_fence;
-
-		if (!fence->parent || !list_empty(&fence->cb.node))
-			continue;
-
-		r = dma_fence_add_callback(fence->parent, &fence->cb,
-					   drm_sched_process_job);
-		if (r)
-			drm_sched_process_job(fence->parent, &fence->cb);
-
-already_signaled:
-		;
-	}
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
@@ -391,6 +368,8 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 					  r);
 			dma_fence_put(fence);
 		} else {
+			if (s_fence->finished.error < 0)
+				drm_sched_expel_job_unlocked(s_job);
 			drm_sched_process_job(NULL, &s_fence->cb);
 		}
 		spin_lock(&sched->job_list_lock);
@@ -595,6 +574,8 @@ static int drm_sched_main(void *param)
 					  r);
 			dma_fence_put(fence);
 		} else {
+			if (s_fence->finished.error < 0)
+				drm_sched_expel_job_unlocked(sched_job);
 			drm_sched_process_job(NULL, &s_fence->cb);
 		}
 
@@ -603,6 +584,15 @@ static int drm_sched_main(void *param)
 	return 0;
 }
 
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
+{
+	struct drm_gpu_scheduler *sched = s_job->sched;
+
+	spin_lock(&sched->job_list_lock);
+	list_del_init(&s_job->node);
+	spin_unlock(&sched->job_list_lock);
+}
+
 /**
  * drm_sched_init - Init a gpu scheduler instance
  *
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ba80150..895d77d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	if (!fbo)
 		return -ENOMEM;
 
-	ttm_bo_get(bo);
 	fbo->base = *bo;
+	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
+
+	ttm_bo_get(bo);
 	fbo->bo = bo;
 
 	/**
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 0a7252a..bb71db6 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -334,7 +334,7 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
 }
 EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
 
-int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
+int sysfs_create_files(struct kobject *kobj, const struct attribute * const *ptr)
 {
 	int err = 0;
 	int i;
@@ -493,7 +493,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr)
 	return ret;
 }
 
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr)
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr)
 {
 	int i;
 	for (i = 0; ptr[i]; i++)
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 665b9ca..9be2181 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -972,6 +972,17 @@ struct drm_connector {
 	struct drm_property *scaling_mode_property;
 
 	/**
+	 * @vrr_capable_property: Optional property to help userspace
+	 * query hardware support for variable refresh rate on a
+	 * connector. Drivers can add the property to a connector by
+	 * calling drm_connector_attach_vrr_capable_property().
+	 *
+	 * This should be updated only by calling
+	 * drm_connector_set_vrr_capable_property().
+	 */
+	struct drm_property *vrr_capable_property;
+
+	/**
 	 * @content_protection_property: DRM ENUM property for content
 	 * protection. See drm_connector_attach_content_protection_property().
 	 */
@@ -1245,6 +1256,8 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 int drm_connector_attach_content_type_property(struct drm_connector *dev);
 int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
 					       u32 scaling_mode_mask);
+int drm_connector_attach_vrr_capable_property(
+		struct drm_connector *connector);
 int drm_connector_attach_content_protection_property(
 		struct drm_connector *connector);
 int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
@@ -1261,6 +1274,8 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
 				       const struct edid *edid);
 void drm_connector_set_link_status_property(struct drm_connector *connector,
 					    uint64_t link_status);
+void drm_connector_set_vrr_capable_property(
+		struct drm_connector *connector, bool capable);
 int drm_connector_init_panel_orientation_property(
 	struct drm_connector *connector, int width, int height);
 int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
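
Driver-side wiring for the new property amounts to two calls (a sketch;
connector and the probed vrr_capable flag are assumed to come from the
driver):

	/* at connector init time, expose the property to userspace */
	drm_connector_attach_vrr_capable_property(connector);

	/* later, once the sink has been probed (e.g. from its EDID) */
	drm_connector_set_vrr_capable_property(connector, vrr_capable);
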
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b21437b..39c3900 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -291,6 +291,15 @@ struct drm_crtc_state {
 	u32 pageflip_flags;
 
 	/**
+	 * @vrr_enabled:
+	 *
+	 * Indicates if variable refresh rate should be enabled for the CRTC.
+	 * Support for the requested vrr state will depend on driver and
+	 * hardware capability - lacking support is not treated as failure.
+	 */
+	bool vrr_enabled;
+
+	/**
 	 * @event:
 	 *
 	 * Optional pointer to a DRM event to signal upon completion of the
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 3314e91..5736c94 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1123,7 +1123,8 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
 				   bool is_edp);
 u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
-u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+					 u8 dsc_bpc[3]);
 
 static inline bool
 drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
new file mode 100644
index 0000000..d03f1b8
--- /dev/null
+++ b/include/drm/drm_dsc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_H_
+#define DRM_DSC_H_
+
+#include <drm/drm_dp_helper.h>
+
+/* VESA Display Stream Compression DSC 1.2 constants */
+#define DSC_NUM_BUF_RANGES			15
+#define DSC_MUX_WORD_SIZE_8_10_BPC		48
+#define DSC_MUX_WORD_SIZE_12_BPC		64
+#define DSC_RC_PIXELS_PER_GROUP			3
+#define DSC_SCALE_DECREMENT_INTERVAL_MAX	4095
+#define DSC_RANGE_BPG_OFFSET_MASK		0x3f
+
+/* DSC Rate Control Constants */
+#define DSC_RC_MODEL_SIZE_CONST		    8192
+#define DSC_RC_EDGE_FACTOR_CONST	    6
+#define DSC_RC_TGT_OFFSET_HI_CONST	    3
+#define DSC_RC_TGT_OFFSET_LO_CONST	    3
+
+/* DSC PPS constants and macros */
+#define DSC_PPS_VERSION_MAJOR_SHIFT		4
+#define DSC_PPS_BPC_SHIFT			4
+#define DSC_PPS_MSB_SHIFT			8
+#define DSC_PPS_LSB_MASK			(0xFF << 0)
+#define DSC_PPS_BPP_HIGH_MASK			(0x3 << 8)
+#define DSC_PPS_VBR_EN_SHIFT			2
+#define DSC_PPS_SIMPLE422_SHIFT			3
+#define DSC_PPS_CONVERT_RGB_SHIFT		4
+#define DSC_PPS_BLOCK_PRED_EN_SHIFT		5
+#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK	(0x3 << 8)
+#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK		(0xF << 8)
+#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT		4
+#define DSC_PPS_RC_RANGE_MINQP_SHIFT		11
+#define DSC_PPS_RC_RANGE_MAXQP_SHIFT		6
+#define DSC_PPS_NATIVE_420_SHIFT		1
+#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS		16
+#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL		0
+#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS		13
+
+/* Configuration for a single Rate Control model range */
+struct drm_dsc_rc_range_parameters {
+	/* Min Quantization Parameters allowed for this range */
+	u8 range_min_qp;
+	/* Max Quantization Parameters allowed for this range */
+	u8 range_max_qp;
+	/* Bits/group offset to apply to target for this group */
+	u8 range_bpg_offset;
+};
+
+struct drm_dsc_config {
+	/* Bits per component for the previous reconstructed line buffer */
+	u8 line_buf_depth;
+	/* Bits per component to code (must be 8, 10, or 12) */
+	u8 bits_per_component;
+	/*
+	 * Flag indicating to do RGB - YCoCg conversion
+	 * and back (should be 1 for RGB input)
+	 */
+	bool convert_rgb;
+	u8 slice_count;
+	/* Slice Width */
+	u16 slice_width;
+	/* Slice Height */
+	u16 slice_height;
+	/*
+	 * 4:2:2 enable mode (from PPS, 4:2:2 conversion happens
+	 * outside of DSC encode/decode algorithm)
+	 */
+	bool enable422;
+	/* Picture Width */
+	u16 pic_width;
+	/* Picture Height */
+	u16 pic_height;
+	/* Offset to bits/group (upper bound) used by RC to determine QP adjustment */
+	u8 rc_tgt_offset_high;
+	/* Offset to bits/group (lower bound) used by RC to determine QP adjustment */
+	u8 rc_tgt_offset_low;
+	/* Bits/pixel target << 4 (i.e., 4 fractional bits) */
+	u16 bits_per_pixel;
+	/*
+	 * Factor to determine if an edge is present based
+	 * on the bits produced
+	 */
+	u8 rc_edge_factor;
+	/* Slow down incrementing once the range reaches this value */
+	u8 rc_quant_incr_limit1;
+	/* Slow down incrementing once the range reaches this value */
+	u8 rc_quant_incr_limit0;
+	/* Number of pixels to delay the initial transmission */
+	u16 initial_xmit_delay;
+	/* Number of pixels to delay the VLD on the decoder, not including SSM */
+	u16 initial_dec_delay;
+	/* Block prediction enable */
+	bool block_pred_enable;
+	/* Bits/group offset to use for first line of the slice */
+	u8 first_line_bpg_offset;
+	/* Value to use for RC model offset at slice start */
+	u16 initial_offset;
+	/* Thresholds defining each of the buffer ranges */
+	u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+	/* Parameters for each of the RC ranges */
+	struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+	/* Total size of RC model */
+	u16 rc_model_size;
+	/* Minimum QP where flatness information is sent */
+	u8 flatness_min_qp;
+	/* Maximum QP where flatness information is sent */
+	u8 flatness_max_qp;
+	/* Initial value for scale factor */
+	u8 initial_scale_value;
+	/* Decrement scale factor every scale_decrement_interval groups */
+	u16 scale_decrement_interval;
+	/* Increment scale factor every scale_increment_interval groups */
+	u16 scale_increment_interval;
+	/* Non-first line BPG offset to use */
+	u16 nfl_bpg_offset;
+	/* BPG offset used to enforce slice bit */
+	u16 slice_bpg_offset;
+	/* Final RC linear transformation offset value */
+	u16 final_offset;
+	/* Enable on-off VBR (i.e., disable stuffing bits) */
+	bool vbr_enable;
+	/* Mux word size (in bits) for SSM mode */
+	u8 mux_word_size;
+	/*
+	 * The (max) size in bytes of the "chunks" that are
+	 * used in slice multiplexing
+	 */
+	u16 slice_chunk_size;
+	/* Rate Control buffer size in bits */
+	u16 rc_bits;
+	/* DSC Minor Version */
+	u8 dsc_version_minor;
+	/* DSC Major version */
+	u8 dsc_version_major;
+	/* Native 4:2:2 support */
+	bool native_422;
+	/* Native 4:2:0 support */
+	bool native_420;
+	/* Additional bits/group for the second line of a slice in native 4:2:0 */
+	u8 second_line_bpg_offset;
+	/* Number of bits deallocated for each group not in the second line of a slice */
+	u16 nsl_bpg_offset;
+	/* Offset adjustment for the second line in native 4:2:0 mode */
+	u16 second_line_offset_adj;
+};
+
+/**
+ * struct drm_dsc_picture_parameter_set - Represents 128 bytes of Picture Parameter Set
+ *
+ * The VESA DSC standard defines the picture parameter set (PPS), which display
+ * stream compression encoders must communicate to decoders.
+ * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in
+ * this structure are as per Table 4.1 in the VESA DSC specification v1.1/v1.2.
+ * The PPS fields that span more than one byte should be stored in big-endian
+ * format.
+ */
+struct drm_dsc_picture_parameter_set {
+	/**
+	 * @dsc_version:
+	 * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC
+	 * PPS0[7:4] - dsc_version_major: Contains major version of DSC
+	 */
+	u8 dsc_version;
+	/**
+	 * @pps_identifier:
+	 * PPS1[7:0] - Application specific identifier that can be
+	 * used to differentiate between different PPS tables.
+	 */
+	u8 pps_identifier;
+	/**
+	 * @pps_reserved:
+	 * PPS2[7:0]- RESERVED Byte
+	 */
+	u8 pps_reserved;
+	/**
+	 * @pps_3:
+	 * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to
+	 * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits,
+	 * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits,
+	 * 0xE - 14 bits for DSC 1.2, 0xF - 14 bits for DSC 1.2).
+	 * PPS3[7:4] - bits_per_component: Bits per component for the original
+	 * pixels of the encoded picture.
+	 * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2)
+	 * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also
+	 * allowed only when dsc_version_minor = 0x2)
+	 */
+	u8 pps_3;
+	/**
+	 * @pps_4:
+	 * PPS4[1:0] - These are the most significant 2 bits of
+	 * compressed BPP bits_per_pixel[9:0] syntax element.
+	 * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled
+	 * PPS4[3] - simple_422: Indicates if decoder drops samples to
+	 * reconstruct the 4:2:2 picture.
+	 * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is
+	 * active.
+	 * PPS4[5] - block_pred_enable: Indicates if BP is used to code any
+	 * groups in the picture.
+	 * PPS4[7:6] - Reserved bits
+	 */
+	u8 pps_4;
+	/**
+	 * @bits_per_pixel_low:
+	 * PPS5[7:0] - The 8 least significant bits of the
+	 * compressed BPP bits_per_pixel[9:0] element.
+	 */
+	u8 bits_per_pixel_low;
+	/**
+	 * @pic_height:
+	 * PPS6[7:0], PPS7[7:0] - pic_height: Specifies the number of pixel rows
+	 * within the raster.
+	 */
+	__be16 pic_height;
+	/**
+	 * @pic_width:
+	 * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within
+	 * the raster.
+	 */
+	__be16 pic_width;
+	/**
+	 * @slice_height:
+	 * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels.
+	 */
+	__be16 slice_height;
+	/**
+	 * @slice_width:
+	 * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels.
+	 */
+	__be16 slice_width;
+	/**
+	 * @chunk_size:
+	 * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks
+	 * that are used for slice multiplexing.
+	 */
+	__be16 chunk_size;
+	/**
+	 * @initial_xmit_delay_high:
+	 * PPS16[1:0] - Most significant two bits of initial transmission delay.
+	 * It specifies the number of pixel times that the encoder waits before
+	 * transmitting data from its rate buffer.
+	 * PPS16[7:2] - Reserved
+	 */
+	u8 initial_xmit_delay_high;
+	/**
+	 * @initial_xmit_delay_low:
+	 * PPS17[7:0] - Least significant 8 bits of initial transmission delay.
+	 */
+	u8 initial_xmit_delay_low;
+	/**
+	 * @initial_dec_delay:
+	 *
+	 * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number
+	 * of pixel times that the decoder accumulates data in its rate buffer
+	 * before starting to decode and output pixels.
+	 */
+	__be16 initial_dec_delay;
+	/**
+	 * @pps20_reserved:
+	 *
+	 * PPS20[7:0] - Reserved
+	 */
+	u8 pps20_reserved;
+	/**
+	 * @initial_scale_value:
+	 * PPS21[5:0] - Initial rcXformScale factor used at beginning
+	 * of a slice.
+	 * PPS21[7:6] - Reserved
+	 */
+	u8 initial_scale_value;
+	/**
+	 * @scale_increment_interval:
+	 * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing
+	 * the rcXformScale factor at end of a slice.
+	 */
+	__be16 scale_increment_interval;
+	/**
+	 * @scale_decrement_interval_high:
+	 * PPS24[3:0] - Higher 4 bits indicating number of group times between
+	 * decrementing the rcXformScale factor at beginning of a slice.
+	 * PPS24[7:4] - Reserved
+	 */
+	u8 scale_decrement_interval_high;
+	/**
+	 * @scale_decrement_interval_low:
+	 * PPS25[7:0] - Lower 8 bits of scale decrement interval
+	 */
+	u8 scale_decrement_interval_low;
+	/**
+	 * @pps26_reserved:
+	 * PPS26[7:0] - Reserved
+	 */
+	u8 pps26_reserved;
+	/**
+	 * @first_line_bpg_offset:
+	 * PPS27[4:0] - Number of additional bits that are allocated
+	 * for each group on first line of a slice.
+	 * PPS27[7:5] - Reserved
+	 */
+	u8 first_line_bpg_offset;
+	/**
+	 * @nfl_bpg_offset:
+	 * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits
+	 * deallocated for each group for groups after the first line of slice.
+	 */
+	__be16 nfl_bpg_offset;
+	/**
+	 * @slice_bpg_offset:
+	 * PPS30, PPS31[7:0] - Number of bits that are deallocated for each
+	 * group to enforce the slice constraint.
+	 */
+	__be16 slice_bpg_offset;
+	/**
+	 * @initial_offset:
+	 * PPS32,33[7:0] - Initial value for rcXformOffset
+	 */
+	__be16 initial_offset;
+	/**
+	 * @final_offset:
+	 * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset
+	 */
+	__be16 final_offset;
+	/**
+	 * @flatness_min_qp:
+	 * PPS36[4:0] - Minimum QP at which flatness is signaled and
+	 * flatness QP adjustment is made.
+	 * PPS36[7:5] - Reserved
+	 */
+	u8 flatness_min_qp;
+	/**
+	 * @flatness_max_qp:
+	 * PPS37[4:0] - Max QP at which flatness is signaled and
+	 * the flatness adjustment is made.
+	 * PPS37[7:5] - Reserved
+	 */
+	u8 flatness_max_qp;
+	/**
+	 * @rc_model_size:
+	 * PPS38,39[7:0] - Number of bits within RC Model.
+	 */
+	__be16 rc_model_size;
+	/**
+	 * @rc_edge_factor:
+	 * PPS40[3:0] - Ratio of current activity vs. previous
+	 * activity to determine presence of edge.
+	 * PPS40[7:4] - Reserved
+	 */
+	u8 rc_edge_factor;
+	/**
+	 * @rc_quant_incr_limit0:
+	 * PPS41[4:0] - QP threshold used in short term RC
+	 * PPS41[7:5] - Reserved
+	 */
+	u8 rc_quant_incr_limit0;
+	/**
+	 * @rc_quant_incr_limit1:
+	 * PPS42[4:0] - QP threshold used in short term RC
+	 * PPS42[7:5] - Reserved
+	 */
+	u8 rc_quant_incr_limit1;
+	/**
+	 * @rc_tgt_offset:
+	 * PPS43[3:0] - Lower end of the variability range around the target
+	 * bits per group that is allowed by short term RC.
+	 * PPS43[7:4] - Upper end of the variability range around the target
+	 * bits per group that is allowed by short term RC.
+	 */
+	u8 rc_tgt_offset;
+	/**
+	 * @rc_buf_thresh:
+	 * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for
+	 * the 15 ranges defined by 14 thresholds.
+	 */
+	u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+	/**
+	 * @rc_range_parameters:
+	 * PPS58[7:0] - PPS87[7:0]
+	 * Parameters that correspond to each of the 15 ranges.
+	 */
+	__be16 rc_range_parameters[DSC_NUM_BUF_RANGES];
+	/**
+	 * @native_422_420:
+	 * PPS88[0] - 0 = Native 4:2:2 not used
+	 * 1 = Native 4:2:2 used
+	 * PPS88[1] - 0 = Native 4:2:0 not used
+	 * 1 = Native 4:2:0 used
+	 * PPS88[7:2] - Reserved 6 bits
+	 */
+	u8 native_422_420;
+	/**
+	 * @second_line_bpg_offset:
+	 * PPS89[4:0] - Additional bits/group budget for the
+	 * second line of a slice in Native 4:2:0 mode.
+	 * Set to 0 if DSC minor version is 1 or native420 is 0.
+	 * PPS89[7:5] - Reserved
+	 */
+	u8 second_line_bpg_offset;
+	/**
+	 * @nsl_bpg_offset:
+	 * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated
+	 * for each group that is not in the second line of a slice.
+	 */
+	__be16 nsl_bpg_offset;
+	/**
+	 * @second_line_offset_adj:
+	 * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second
+	 * line in Native 4:2:0 mode.
+	 */
+	__be16 second_line_offset_adj;
+	/**
+	 * @pps_long_94_reserved:
+	 * PPS 94, 95, 96, 97 - Reserved
+	 */
+	u32 pps_long_94_reserved;
+	/**
+	 * @pps_long_98_reserved:
+	 * PPS 98, 99, 100, 101 - Reserved
+	 */
+	u32 pps_long_98_reserved;
+	/**
+	 * @pps_long_102_reserved:
+	 * PPS 102, 103, 104, 105 - Reserved
+	 */
+	u32 pps_long_102_reserved;
+	/**
+	 * @pps_long_106_reserved:
+	 * PPS 106, 107, 108, 109 - reserved
+	 */
+	u32 pps_long_106_reserved;
+	/**
+	 * @pps_long_110_reserved:
+	 * PPS 110, 111, 112, 113 - reserved
+	 */
+	u32 pps_long_110_reserved;
+	/**
+	 * @pps_long_114_reserved:
+	 * PPS 114 - 117 - reserved
+	 */
+	u32 pps_long_114_reserved;
+	/**
+	 * @pps_long_118_reserved:
+	 * PPS 118 - 121 - reserved
+	 */
+	u32 pps_long_118_reserved;
+	/**
+	 * @pps_long_122_reserved:
+	 * PPS 122- 125 - reserved
+	 */
+	u32 pps_long_122_reserved;
+	/**
+	 * @pps_short_126_reserved:
+	 * PPS 126, 127 - reserved
+	 */
+	__be16 pps_short_126_reserved;
+} __packed;
+
+/**
+ * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter
+ * Set Metadata
+ *
+ * This structure represents the DSC PPS infoframe required to send the Picture
+ * Parameter Set metadata required before enabling VESA Display Stream
+ * Compression. This is based on the DP Secondary Data Packet structure and
+ * comprises the SDP header as defined in drm_dp_helper.h and the PPS payload.
+ *
+ * @pps_header: Header for PPS as per DP SDP header format
+ * @pps_payload: PPS payload fields as per DSC specification Table 4-1
+ */
+struct drm_dsc_pps_infoframe {
+	struct dp_sdp_header pps_header;
+	struct drm_dsc_picture_parameter_set pps_payload;
+} __packed;
+
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp);
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+				const struct drm_dsc_config *dsc_cfg);
+
+#endif /* DRM_DSC_H_ */
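
A likely call sequence for the two helpers declared above, from a source-side
driver about to enable DSC (a sketch; dsc_cfg is a driver-computed struct
drm_dsc_config, and how the packed SDP reaches the hardware is driver
specific):

	struct drm_dsc_pps_infoframe pps_sdp;

	/* fill in the DP SDP header for a PPS packet */
	drm_dsc_dp_pps_header_init(&pps_sdp);

	/* pack the 128 PPS bytes from the computed DSC configuration */
	drm_dsc_pps_infoframe_pack(&pps_sdp, &dsc_cfg);

	/* then hand &pps_sdp to the encoder's SDP/infoframe hardware */
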
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 862767f..572274c 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -654,6 +654,11 @@ struct drm_mode_config {
 	 * connectors must be of and active must be set to disabled, too.
 	 */
 	struct drm_property *prop_mode_id;
+	/**
+	 * @prop_vrr_enabled: Default atomic CRTC property to indicate
+	 * whether variable refresh rate should be enabled on the CRTC.
+	 */
+	struct drm_property *prop_vrr_enabled;
 
 	/**
 	 * @dvi_i_subconnector_property: Optional DVI-I property to
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 987cefa..786816c 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -234,7 +234,7 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj,
 				      const struct attribute *attr,
 				      const void *ns);
 int __must_check sysfs_create_files(struct kobject *kobj,
-				   const struct attribute **attr);
+				   const struct attribute * const *attr);
 int __must_check sysfs_chmod_file(struct kobject *kobj,
 				  const struct attribute *attr, umode_t mode);
 struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
@@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn);
 void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
 			  const void *ns);
 bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr);
 
 int __must_check sysfs_create_bin_file(struct kobject *kobj,
 				       const struct bin_attribute *attr);
@@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj,
 }
 
 static inline int sysfs_create_files(struct kobject *kobj,
-				    const struct attribute **attr)
+				    const struct attribute * const *attr)
 {
 	return 0;
 }
@@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj,
 }
 
 static inline void sysfs_remove_files(struct kobject *kobj,
-				     const struct attribute **attr)
+				     const struct attribute * const *attr)
 {
 }
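
With the const-qualified signatures above, callers may now keep their
attribute pointer arrays entirely in rodata (a sketch; dev_attr_foo is a
placeholder attribute):

	static const struct attribute * const foo_attrs[] = {
		&dev_attr_foo.attr,
		NULL,
	};

	/* in the caller's init path */
	err = sysfs_create_files(kobj, foo_attrs);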