diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cd57747286f286d44dcc57f1829bcee64214a9f2..9635897458a09e660a07ef29064b6a1fb6995bbc 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -77,6 +77,7 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
 			  struct sg_table *sg,
 			  enum dma_data_direction direction)
 {
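+	/* Undo the dma_map_sg() performed when the attachment was mapped, before freeing the table. */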
+	dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
 	sg_free_table(sg);
 	kfree(sg);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cc8ad3831982d5e2e4dfa60ec76581fb9a3d777c..f4ac632a87b278d09a462bdfe4f145934f249884 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1589,6 +1589,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 {
 	int r = 0;
 	int i;
+	uint32_t smu_version;
 
 	if (adev->asic_type >= CHIP_VEGA10) {
 		for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1614,16 +1615,9 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 			}
 		}
 	}
+	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
 
-	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
-		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
-		if (r) {
-			pr_err("firmware loading failed\n");
-			return r;
-		}
-	}
-
-	return 0;
+	return r;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 34471dbaa872ad9d7b18c669a95c2fcd758def83..039cfa2ec89d9d9ada32d9a97731ac6204e8de95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2490,6 +2490,21 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 
 }
 
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+{
+	int r = -EINVAL;
+
+	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
+		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+		if (r) {
+			pr_err("smu firmware loading failed\n");
+			return r;
+		}
+		*smu_version = adev->pm.fw_version;
+	}
+	return r;
+}
+
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index f21a7716b90e67b7cdd184046a107153bc49d19a..7ff0e7621fffb3ea1ab2f93ddaa94ea0578bb9c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -34,6 +34,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
 void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
 void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index c021b114c8a44ec04c6ca3bccc4479c141218232..f7189e22f6b7081ae45762e904a72343e09fd534 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1072,7 +1072,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint32_t rptr;
 	unsigned i;
 	int r, timeout = adev->usec_timeout;
 
@@ -1084,6 +1084,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 	if (r)
 		return r;
 
+	rptr = amdgpu_ring_get_rptr(ring);
+
 	amdgpu_ring_write(ring, VCE_CMD_END);
 	amdgpu_ring_commit(ring);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ba67d10232643cb9963c2b58954fd00cbaaa67ae..b610e3b30d95a2a0d2b03e68ff8ef0e15ef13ae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -28,6 +28,7 @@
 #include "soc15.h"
 #include "soc15d.h"
 #include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
 
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"
@@ -96,6 +97,7 @@ MODULE_FIRMWARE("amdgpu/raven2_me.bin");
 MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
 MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
 MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
+MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
 
 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
 {
@@ -588,7 +590,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 	case CHIP_RAVEN:
 		if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
 			break;
-		if ((adev->gfx.rlc_fw_version < 531) ||
+		if ((adev->gfx.rlc_fw_version != 106 &&
+		     adev->gfx.rlc_fw_version < 531) ||
 		    (adev->gfx.rlc_fw_version == 53815) ||
 		    (adev->gfx.rlc_feature_version < 1) ||
 		    !adev->gfx.rlc.is_rlc_v2_1)
@@ -612,6 +615,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	unsigned int i = 0;
 	uint16_t version_major;
 	uint16_t version_minor;
+	uint32_t smu_version;
 
 	DRM_DEBUG("\n");
 
@@ -682,6 +686,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 		(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
 		((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
+		(smu_version >= 0x41e2b))
+		/*
+		 * SMC is loaded by the SBIOS on APUs, so we can query the SMU version directly.
+		 */
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
 	else
 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
 	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bcb1a93c0b4c5c9ee868f2dcdbffa8d72a1f5d94..ab7c5c3004eee3af477d2bcfad60f3cf2bda9887 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4232,8 +4232,7 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
 	struct drm_plane_state *old_state =
 		drm_atomic_get_old_plane_state(new_state->state, plane);
 
-	if (plane->state->fb != new_state->fb)
-		drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
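+	/* Swap the fb pointers so that cleanup_fb() on new_state releases the old fb rather than the one now in use. */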
+	swap(plane->state->fb, new_state->fb);
 
 	plane->state->src_x = new_state->src_x;
 	plane->state->src_y = new_state->src_y;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 6cd6497c6fc21d3e935c4274de9d31048de2ee9f..f1d326caf69e1b6378cb89ae701758f8bede5c0a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -92,6 +92,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 	hwmgr_set_user_specify_caps(hwmgr);
 	hwmgr->fan_ctrl_is_in_default_mode = true;
 	hwmgr_init_workload_prority(hwmgr);
+	hwmgr->gfxoff_state_changed_by_workload = false;
 
 	switch (hwmgr->chip_family) {
 	case AMDGPU_FAMILY_CI:
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 9a595f7525e6b61fa81100e66bfe1538c4d45622..e32ae9d3373ca3e45fcea4a793be0d3951adf2a5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1258,21 +1258,46 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	return size;
 }
 
+static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	if ((adev->asic_type == CHIP_RAVEN) &&
+	    (adev->rev_id != 0x15d8) &&
+	    (hwmgr->smu_version >= 0x41e2b))
+		return true;
+	else
+		return false;
+}
+
 static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
 {
 	int workload_type = 0;
+	int result = 0;
 
 	if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
 		pr_err("Invalid power profile mode %ld\n", input[size]);
 		return -EINVAL;
 	}
-	hwmgr->power_profile_mode = input[size];
+	if (hwmgr->power_profile_mode == input[size])
+		return 0;
 
 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
 	workload_type =
-		conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
-	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
+		conv_power_profile_to_pplib_workload(input[size]);
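+	/* On Raven1 refresh, gfxoff is turned off around the ActiveProcessNotify message for non-default workloads. */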
+	if (workload_type &&
+	    smu10_is_raven1_refresh(hwmgr) &&
+	    !hwmgr->gfxoff_state_changed_by_workload) {
+		smu10_gfx_off_control(hwmgr, false);
+		hwmgr->gfxoff_state_changed_by_workload = true;
+	}
+	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
 						1 << workload_type);
+	if (!result)
+		hwmgr->power_profile_mode = input[size];
+	if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
+		smu10_gfx_off_control(hwmgr, true);
+		hwmgr->gfxoff_state_changed_by_workload = false;
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index bac3d85e3b82ce02a5ee91ab52c5064c04f8273a..c92999aac07c9984d39a8314be242d977606b439 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -782,6 +782,7 @@ struct pp_hwmgr {
 	uint32_t workload_mask;
 	uint32_t workload_prority[Workload_Policy_Max];
 	uint32_t workload_setting[Workload_Policy_Max];
+	bool gfxoff_state_changed_by_workload;
 };
 
 int hwmgr_early_init(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 031e5f305a3c2d77a47ae175e0d24c12f86b39ea..6bab816ed8e73257daf72eacf3c3f798943074b9 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -245,7 +245,7 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
 	seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
 }
 
-static struct komeda_component_funcs d71_layer_funcs = {
+static const struct komeda_component_funcs d71_layer_funcs = {
 	.update		= d71_layer_update,
 	.disable	= d71_layer_disable,
 	.dump_register	= d71_layer_dump,
@@ -391,7 +391,7 @@ static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
 	seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
 }
 
-static struct komeda_component_funcs d71_compiz_funcs = {
+static const struct komeda_component_funcs d71_compiz_funcs = {
 	.update		= d71_compiz_update,
 	.disable	= d71_component_disable,
 	.dump_register	= d71_compiz_dump,
@@ -467,7 +467,7 @@ static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
 		seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
 }
 
-static struct komeda_component_funcs d71_improc_funcs = {
+static const struct komeda_component_funcs d71_improc_funcs = {
 	.update		= d71_improc_update,
 	.disable	= d71_component_disable,
 	.dump_register	= d71_improc_dump,
@@ -580,7 +580,7 @@ static void d71_timing_ctrlr_dump(struct komeda_component *c,
 	seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
 }
 
-static struct komeda_component_funcs d71_timing_ctrlr_funcs = {
+static const struct komeda_component_funcs d71_timing_ctrlr_funcs = {
 	.update		= d71_timing_ctrlr_update,
 	.disable	= d71_timing_ctrlr_disable,
 	.dump_register	= d71_timing_ctrlr_dump,
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index 34506ef7ad40695e41b02b66e3a185130e912ff6..3a7248d42376c8ef9a5aaf0d088eafd4db69cbbd 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -502,7 +502,7 @@ static void d71_init_fmt_tbl(struct komeda_dev *mdev)
 	table->n_formats = ARRAY_SIZE(d71_format_caps_table);
 }
 
-static struct komeda_dev_funcs d71_chip_funcs = {
+static const struct komeda_dev_funcs d71_chip_funcs = {
 	.init_format_table = d71_init_fmt_tbl,
 	.enum_resources	= d71_enum_resources,
 	.cleanup	= d71_cleanup,
@@ -514,7 +514,7 @@ static struct komeda_dev_funcs d71_chip_funcs = {
 	.flush		= d71_flush,
 };
 
-struct komeda_dev_funcs *
+const struct komeda_dev_funcs *
 d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
 {
 	chip->arch_id	= malidp_read32(reg_base, GLB_ARCH_ID);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 62fad59f5a6a1a82735f5f3df782a03a6719748e..284ce079d8c49d26cc0aa97d10d1ff2b6553f845 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -350,7 +350,7 @@ static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+static const struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
 	.atomic_check	= komeda_crtc_atomic_check,
 	.atomic_flush	= komeda_crtc_atomic_flush,
 	.atomic_enable	= komeda_crtc_atomic_enable,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index ca3599e4a4d3013d2462abe2206d136e0408cb6d..b67030a9f05684d120b9458a22a75cd8a322be49 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -249,6 +250,9 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
 		goto err_cleanup;
 	}
 
+	dev->dma_parms = &mdev->dma_parms;
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
 	err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
 	if (err) {
 		DRM_ERROR("create sysfs group failed.\n");
@@ -269,7 +273,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
 void komeda_dev_destroy(struct komeda_dev *mdev)
 {
 	struct device *dev = mdev->dev;
-	struct komeda_dev_funcs *funcs = mdev->funcs;
+	const struct komeda_dev_funcs *funcs = mdev->funcs;
 	int i;
 
 	sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index 29e03c4e1ffc6b6bdc16b8238c1bbc4fc089d269..973fd5e0eb98ef5fbd897a8a398410673d8b345a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -60,7 +60,7 @@ struct komeda_chip_info {
 
 struct komeda_product_data {
 	u32 product_id;
-	struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
+	const struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
 					     struct komeda_chip_info *info);
 };
 
@@ -149,6 +149,8 @@ struct komeda_dev {
 	struct device *dev;
 	/** @reg_base: the base address of komeda io space */
 	u32 __iomem   *reg_base;
+	/** @dma_parms: the dma parameters of komeda */
+	struct device_dma_parameters dma_parms;
 
 	/** @chip: the basic chip information */
 	struct komeda_chip_info chip;
@@ -173,7 +175,7 @@ struct komeda_dev {
 	struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
 
 	/** @funcs: chip funcs to access to HW */
-	struct komeda_dev_funcs *funcs;
+	const struct komeda_dev_funcs *funcs;
 	/**
 	 * @chip_data:
 	 *
@@ -192,7 +194,7 @@ komeda_product_match(struct komeda_dev *mdev, u32 target)
 	return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
 }
 
-struct komeda_dev_funcs *
+const struct komeda_dev_funcs *
 d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
 
 struct komeda_dev *komeda_dev_create(struct device *dev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index c379439c61941990b6e99738128f3f45daef2e7f..a130b62fa6d1ac485d892cb9a05fd658a4b1e589 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -12,7 +12,7 @@
 /** komeda_pipeline_add - Add a pipeline to &komeda_dev */
 struct komeda_pipeline *
 komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
-		    struct komeda_pipeline_funcs *funcs)
+		    const struct komeda_pipeline_funcs *funcs)
 {
 	struct komeda_pipeline *pipe;
 
@@ -130,7 +130,7 @@ komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id)
 struct komeda_component *
 komeda_component_add(struct komeda_pipeline *pipe,
 		     size_t comp_sz, u32 id, u32 hw_id,
-		     struct komeda_component_funcs *funcs,
+		     const struct komeda_component_funcs *funcs,
 		     u8 max_active_inputs, u32 supported_inputs,
 		     u8 max_active_outputs, u32 __iomem *reg,
 		     const char *name_fmt, ...)
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index b1f813a349a49ad060bd052fb2cd0e7fbd888b27..bae8a32b81a62ca174ebefe8a91f010d257da144 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -124,7 +124,7 @@ struct komeda_component {
 	/**
 	 * @funcs: chip functions to access HW
 	 */
-	struct komeda_component_funcs *funcs;
+	const struct komeda_component_funcs *funcs;
 };
 
 /**
@@ -346,8 +346,8 @@ struct komeda_pipeline {
 	struct komeda_improc *improc;
 	/** @ctrlr: timing controller */
 	struct komeda_timing_ctrlr *ctrlr;
-	/** @funcs: chip pipeline functions */
-	struct komeda_pipeline_funcs *funcs; /* private pipeline functions */
+	/** @funcs: chip private pipeline functions */
+	const struct komeda_pipeline_funcs *funcs;
 
 	/** @of_node: pipeline dt node */
 	struct device_node *of_node;
@@ -397,7 +397,7 @@ struct komeda_pipeline_state {
 /* pipeline APIs */
 struct komeda_pipeline *
 komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
-		    struct komeda_pipeline_funcs *funcs);
+		    const struct komeda_pipeline_funcs *funcs);
 void komeda_pipeline_destroy(struct komeda_dev *mdev,
 			     struct komeda_pipeline *pipe);
 int komeda_assemble_pipelines(struct komeda_dev *mdev);
@@ -411,7 +411,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
 struct komeda_component *
 komeda_component_add(struct komeda_pipeline *pipe,
 		     size_t comp_sz, u32 id, u32 hw_id,
-		     struct komeda_component_funcs *funcs,
+		     const struct komeda_component_funcs *funcs,
 		     u8 max_active_inputs, u32 supported_inputs,
 		     u8 max_active_outputs, u32 __iomem *reg,
 		     const char *name_fmt, ...);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 07ed0cc1bc4459f1e433ab467365f936c8358635..c97062bdd69b1b3a46c5298ce3402ed2e589af82 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -55,7 +55,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
 	struct komeda_plane_state *kplane_st = to_kplane_st(state);
 	struct komeda_layer *layer = kplane->layer;
 	struct drm_crtc_state *crtc_st;
-	struct komeda_crtc *kcrtc;
 	struct komeda_crtc_state *kcrtc_st;
 	struct komeda_data_flow_cfg dflow;
 	int err;
@@ -64,7 +63,7 @@ komeda_plane_atomic_check(struct drm_plane *plane,
 		return 0;
 
 	crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
-	if (!crtc_st->enable) {
+	if (IS_ERR(crtc_st) || !crtc_st->enable) {
 		DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
 		return -EINVAL;
 	}
@@ -73,7 +72,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
 	if (!crtc_st->active)
 		return 0;
 
-	kcrtc = to_kcrtc(state->crtc);
 	kcrtc_st = to_kcrtc_st(crtc_st);
 
 	err = komeda_plane_init_data_flow(state, &dflow);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 0b2b62f8fa3c43a3508b23d6e7c2ac07262d5ff3..a3efa28436ea98d63c0a6781a965bb787b4eaa87 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
 	clk_disable_unprepare(hdlcd->clk);
 }
 
-static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
-				   struct drm_crtc_state *state)
+static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
+		const struct drm_display_mode *mode)
 {
 	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
-	struct drm_display_mode *mode = &state->adjusted_mode;
 	long rate, clk_rate = mode->clock * 1000;
 
 	rate = clk_round_rate(hdlcd->clk, clk_rate);
-	if (rate != clk_rate) {
+	/* 0.1% seems a close enough tolerance for the TDA19988 on Juno */
+	if (abs(rate - clk_rate) * 1000 > clk_rate) {
 		/* clock required by mode not supported by hardware */
-		return -EINVAL;
+		return MODE_NOCLOCK;
 	}
 
-	return 0;
+	return MODE_OK;
 }
 
 static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
-	.atomic_check	= hdlcd_crtc_atomic_check,
+	.mode_valid	= hdlcd_crtc_mode_valid,
 	.atomic_begin	= hdlcd_crtc_atomic_begin,
 	.atomic_enable	= hdlcd_crtc_atomic_enable,
 	.atomic_disable	= hdlcd_crtc_atomic_disable,
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 21725c9b9f5e0d3a5c15e5b96e2b9f053aa31ce9..18cb7f134f4e700e3b99d83cdea6508e189c823d 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -192,6 +192,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
 {
 	struct drm_device *drm = state->dev;
 	struct malidp_drm *malidp = drm->dev_private;
+	int loop = 5;
 
 	malidp->event = malidp->crtc.state->event;
 	malidp->crtc.state->event = NULL;
@@ -206,8 +207,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
 			drm_crtc_vblank_get(&malidp->crtc);
 
 		/* only set config_valid if the CRTC is enabled */
-		if (malidp_set_and_wait_config_valid(drm) < 0)
+		if (malidp_set_and_wait_config_valid(drm) < 0) {
+			/*
+			 * Retry the config-valid (CVAL) setting up to 5 times
+			 * before giving up.
+			 */
+			while (loop--) {
+				if (!malidp_set_and_wait_config_valid(drm))
+					break;
+			}
 			DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+		}
+
 	} else if (malidp->event) {
 		/* CRTC inactive means vblank IRQ is disabled, send event directly */
 		spin_lock_irq(&drm->event_lock);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2e0cb4246cbda6c3a7a1e12506c55061449aea75..22a5c617f67071a2fcba0f7bb384635468cfcb0c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1607,15 +1607,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
 	    old_plane_state->crtc != new_plane_state->crtc)
 		return -EINVAL;
 
-	/*
-	 * FIXME: Since prepare_fb and cleanup_fb are always called on
-	 * the new_plane_state for async updates we need to block framebuffer
-	 * changes. This prevents use of a fb that's been cleaned up and
-	 * double cleanups from occuring.
-	 */
-	if (old_plane_state->fb != new_plane_state->fb)
-		return -EINVAL;
-
 	funcs = plane->helper_private;
 	if (!funcs->atomic_async_update)
 		return -EINVAL;
@@ -1646,6 +1637,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check);
  * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
  * the states like normal sync commits, but just do in-place changes on the
  * current state.
+ *
+ * TODO: Implement full swap instead of doing in-place changes.
  */
 void drm_atomic_helper_async_commit(struct drm_device *dev,
 				    struct drm_atomic_state *state)
@@ -1656,6 +1649,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
 	int i;
 
 	for_each_new_plane_in_state(state, plane, plane_state, i) {
+		struct drm_framebuffer *new_fb = plane_state->fb;
+		struct drm_framebuffer *old_fb = plane->state->fb;
+
 		funcs = plane->helper_private;
 		funcs->atomic_async_update(plane, plane_state);
 
@@ -1664,11 +1660,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
 		 * plane->state in-place, make sure at least common
 		 * properties have been properly updated.
 		 */
-		WARN_ON_ONCE(plane->state->fb != plane_state->fb);
+		WARN_ON_ONCE(plane->state->fb != new_fb);
 		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
 		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
 		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
 		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
+
+		/*
+		 * Make sure the FBs have been swapped, so that a cleanup on
+		 * new_state actually cleans up the old FB.
+		 */
+		WARN_ON_ONCE(plane_state->fb != old_fb);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 5cb59c0b4bbe5a7376bdd8107be80889e83c55b0..de53477255646276e992be782ed64c6a60bd4c89 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2530,7 +2530,7 @@ static const struct cmd_info cmd_info[] = {
 		0, 12, NULL},
 
 	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
-		0, 20, NULL},
+		0, 12, NULL},
 };
 
 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 244ad17297646be945d55218ddcadef799de335b..53115bdae12be320e3de82562fa6d36a39684686 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192;
  */
 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 {
-	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
-			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
-				addr, size);
-		return false;
-	}
-	return true;
+	if (size == 0)
+		return vgpu_gmadr_is_valid(vgpu, addr);
+
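+	/* A non-empty range must lie entirely within the aperture or entirely within the hidden GM space. */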
+	if (vgpu_gmadr_is_aperture(vgpu, addr) &&
+	    vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
+		return true;
+	else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
+		 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
+		return true;
+
+	gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
+		     addr, size);
+	return false;
 }
 
 /* translate a guest gmadr to host gmadr */
@@ -942,7 +948,16 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
 
 	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
 		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
-		cur_pt_type = get_next_pt_type(e->type) + 1;
+		cur_pt_type = get_next_pt_type(e->type);
+
+		if (!gtt_type_is_pt(cur_pt_type) ||
+				!gtt_type_is_pt(cur_pt_type + 1)) {
+			WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+			return -EINVAL;
+		}
+
+		cur_pt_type += 1;
+
 		if (ops->get_pfn(e) ==
 			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
 			return 0;
@@ -1102,6 +1117,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 
 err_free_spt:
 	ppgtt_free_spt(spt);
+	spt = NULL;
 err:
 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
 		     spt, we->val64, we->type);
@@ -2183,7 +2199,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
 	unsigned long gma, gfn;
-	struct intel_gvt_gtt_entry e, m;
+	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
 	dma_addr_t dma_addr;
 	int ret;
 	struct intel_gvt_partial_pte *partial_pte, *pos, *n;
@@ -2250,7 +2267,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 
 	if (!partial_update && (ops->test_present(&e))) {
 		gfn = ops->get_pfn(&e);
-		m = e;
+		m.val64 = e.val64;
+		m.type = e.type;
 
 		/* one PTE update may be issued in multiple writes and the
 		 * first write may not construct a valid gfn
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index e09bd6e0cc4d6d85b21ebc22f5ff0bdbf8cf601d..a6ade66349bd8248f4bfe79ee780abc2f4bf03aa 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = {
 	_MMIO(0x2690),
 	_MMIO(0x2694),
 	_MMIO(0x2698),
+	_MMIO(0x2754),
+	_MMIO(0x28a0),
 	_MMIO(0x4de0),
 	_MMIO(0x4de4),
 	_MMIO(0x4dfc),
@@ -1690,8 +1692,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	bool enable_execlist;
 	int ret;
 
+	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+	if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
 	write_vreg(vgpu, offset, p_data, bytes);
 
+	if (data & _MASKED_BIT_ENABLE(1)) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
+
+	if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+	    data & _MASKED_BIT_ENABLE(2)) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
+
 	/* when PPGTT mode enabled, we will check if guest has called
 	 * pvinfo, if not, we will treat this guest as non-gvtg-aware
 	 * guest, and stop emulating its cfg space, mmio, gtt, etc.
@@ -1773,6 +1789,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
+				    unsigned int offset, void *p_data,
+				    unsigned int bytes)
+{
+	u32 data = *(u32 *)p_data;
+
+	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
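+	/* Bits 3 and 4 are not supported for a vGPU; mask them out and enter failsafe mode if the guest tried to set them. */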
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+	return 0;
+}
+
 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
 	ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
 		f, s, am, rm, d, r, w); \
@@ -1893,7 +1924,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
+		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2997,7 +3029,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
 	MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
 
-	MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
+	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_D(SKL_DFSM, D_SKL_PLUS);
 	MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
@@ -3010,8 +3042,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
 	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
-	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
-		NULL, NULL);
+	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
+		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		NULL, NULL);
 
@@ -3030,7 +3062,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
-	MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
+	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
 
 	MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
@@ -3059,7 +3091,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
-	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		      NULL, csfe_chicken1_mmio_write);
+#undef CSFE_CHICKEN1_REG
 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		 NULL, NULL);
 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -3239,7 +3274,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
 	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
 	MMIO_D(GEN6_GFXPAUSE, D_BXT);
-	MMIO_D(GEN8_L3SQCREG1, D_BXT);
+	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
 
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 33aaa14bfdde789775758aa076e4d51af26c85e9..5b66e14c5b7b2b9a2d24071dce3504b61aa95b1a 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -102,6 +102,8 @@
 #define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
 #define FORCEWAKE_ACK_HSW_REG 0x130044
 
+#define RB_HEAD_WRAP_CNT_MAX	((1 << 11) - 1)
+#define RB_HEAD_WRAP_CNT_OFF	21
 #define RB_HEAD_OFF_MASK	((1U << 21) - (1U << 2))
 #define RB_TAIL_OFF_MASK	((1U << 21) - (1U << 3))
 #define RB_TAIL_SIZE_MASK	((1U << 21) - (1U << 12))
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 13632dba8b2afb516effcfa1ec79bdabb1e692d6..0f919f0a43d46ba212df5cd3d2c79bd57accfd68 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -812,10 +812,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	void *src;
 	unsigned long context_gpa, context_page_num;
 	int i;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	u32 ring_base;
+	u32 head, tail;
+	u16 wrap_count;
 
 	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
 		      workload->ctx_desc.lrca);
 
+	head = workload->rb_head;
+	tail = workload->rb_tail;
+	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
+
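+	/* A tail behind the head means the ring buffer wrapped, so advance the wrap counter, rolling over at its maximum. */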
+	if (tail < head) {
+		if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
+			wrap_count = 0;
+		else
+			wrap_count += 1;
+	}
+
+	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
+
+	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
+	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
+
 	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
@@ -1415,6 +1436,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
 	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+	u32 guest_head;
 	int ret;
 
 	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -1430,6 +1452,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(ring_tail.val), &tail, 4);
 
+	guest_head = head;
+
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 
@@ -1462,6 +1486,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	workload->ctx_desc = *desc;
 	workload->ring_context_gpa = ring_context_gpa;
 	workload->rb_head = head;
+	workload->guest_rb_head = guest_head;
 	workload->rb_tail = tail;
 	workload->rb_start = start;
 	workload->rb_ctl = ctl;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 90c6756f54537382d666ba0356ca3c30f915baf8..c50d14a9ce8522229094795ebe2c0c6b0c39bac1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -100,6 +100,7 @@ struct intel_vgpu_workload {
 	struct execlist_ctx_descriptor_format ctx_desc;
 	struct execlist_ring_context *ring_context;
 	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+	unsigned long guest_rb_head;
 	bool restore_inhibit;
 	struct intel_vgpu_elsp_dwords elsp_dwords;
 	bool emulate_schedule_in;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 249d35c12a7553811fe0ba8ecd66574dad25329a..2aa69d347ec4070de9fd27aaf1e138d8a305dcd5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7620,6 +7620,9 @@ enum {
   #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION		(1 << 8)
   #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE			(1 << 0)
 
+#define GEN8_L3CNTLREG	_MMIO(0x7034)
+  #define GEN8_ERRDETBCTRL (1 << 9)
+
 #define GEN11_COMMON_SLICE_CHICKEN3		_MMIO(0x7304)
   #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC	(1 << 11)
 
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 6decd432f4d3b12a1455a00a87491e8391258522..841b8e515f4d6fdc95cc317bd57df34aefebc161 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -518,6 +518,12 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
 	struct drm_i915_private *i915 = engine->i915;
 	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
+	/* WaDisableBankHangMode:icl */
+	wa_write(wal,
+		 GEN8_L3CNTLREG,
+		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
+		 GEN8_ERRDETBCTRL);
+
 	/* Wa_1604370585:icl (pre-prod)
 	 * Formerly known as WaPushConstantDereferenceHoldDisable
 	 */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index be13140967b4e8a294263ed2632673aee73368b0..b854f471e9e5f4068eb49e5c28ebd4e11be1f050 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -502,6 +502,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
 static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
 					   struct drm_plane_state *new_state)
 {
+	struct drm_framebuffer *old_fb = plane->state->fb;
+
 	plane->state->src_x = new_state->src_x;
 	plane->state->src_y = new_state->src_y;
 	plane->state->crtc_x = new_state->crtc_x;
@@ -524,6 +526,8 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
 
 	*to_mdp5_plane_state(plane->state) =
 		*to_mdp5_plane_state(new_state);
+
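+	/* The state copy above moved the new fb into plane->state; hand the old fb back to new_state so cleanup_fb() releases it. */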
+	new_state->fb = old_fb;
 }
 
 static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index ff0fa38aee72c03b07e60c24bf9167eea7efda8b..54da9c6bc8d57172a0d9ec8287e032dd4cfdb016 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,12 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __NVKM_FIRMWARE_H__
 #define __NVKM_FIRMWARE_H__
-
-#include <core/device.h>
-
-int nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
-		      const struct firmware **fw);
-
-void nvkm_firmware_put(const struct firmware *fw);
-
+#include <core/subdev.h>
+
+int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname,
+			      int min_version, int max_version,
+			      const struct firmware **);
+int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname,
+		      const struct firmware **);
+void nvkm_firmware_put(const struct firmware *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 058ff46b5f16676ee56ab9323252109724a31595..092acdec2c39f77af23abfbb3481ac771be92374 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -24,7 +24,7 @@
 
 /**
  * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
- * @device	device that will use that firmware
+ * @subdev	subdevice that will use that firmware
  * @fwname	name of firmware file to load
  * @fw		firmware structure to load to
  *
@@ -32,9 +32,11 @@
  * Firmware files released by NVIDIA will always follow this format.
  */
 int
-nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
-		  const struct firmware **fw)
+nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
+			  int min_version, int max_version,
+			  const struct firmware **fw)
 {
+	struct nvkm_device *device = subdev->device;
 	char f[64];
 	char cname[16];
 	int i;
@@ -48,8 +50,29 @@ nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
 		cname[i] = tolower(cname[i]);
 	}
 
-	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
-	return request_firmware(fw, f, device->dev);
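+	/* Try firmware names from the newest accepted version down to the oldest; version 0 maps to the unversioned filename. */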
+	for (i = max_version; i >= min_version; i--) {
+		if (i != 0)
+			snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i);
+		else
+			snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+
+		if (!firmware_request_nowarn(fw, f, device->dev)) {
+			nvkm_debug(subdev, "firmware \"%s\" loaded\n", f);
+			return i;
+		}
+
+		nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
+	}
+
+	nvkm_error(subdev, "failed to load firmware \"%s\"", fwname);
+	return -ENOENT;
+}
+
+int
+nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname,
+		  const struct firmware **fw)
+{
+	return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw);
 }
 
 /**
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 81a13cf9a292fef896270d56e9d0eddcce127462..c578deb5867a8d0cd2495a5f9bf55d6330bfe86a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2115,12 +2115,10 @@ int
 gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
 		 struct gf100_gr_fuc *fuc)
 {
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
 	const struct firmware *fw;
 	int ret;
 
-	ret = nvkm_firmware_get(device, fwname, &fw);
+	ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw);
 	if (ret) {
 		ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
 		if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
index 75dc06557877670ad5b2529d556584514bf5ada4..dc80985cf0933a647855f470848cbe80bbe0a246 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
@@ -36,7 +36,7 @@ nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
 	void *blob;
 	int ret;
 
-	ret = nvkm_firmware_get(subdev->device, name, &fw);
+	ret = nvkm_firmware_get(subdev, name, &fw);
 	if (ret)
 		return ERR_PTR(ret);
 	if (fw->size < min_size) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 1df09ed6fe6d8025d16de873532e1741fa7a2498..4fd4cfe459b8ac5541eaf0eb8d1f70c3a2ab71e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -229,6 +229,8 @@ struct acr_r352_lsf_wpr_header {
 struct ls_ucode_img_r352 {
 	struct ls_ucode_img base;
 
+	const struct acr_r352_lsf_func *func;
+
 	struct acr_r352_lsf_wpr_header wpr_header;
 	struct acr_r352_lsf_lsb_header lsb_header;
 };
@@ -243,6 +245,7 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
 			   enum nvkm_secboot_falcon falcon_id)
 {
 	const struct nvkm_subdev *subdev = acr->base.subdev;
+	const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
 	struct ls_ucode_img_r352 *img;
 	int ret;
 
@@ -252,15 +255,16 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
 
 	img->base.falcon_id = falcon_id;
 
-	ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
-
-	if (ret) {
+	ret = func->load(sb, func->version_max, &img->base);
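+	/* load() returns the index of the firmware version that was found, or a negative error code. */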
+	if (ret < 0) {
 		kfree(img->base.ucode_data);
 		kfree(img->base.sig);
 		kfree(img);
 		return ERR_PTR(ret);
 	}
 
+	img->func = func->version[ret];
+
 	/* Check that the signature size matches our expectations... */
 	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
 		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
@@ -302,8 +306,7 @@ acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
 	struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
 	struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
 	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
-	const struct acr_r352_ls_func *func =
-					    acr->func->ls_func[_img->falcon_id];
+	const struct acr_r352_lsf_func *func = img->func;
 
 	/* Fill WPR header */
 	whdr->falcon_id = _img->falcon_id;
@@ -419,8 +422,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 
 	/* Figure out how large we need gdesc to be. */
 	list_for_each_entry(_img, imgs, node) {
-		const struct acr_r352_ls_func *ls_func =
-					    acr->func->ls_func[_img->falcon_id];
+		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
+		const struct acr_r352_lsf_func *ls_func = img->func;
 
 		max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
 	}
@@ -433,8 +436,7 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 
 	list_for_each_entry(_img, imgs, node) {
 		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
-		const struct acr_r352_ls_func *ls_func =
-					    acr->func->ls_func[_img->falcon_id];
+		const struct acr_r352_lsf_func *ls_func = img->func;
 
 		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
 				      sizeof(img->wpr_header));
@@ -1063,20 +1065,36 @@ acr_r352_dtor(struct nvkm_acr *_acr)
 	kfree(acr);
 }
 
+static const struct acr_r352_lsf_func
+acr_r352_ls_fecs_func_0 = {
+	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
+};
+
 const struct acr_r352_ls_func
 acr_r352_ls_fecs_func = {
 	.load = acr_ls_ucode_load_fecs,
+	.version_max = 0,
+	.version = {
+		&acr_r352_ls_fecs_func_0,
+	}
+};
+
+static const struct acr_r352_lsf_func
+acr_r352_ls_gpccs_func_0 = {
 	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
+	/* GPCCS will be loaded using PRI */
+	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
 };
 
 const struct acr_r352_ls_func
 acr_r352_ls_gpccs_func = {
 	.load = acr_ls_ucode_load_gpccs,
-	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-	/* GPCCS will be loaded using PRI */
-	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+	.version_max = 0,
+	.version = {
+		&acr_r352_ls_gpccs_func_0,
+	}
 };
 
 
@@ -1150,12 +1168,20 @@ acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
 	desc->argv = addr_args;
 }
 
+static const struct acr_r352_lsf_func
+acr_r352_ls_pmu_func_0 = {
+	.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
+};
+
 static const struct acr_r352_ls_func
 acr_r352_ls_pmu_func = {
 	.load = acr_ls_ucode_load_pmu,
-	.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
 	.post_run = acr_ls_pmu_post_run,
+	.version_max = 0,
+	.version = {
+		&acr_r352_ls_pmu_func_0,
+	}
 };
 
 const struct acr_r352_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
index 3d58ab87156325de7097c51d6820c01b77414ecb..e516cab849dd1915926d2117839d31c3ac9b7d57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -47,24 +47,34 @@ hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
 }
 
 /**
- * struct acr_r352_ls_func - manages a single LS firmware
+ * struct acr_r352_lsf_func - manages a specific LS firmware version
  *
- * @load: load the external firmware into a ls_ucode_img
  * @generate_bl_desc: function called on a block of bl_desc_size to generate the
  *		      proper bootloader descriptor for this LS firmware
  * @bl_desc_size: size of the bootloader descriptor
- * @post_run: hook called right after the ACR is executed
  * @lhdr_flags: LS flags
  */
-struct acr_r352_ls_func {
-	int (*load)(const struct nvkm_secboot *, struct ls_ucode_img *);
+struct acr_r352_lsf_func {
 	void (*generate_bl_desc)(const struct nvkm_acr *,
 				 const struct ls_ucode_img *, u64, void *);
 	u32 bl_desc_size;
-	int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
 	u32 lhdr_flags;
 };
 
+/**
+ * struct acr_r352_ls_func - manages a single LS falcon
+ *
+ * @load: load the external firmware into a ls_ucode_img
+ * @post_run: hook called right after the ACR is executed
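+ * @version_max: highest valid index into @version
+ * @version: per-version function tables, indexed by the value returned by @load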
+ */
+struct acr_r352_ls_func {
+	int (*load)(const struct nvkm_secboot *, int maxver,
+		    struct ls_ucode_img *);
+	int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
+	int version_max;
+	const struct acr_r352_lsf_func *version[];
+};
+
 struct acr_r352;
 
 /**
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
index 14b36ef93628ea60a172f1079e3ec140ae81f72a..f6b2d20d7fc3dd3d098fcfbc7577149a0f7580ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
@@ -66,20 +66,36 @@ acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
 	bl_desc->data_size = hdr->data_size;
 }
 
+static const struct acr_r352_lsf_func
+acr_r361_ls_fecs_func_0 = {
+	.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+};
+
 const struct acr_r352_ls_func
 acr_r361_ls_fecs_func = {
 	.load = acr_ls_ucode_load_fecs,
+	.version_max = 0,
+	.version = {
+		&acr_r361_ls_fecs_func_0,
+	}
+};
+
+static const struct acr_r352_lsf_func
+acr_r361_ls_gpccs_func_0 = {
 	.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+	/* GPCCS will be loaded using PRI */
+	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
 };
 
 const struct acr_r352_ls_func
 acr_r361_ls_gpccs_func = {
 	.load = acr_ls_ucode_load_gpccs,
-	.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
-	/* GPCCS will be loaded using PRI */
-	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+	.version_max = 0,
+	.version = {
+		&acr_r361_ls_gpccs_func_0,
+	}
 };
 
 struct acr_r361_pmu_bl_desc {
@@ -125,12 +141,20 @@ acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
 	desc->argv = addr_args;
 }
 
+static const struct acr_r352_lsf_func
+acr_r361_ls_pmu_func_0 = {
+	.generate_bl_desc = acr_r361_generate_pmu_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
+};
+
 const struct acr_r352_ls_func
 acr_r361_ls_pmu_func = {
 	.load = acr_ls_ucode_load_pmu,
-	.generate_bl_desc = acr_r361_generate_pmu_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
 	.post_run = acr_ls_pmu_post_run,
+	.version_max = 0,
+	.version = {
+		&acr_r361_ls_pmu_func_0,
+	}
 };
 
 static void
@@ -164,12 +188,20 @@ acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
 	desc->argv = 0x01000000;
 }
 
-const struct acr_r352_ls_func
-acr_r361_ls_sec2_func = {
-	.load = acr_ls_ucode_load_sec2,
+const struct acr_r352_lsf_func
+acr_r361_ls_sec2_func_0 = {
 	.generate_bl_desc = acr_r361_generate_sec2_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
+};
+
+static const struct acr_r352_ls_func
+acr_r361_ls_sec2_func = {
+	.load = acr_ls_ucode_load_sec2,
 	.post_run = acr_ls_sec2_post_run,
+	.version_max = 0,
+	.version = {
+		&acr_r361_ls_sec2_func_0,
+	}
 };
 
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
index f9f978daadb96270635457b89affe06b87455a9b..38dec93779c886ac5e5103bfdae46d37796f8aad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
@@ -67,6 +67,5 @@ void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
 extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
 extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
 extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
-extern const struct acr_r352_ls_func acr_r361_ls_sec2_func;
-
+extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
index 978ad079036705cbb10b2b87fafa9507dc79317c..472ced29da7e5f01053d9e61b16a73bc9ebc1d84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -22,6 +22,7 @@
 
 #include "acr_r367.h"
 #include "acr_r361.h"
+#include "acr_r370.h"
 
 #include <core/gpuobj.h>
 
@@ -100,6 +101,8 @@ struct acr_r367_lsf_wpr_header {
 struct ls_ucode_img_r367 {
 	struct ls_ucode_img base;
 
+	const struct acr_r352_lsf_func *func;
+
 	struct acr_r367_lsf_wpr_header wpr_header;
 	struct acr_r367_lsf_lsb_header lsb_header;
 };
@@ -111,6 +114,7 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
 			   enum nvkm_secboot_falcon falcon_id)
 {
 	const struct nvkm_subdev *subdev = acr->base.subdev;
+	const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
 	struct ls_ucode_img_r367 *img;
 	int ret;
 
@@ -120,14 +124,16 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
 
 	img->base.falcon_id = falcon_id;
 
-	ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
-	if (ret) {
+	ret = func->load(sb, func->version_max, &img->base);
+	if (ret < 0) {
 		kfree(img->base.ucode_data);
 		kfree(img->base.sig);
 		kfree(img);
 		return ERR_PTR(ret);
 	}
 
+	img->func = func->version[ret];
+
 	/* Check that the signature size matches our expectations... */
 	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
 		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
@@ -158,8 +164,7 @@ acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
 	struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
 	struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
 	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
-	const struct acr_r352_ls_func *func =
-					    acr->func->ls_func[_img->falcon_id];
+	const struct acr_r352_lsf_func *func = img->func;
 
 	/* Fill WPR header */
 	whdr->falcon_id = _img->falcon_id;
@@ -269,8 +274,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 	u8 *gdesc;
 
 	list_for_each_entry(_img, imgs, node) {
-		const struct acr_r352_ls_func *ls_func =
-					    acr->func->ls_func[_img->falcon_id];
+		struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
+		const struct acr_r352_lsf_func *ls_func = img->func;
 
 		max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
 	}
@@ -283,8 +288,7 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 
 	list_for_each_entry(_img, imgs, node) {
 		struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
-		const struct acr_r352_ls_func *ls_func =
-					    acr->func->ls_func[_img->falcon_id];
+		const struct acr_r352_lsf_func *ls_func = img->func;
 
 		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
 				      sizeof(img->wpr_header));
@@ -378,6 +382,17 @@ acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
 	}
 }
 
+static const struct acr_r352_ls_func
+acr_r367_ls_sec2_func = {
+	.load = acr_ls_ucode_load_sec2,
+	.post_run = acr_ls_sec2_post_run,
+	.version_max = 1,
+	.version = {
+		&acr_r361_ls_sec2_func_0,
+		&acr_r370_ls_sec2_func_0,
+	}
+};
+
 const struct acr_r352_func
 acr_r367_func = {
 	.fixup_hs_desc = acr_r367_fixup_hs_desc,
@@ -391,7 +406,7 @@ acr_r367_func = {
 		[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
 		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
 		[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
-		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
+		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
 	},
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
index 2f890dfae7fc16cd7ad037013b42e48e2f3014dc..e821d0fd62171801047334b3344194744f3ac586 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
@@ -49,20 +49,36 @@ acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
 	desc->data_size = pdesc->app_resident_data_size;
 }
 
+static const struct acr_r352_lsf_func
+acr_r370_ls_fecs_func_0 = {
+	.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
 const struct acr_r352_ls_func
 acr_r370_ls_fecs_func = {
 	.load = acr_ls_ucode_load_fecs,
+	.version_max = 0,
+	.version = {
+		&acr_r370_ls_fecs_func_0,
+	}
+};
+
+static const struct acr_r352_lsf_func
+acr_r370_ls_gpccs_func_0 = {
 	.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+	/* GPCCS will be loaded using PRI */
+	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
 };
 
 const struct acr_r352_ls_func
 acr_r370_ls_gpccs_func = {
 	.load = acr_ls_ucode_load_gpccs,
-	.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-	/* GPCCS will be loaded using PRI */
-	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+	.version_max = 0,
+	.version = {
+		&acr_r370_ls_gpccs_func_0,
+	}
 };
 
 static void
@@ -95,12 +111,20 @@ acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
 	desc->argv = 0x01000000;
 }
 
+const struct acr_r352_lsf_func
+acr_r370_ls_sec2_func_0 = {
+	.generate_bl_desc = acr_r370_generate_sec2_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
 const struct acr_r352_ls_func
 acr_r370_ls_sec2_func = {
 	.load = acr_ls_ucode_load_sec2,
-	.generate_bl_desc = acr_r370_generate_sec2_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
 	.post_run = acr_ls_sec2_post_run,
+	.version_max = 0,
+	.version = {
+		&acr_r370_ls_sec2_func_0,
+	}
 };
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
index 3426f86a15e4766c52ed2de0514c0c91915bfe84..2efed6f995ad03fd64c895286f73985e52bda0a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
@@ -46,4 +46,5 @@ struct acr_r370_flcn_bl_desc {
 void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
 extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
 extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
+extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
 #endif
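
The per-falcon tables above now split into two levels: a per-falcon acr_r352_ls_func carrying load/post_run and a version table, and a per-version acr_r352_lsf_func carrying the boot-loader descriptor hooks. The authoritative declarations live in acr_r352.h (presumably updated earlier in this series); the sketch below is only an assumption of their shape, inferred from the initializers and prototypes in this patch.

/*
 * Assumed shape, inferred from the initializers above; acr_r352.h is
 * authoritative.  The value returned by load() indexes version[].
 */
struct acr_r352_ls_func {
	int (*load)(const struct nvkm_secboot *, int maxver,
		    struct ls_ucode_img *);
	int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
	int version_max;	/* highest valid index into version[] */
	const struct acr_r352_lsf_func *version[];
};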
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
index 7bdef93cb7aeacef382476d78b1d46812a275838..8f0647766038604635d90def01909cc5be75e378 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
@@ -54,12 +54,20 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
 	desc->argv = addr_args;
 }
 
+static const struct acr_r352_lsf_func
+acr_r375_ls_pmu_func_0 = {
+	.generate_bl_desc = acr_r375_generate_pmu_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
 const struct acr_r352_ls_func
 acr_r375_ls_pmu_func = {
 	.load = acr_ls_ucode_load_pmu,
-	.generate_bl_desc = acr_r375_generate_pmu_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
 	.post_run = acr_ls_pmu_post_run,
+	.version_max = 0,
+	.version = {
+		&acr_r375_ls_pmu_func_0,
+	}
 };
 
 const struct acr_r352_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
index 9b7c402594e80c16a9fe05e2f9a8147d5c2c1405..d43f906da3a7bbcb451a40f24aa27398e91134a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -147,11 +147,15 @@ struct fw_bl_desc {
 	u32 data_size;
 };
 
-int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *);
-int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *);
-int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
+			   struct ls_ucode_img *);
+int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
+			    struct ls_ucode_img *);
+int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
+			  struct ls_ucode_img *);
 int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
+			   struct ls_ucode_img *);
 int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
 
 #endif
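
The extra int parameter threads the maximum acceptable firmware version down to each loader, and the return value changes meaning from 0/-errno to version/-errno. A minimal sketch of how a caller is expected to consume this, mirroring acr_r367_ls_ucode_img_load above (names as in this patch, shown as a fragment rather than a complete function):

	const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
	int ver;

	/* Ask for anything up to version_max; get the version found back. */
	ver = func->load(sb, func->version_max, &img->base);
	if (ver < 0)
		return ERR_PTR(ver);

	/* Select the matching per-version descriptor hooks. */
	img->func = func->version[ver];

Note that the GR loaders in the next file accept maxver but do not use it yet, so a successful load still returns 0, matching the version-0-only tables above.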
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index 1b0c793c0192b940103701142e5561ffb1b5263a..821d3b2bdb1fa7503785d4b74d511b96dac8c1d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -90,30 +90,30 @@ ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
  * blob. Also generate the corresponding ucode descriptor.
  */
 static int
-ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
-		     const char *falcon_name)
+ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
+		     struct ls_ucode_img *img, const char *falcon_name)
 {
 	const struct firmware *bl, *code, *data, *sig;
 	char f[64];
 	int ret;
 
 	snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
-	ret = nvkm_firmware_get(subdev->device, f, &bl);
+	ret = nvkm_firmware_get(subdev, f, &bl);
 	if (ret)
 		goto error;
 
 	snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
-	ret = nvkm_firmware_get(subdev->device, f, &code);
+	ret = nvkm_firmware_get(subdev, f, &code);
 	if (ret)
 		goto free_bl;
 
 	snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
-	ret = nvkm_firmware_get(subdev->device, f, &data);
+	ret = nvkm_firmware_get(subdev, f, &data);
 	if (ret)
 		goto free_inst;
 
 	snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
-	ret = nvkm_firmware_get(subdev->device, f, &sig);
+	ret = nvkm_firmware_get(subdev, f, &sig);
 	if (ret)
 		goto free_data;
 
@@ -146,13 +146,15 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
 }
 
 int
-acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
+		       struct ls_ucode_img *img)
 {
-	return ls_ucode_img_load_gr(&sb->subdev, img, "fecs");
+	return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
 }
 
 int
-acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
+			struct ls_ucode_img *img)
 {
-	return ls_ucode_img_load_gr(&sb->subdev, img, "gpccs");
+	return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index 1e1f1c635cabc468f1f44cb142c929805b427952..77c13b096a67833b6b8c945554df370994d5a1d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -39,32 +39,32 @@
  */
 static int
 acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
-			   struct ls_ucode_img *img)
+			   int maxver, struct ls_ucode_img *img)
 {
 	const struct firmware *image, *desc, *sig;
 	char f[64];
-	int ret;
+	int ver, ret;
 
 	snprintf(f, sizeof(f), "%s/image", name);
-	ret = nvkm_firmware_get(subdev->device, f, &image);
-	if (ret)
-		return ret;
+	ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
+	if (ver < 0)
+		return ver;
 	img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
 	nvkm_firmware_put(image);
 	if (!img->ucode_data)
 		return -ENOMEM;
 
 	snprintf(f, sizeof(f), "%s/desc", name);
-	ret = nvkm_firmware_get(subdev->device, f, &desc);
-	if (ret)
+	ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
+	if (ret < 0)
 		return ret;
 	memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
 	img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
 	nvkm_firmware_put(desc);
 
 	snprintf(f, sizeof(f), "%s/sig", name);
-	ret = nvkm_firmware_get(subdev->device, f, &sig);
-	if (ret)
+	ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
+	if (ret < 0)
 		return ret;
 	img->sig_size = sig->size;
 	img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
@@ -72,7 +72,7 @@ acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
 	if (!img->sig)
 		return -ENOMEM;
 
-	return 0;
+	return ver;
 }
 
 static int
@@ -99,12 +99,13 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
 }
 
 int
-acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
+		      struct ls_ucode_img *img)
 {
 	struct nvkm_pmu *pmu = sb->subdev.device->pmu;
 	int ret;
 
-	ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", img);
+	ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
 	if (ret)
 		return ret;
 
@@ -136,14 +137,15 @@ acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
 }
 
 int
-acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
+		       struct ls_ucode_img *img)
 {
 	struct nvkm_sec2 *sec = sb->subdev.device->sec2;
-	int ret;
+	int ver, ret;
 
-	ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", img);
-	if (ret)
-		return ret;
+	ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
+	if (ver < 0)
+		return ver;
 
 	/* Allocate the PMU queue corresponding to the FW version */
 	ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
@@ -151,7 +153,7 @@ acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
 	if (ret)
 		return ret;
 
-	return 0;
+	return ver;
 }
 
 int
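
The msgqueue loader now locates the image within a version range and then pins the descriptor and signature to the exact version that was found, by passing (ver, ver) as the range. The sketch below illustrates the behaviour assumed of nvkm_firmware_get_version; the helper itself is not part of this hunk, and the filename scheme shown is illustrative only.

/*
 * Assumed behaviour: try versions from max down to min and return the
 * version number that was found, or a negative error if none loads.
 */
static int
firmware_get_version_sketch(const struct nvkm_subdev *subdev,
			    const char *base, int min, int max,
			    const struct firmware **fw)
{
	char name[64];
	int ver;

	for (ver = max; ver >= min; ver--) {
		/* e.g. "sec2/image" for version 0, "sec2/image-1" for 1 */
		if (ver > 0)
			snprintf(name, sizeof(name), "%s-%d", base, ver);
		else
			snprintf(name, sizeof(name), "%s", base);

		if (nvkm_firmware_get(subdev, name, fw) == 0)
			return ver;
	}
	return -ENOENT;
}

With this, acr_ls_ucode_load_sec2 can report the version it found to its caller, while acr_ls_ucode_load_pmu still treats any non-zero return from the msgqueue helper as an error, which holds as long as the PMU table only lists version 0.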
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 20a9c296d0272d7ef8ad89cf6f66a81deace12c4..3bb242f7d32f9d193c98447d04b439f9fad05a7c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -924,29 +924,17 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
 					  struct drm_plane_state *new_state)
 {
 	struct vop *vop = to_vop(plane->state->crtc);
-	struct drm_plane_state *plane_state;
-
-	plane_state = plane->funcs->atomic_duplicate_state(plane);
-	plane_state->crtc_x = new_state->crtc_x;
-	plane_state->crtc_y = new_state->crtc_y;
-	plane_state->crtc_h = new_state->crtc_h;
-	plane_state->crtc_w = new_state->crtc_w;
-	plane_state->src_x = new_state->src_x;
-	plane_state->src_y = new_state->src_y;
-	plane_state->src_h = new_state->src_h;
-	plane_state->src_w = new_state->src_w;
-
-	if (plane_state->fb != new_state->fb)
-		drm_atomic_set_fb_for_plane(plane_state, new_state->fb);
-
-	swap(plane_state, plane->state);
-
-	if (plane->state->fb && plane->state->fb != new_state->fb) {
-		drm_framebuffer_get(plane->state->fb);
-		WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
-		drm_flip_work_queue(&vop->fb_unref_work, plane->state->fb);
-		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
-	}
+	struct drm_framebuffer *old_fb = plane->state->fb;
+
+	plane->state->crtc_x = new_state->crtc_x;
+	plane->state->crtc_y = new_state->crtc_y;
+	plane->state->crtc_h = new_state->crtc_h;
+	plane->state->crtc_w = new_state->crtc_w;
+	plane->state->src_x = new_state->src_x;
+	plane->state->src_y = new_state->src_y;
+	plane->state->src_h = new_state->src_h;
+	plane->state->src_w = new_state->src_w;
+	swap(plane->state->fb, new_state->fb);
 
 	if (vop->is_enabled) {
 		rockchip_drm_psr_inhibit_get_state(new_state->state);
@@ -955,9 +943,22 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
 		vop_cfg_done(vop);
 		spin_unlock(&vop->reg_lock);
 		rockchip_drm_psr_inhibit_put_state(new_state->state);
-	}
 
-	plane->funcs->atomic_destroy_state(plane, plane_state);
+		/*
+		 * A scanout of the old framebuffer may still be in progress,
+		 * so we cannot drop its reference yet. Instead, take a
+		 * reference to old_fb and queue a worker to release it later.
+		 * FIXME: if we perform 500 async_update calls before the
+		 * vblank, then we can have 500 different framebuffers waiting
+		 * to be released.
+		 */
+		if (old_fb && plane->state->fb != old_fb) {
+			drm_framebuffer_get(old_fb);
+			WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
+			drm_flip_work_queue(&vop->fb_unref_work, old_fb);
+			set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
+		}
+	}
 }
 
 static const struct drm_plane_helper_funcs plane_helper_funcs = {
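
The deferred release above relies on the other half of the mechanism that already exists in this driver: the drm_flip_work handler behind vop->fb_unref_work, which runs once the vblank has passed and drops both references taken in async_update. The handler name and body below are a sketch based on the names used above, not a verbatim copy of the driver.

/*
 * Flip-work handler: release the vblank reference and the framebuffer
 * reference that vop_plane_atomic_async_update() took for old_fb.
 * Assumes the vop structure embeds its drm_crtc as 'crtc'.
 */
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_put(fb);
}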
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4d918d3e4858dcc859c4a110adcf9f5fe856e369..afc80b245ea3b75d945ce92fd68edf77c6e3758c 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1025,7 +1025,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
 {
 	struct vc4_plane_state *vc4_state, *new_vc4_state;
 
-	drm_atomic_set_fb_for_plane(plane->state, state->fb);
+	swap(plane->state->fb, state->fb);
 	plane->state->crtc_x = state->crtc_x;
 	plane->state->crtc_y = state->crtc_y;
 	plane->state->crtc_w = state->crtc_w;
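
Both drivers now follow the same pattern, which the documentation change below makes explicit: copy the plane coordinates and swap the framebuffer pointers, so that the core's cleanup of the (new) async state drops the reference on the old framebuffer rather than on the one just programmed. A minimal, driver-agnostic sketch (my_async_update is hypothetical, not part of either driver):

/* Hypothetical .atomic_async_update hook showing only the state
 * bookkeeping required by the contract documented below. */
static void my_async_update(struct drm_plane *plane,
			    struct drm_plane_state *new_state)
{
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;
	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;

	/* Keep the new fb in plane->state; the old one ends up in
	 * new_state and is released when the async state is cleaned up. */
	swap(plane->state->fb, new_state->fb);

	/* ...program the hardware from plane->state here... */
}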
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index f9c94c2a1364699a2c89dd535b52a2d204e4ef40..f7bbd0b0ecd1d23b54337046572afbf8cc233c4b 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1185,6 +1185,14 @@ struct drm_plane_helper_funcs {
 	 * current one with the new plane configurations in the new
 	 * plane_state.
 	 *
+	 * Drivers should also swap the framebuffers between the current
+	 * plane state (&drm_plane.state) and new_state.
+	 * This is required because cleanup for async commits is performed
+	 * on the new state, rather than on the old state as for traditional
+	 * commits. Since the reference that must be released is the one
+	 * held on the current (old) fb, not on the brand new one, swap the
+	 * two in the driver during the async commit.
+	 *
 	 * FIXME:
 	 *  - It only works for single plane updates
 	 *  - Async Pageflips are not supported yet