/*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_snps_phy.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
#include "gt/intel_rps.h"
#include "gt/gen8_ppgtt.h"
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
const struct intel_link_m_n *m_n,
const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
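/*
 * Display Page Table (DPT): a small per-framebuffer page table used by the
 * display engine on platforms whose planes scan out through a DPT instead
 * of directly through the GGTT. The embedded vm must remain the first
 * member so that i915_vm_to_dpt() can use container_of() (enforced by the
 * BUILD_BUG_ON below).
 */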
struct i915_dpt {
struct i915_address_space vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
void __iomem *iomem;
};
#define i915_is_dpt(vm) ((vm)->is_dpt)
static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
GEM_BUG_ON(!i915_is_dpt(vm));
return container_of(vm, struct i915_dpt, vm);
}
#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
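/* Write one gen8-format (64-bit) PTE with a single 64-bit store. */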
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
}
static void dpt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
enum i915_cache_level level,
u32 flags)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
gen8_pte_t __iomem *base = dpt->iomem;
gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
vm->pte_encode(addr, level, flags));
}
static void dpt_insert_entries(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
u32 flags)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
gen8_pte_t __iomem *base = dpt->iomem;
const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
struct sgt_iter sgt_iter;
dma_addr_t addr;
int i;
/*
* Note that we ignore PTE_READ_ONLY here. The caller must be careful
* not to allow the user to override access to a read only page.
*/
i = vma->node.start / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, sgt_iter, vma->pages)
gen8_set_pte(&base[i++], pte_encode | addr);
}
static void dpt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
}
static void dpt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
if (i915_gem_object_is_lmem(obj))
pte_flags |= PTE_LM;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
/*
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
}
static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
vm->clear_range(vm, vma->node.start, vma->size);
}
static void dpt_cleanup(struct i915_address_space *vm)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
i915_gem_object_put(dpt->obj);
}
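/*
 * Create the DPT backing object and address space for @fb: one 8-byte
 * gen8 PTE per 4KiB GTT page of the (possibly remapped) framebuffer view,
 * with the PTE array itself placed in local memory when available and in
 * stolen memory otherwise.
 */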
static struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
struct drm_i915_private *i915 = to_i915(obj->dev);
struct drm_i915_gem_object *dpt_obj;
struct i915_address_space *vm;
struct i915_dpt *dpt;
size_t size;
int ret;
if (intel_fb_needs_pot_stride_remap(fb))
size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
else
size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
if (HAS_LMEM(i915))
dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
else
dpt_obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(dpt_obj))
return ERR_CAST(dpt_obj);
ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
if (ret) {
i915_gem_object_put(dpt_obj);
return ERR_PTR(ret);
}
dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
if (!dpt) {
i915_gem_object_put(dpt_obj);
return ERR_PTR(-ENOMEM);
}
vm = &dpt->vm;
vm->gt = &i915->gt;
vm->i915 = i915;
vm->dma = i915->drm.dev;
vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
vm->is_dpt = true;
i915_address_space_init(vm, VM_CLASS_DPT);
vm->insert_page = dpt_insert_page;
vm->clear_range = dpt_clear_range;
vm->insert_entries = dpt_insert_entries;
vm->cleanup = dpt_cleanup;
vm->vma_ops.bind_vma = dpt_bind_vma;
vm->vma_ops.unbind_vma = dpt_unbind_vma;
vm->vma_ops.set_pages = ggtt_set_pages;
vm->vma_ops.clear_pages = clear_pages;
vm->pte_encode = gen8_ggtt_pte_encode;
dpt->obj = dpt_obj;
return &dpt->vm;
}
static void intel_dpt_destroy(struct i915_address_space *vm)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
i915_vm_close(&dpt->vm);
}
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
/* Obtain SKU information */
hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
return vco_freq[hpll_freq] * 1000;
}
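/*
 * CCK clock readout: the divider field encodes 2 * ref / freq - 1, so the
 * resulting frequency is 2 * ref / (divider + 1). Illustrative example:
 * a 1600000 kHz HPLL reference with a divider field of 15 yields
 * 2 * 1600000 / 16 = 200000 kHz.
 */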
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq)
{
u32 val;
int divider;
val = vlv_cck_read(dev_priv, reg);
divider = val & CCK_FREQUENCY_VALUES;
drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
(divider << CCK_FREQUENCY_STATUS_SHIFT),
"%s change in progress\n", name);
return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg)
{
int hpll;
vlv_cck_get(dev_priv);
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
vlv_cck_put(dev_priv);
return hpll;
}
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;
dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
CCK_CZ_CLOCK_CONTROL);
drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
dev_priv->czclk_freq);
}
/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
if (enable)
intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
else
intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}
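/*
 * Transcoder port sync: a slave transcoder records its master in
 * master_transcoder, while a master carries a non-zero mask of its
 * slaves in sync_mode_slaves_mask.
 */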
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
return crtc_state->sync_mode_slaves_mask != 0;
}
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
return is_trans_port_sync_master(crtc_state) ||
is_trans_port_sync_slave(crtc_state);
}
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
if (DISPLAY_VER(dev_priv) == 2)
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
line1 = intel_de_read(dev_priv, reg) & line_mask;
msleep(5);
line2 = intel_de_read(dev_priv, reg) & line_mask;
return line1 != line2;
}
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Wait for the display line to settle/start moving */
if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
drm_err(&dev_priv->drm,
"pipe %c scanline %s wait timed out\n",
pipe_name(pipe), onoff(state));
}
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
wait_for_pipe_scanline_moving(crtc, false);
}
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
wait_for_pipe_scanline_moving(crtc, true);
}
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (DISPLAY_VER(dev_priv) >= 4) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
if (intel_de_wait_for_clear(dev_priv, reg,
I965_PIPECONF_ACTIVE, 100))
drm_WARN(&dev_priv->drm, 1,
"pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
}
/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
u32 val;
bool cur_state;
val = intel_de_read(dev_priv, DPLL(pipe));
cur_state = !!(val & DPLL_VCO_ENABLE);
I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
onoff(state), onoff(cur_state));
}
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
u32 val;
bool cur_state;
vlv_cck_get(dev_priv);
val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
vlv_cck_put(dev_priv);
cur_state = val & DSI_PLL_VCO_EN;
I915_STATE_WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
onoff(state), onoff(cur_state));
}
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
bool cur_state;
if (HAS_DDI(dev_priv)) {
/*
* DDI does not have a specific FDI_TX register.
*
* FDI is never fed from EDP transcoder
* so pipe->transcoder cast is fine here.
*/
enum transcoder cpu_transcoder = (enum transcoder)pipe;
u32 val = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
cur_state = !!(val & FDI_TX_ENABLE);
}
I915_STATE_WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
u32 val;
bool cur_state;
val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
u32 val;
/* ILK FDI PLL is always enabled */
if (IS_IRONLAKE(dev_priv))
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
if (HAS_DDI(dev_priv))
return;
val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
u32 val;
bool cur_state;
val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_PLL_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
onoff(state), onoff(cur_state));
}
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
return;
if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
pp_reg = PP_CONTROL(0);
port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPA:
g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPC:
g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPD:
g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
break;
default:
MISSING_CASE(port_sel);
break;
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
} else {
u32 port_sel;
pp_reg = PP_CONTROL(0);
port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
drm_WARN_ON(&dev_priv->drm,
port_sel != PANEL_PORT_SELECT_LVDS);
intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
val = intel_de_read(dev_priv, pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
I915_STATE_WARN(panel_pipe == pipe && locked,
"panel assertion failure, pipe %c regs locked\n",
pipe_name(pipe));
}
void assert_pipe(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, bool state)
{
bool cur_state;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
state = true;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wakeref) {
u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
intel_display_power_put(dev_priv, power_domain, wakeref);
} else {
cur_state = false;
}
I915_STATE_WARN(cur_state != state,
"transcoder %s assertion failure (expected %s, current %s)\n",
transcoder_name(cpu_transcoder),
onoff(state), onoff(cur_state));
}
static void assert_plane(struct intel_plane *plane, bool state)
{
enum pipe pipe;
bool cur_state;
cur_state = plane->get_hw_state(plane, &pipe);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
plane->base.name, onoff(state), onoff(cur_state));
}
#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
static void assert_planes_disabled(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane;
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
assert_plane_disabled(plane);
}
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
u32 val;
bool enabled;
val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
enabled = !!(val & TRANS_ENABLE);
I915_STATE_WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
pipe_name(pipe));
}
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, enum port port,
i915_reg_t dp_reg)
{
enum pipe port_pipe;
bool state;
state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
I915_STATE_WARN(state && port_pipe == pipe,
"PCH DP %c enabled on transcoder %c, should be disabled\n",
port_name(port), pipe_name(pipe));
I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
"IBX PCH DP %c still using transcoder B\n",
port_name(port));
}
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, enum port port,
i915_reg_t hdmi_reg)
{
enum pipe port_pipe;
bool state;
state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
I915_STATE_WARN(state && port_pipe == pipe,
"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
port_name(port), pipe_name(pipe));
I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
"IBX PCH HDMI %c still using transcoder B\n",
port_name(port));
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
enum pipe port_pipe;
assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
port_pipe == pipe,
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
port_pipe == pipe,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
/* PCH SDVOB multiplex with HDMIB */
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port,
unsigned int expected_mask)
{
u32 port_mask;
i915_reg_t dpll_reg;
switch (dig_port->base.port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
break;
case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
dpll_reg = DPLL(0);
expected_mask <<= 4;
break;
case PORT_D:
port_mask = DPLL_PORTD_READY_MASK;
dpll_reg = DPIO_PHY_STATUS;
break;
default:
BUG();
}
if (intel_de_wait_for_register(dev_priv, dpll_reg,
port_mask, expected_mask, 1000))
drm_WARN(&dev_priv->drm, 1,
"timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
intel_de_read(dev_priv, dpll_reg) & port_mask,
expected_mask);
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
reg = TRANS_CHICKEN2(pipe);
val = intel_de_read(dev_priv, reg);
/*
* Workaround: Set the timing override bit
* before enabling the pch transcoder.
*/
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
reg = PCH_TRANSCONF(pipe);
val = intel_de_read(dev_priv, reg);
pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
/*
* Make the BPC in the transcoder consistent with
* that in the pipeconf reg. For HDMI we must use 8bpc
* here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val |= PIPECONF_8BPC;
else
val |= pipeconf_val & PIPECONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
} else {
val |= TRANS_PROGRESSIVE;
}
intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
pipe_name(pipe));
}
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
u32 val, pipeconf_val;
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, PIPE_A);
val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
PIPECONF_INTERLACED_ILK)
val |= TRANS_INTERLACED;
else
val |= TRANS_PROGRESSIVE;
intel_de_write(dev_priv, LPT_TRANSCONF, val);
if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
i915_reg_t reg;
u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
/* Ports must be off as well */
assert_pch_ports_disabled(dev_priv, pipe);
reg = PCH_TRANSCONF(pipe);
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_ENABLE;
intel_de_write(dev_priv, reg, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
intel_de_write(dev_priv, reg, val);
}
}
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
u32 val;
val = intel_de_read(dev_priv, LPT_TRANSCONF);
val &= ~TRANS_ENABLE;
intel_de_write(dev_priv, LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (HAS_PCH_LPT(dev_priv))
return PIPE_A;
else
return crtc->pipe;
}
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val;
drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
assert_planes_disabled(crtc);
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
if (HAS_GMCH(dev_priv)) {
if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
} else {
if (new_crtc_state->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv,
intel_crtc_pch_transcoder(crtc));
assert_fdi_tx_pll_enabled(dev_priv,
(enum pipe) cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
/* Wa_22012358565:adl-p */
if (DISPLAY_VER(dev_priv) == 13)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
reg = PIPECONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
if (val & PIPECONF_ENABLE) {
/* we keep both pipes enabled on 830 */
drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
intel_de_posting_read(dev_priv, reg);
/*
* Until the pipe starts, PIPEDSL reads will return a stale value,
* which causes an apparent vblank timestamp jump when PIPEDSL
* resets to its proper value. That also messes up the frame count
* when it's derived from the timestamps. So let's wait for the
* pipe to start properly before we call drm_crtc_vblank_on()
*/
if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
intel_wait_for_pipe_scanline_moving(crtc);
}
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val;
drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
/*
* Make sure planes won't keep trying to pump pixels to us,
* or we might hang the display.
*/
assert_planes_disabled(crtc);
reg = PIPECONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
/*
* Double wide has implications for planes
* so best keep it disabled when not needed.
*/
if (old_crtc_state->double_wide)
val &= ~PIPECONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
if (!IS_I830(dev_priv))
val &= ~PIPECONF_ENABLE;
if (DISPLAY_VER(dev_priv) >= 12)
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
intel_de_write(dev_priv, reg, val);
if ((val & PIPECONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier)
{
return info->is_yuv &&
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}
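/*
 * Return the width of a tile row in bytes for the given fb color plane.
 * For linear buffers this is simply the GTT page/tile size, while the
 * tiled cases below follow the hardware tile geometry (e.g. X-tiles are
 * 512 bytes wide on everything but gen2, Y-tiles usually 128 bytes).
 */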
unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
unsigned int cpp = fb->format->cpp[color_plane];
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
return intel_tile_size(dev_priv);
case I915_FORMAT_MOD_X_TILED:
if (DISPLAY_VER(dev_priv) == 2)
return 128;
else
return 512;
case I915_FORMAT_MOD_Y_TILED_CCS:
if (is_ccs_plane(fb, color_plane))
return 128;
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_ccs_plane(fb, color_plane))
return 64;
fallthrough;
case I915_FORMAT_MOD_Y_TILED:
if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
case I915_FORMAT_MOD_Yf_TILED_CCS:
if (is_ccs_plane(fb, color_plane))
return 128;
fallthrough;
case I915_FORMAT_MOD_Yf_TILED:
switch (cpp) {
case 1:
return 64;
case 2:
case 4:
return 128;
case 8:
case 16:
return 256;
default:
MISSING_CASE(cpp);
return cpp;
}
break;
default:
MISSING_CASE(fb->modifier);
return cpp;
}
}
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height)
{
unsigned int tile_height = intel_tile_height(fb, color_plane);
return ALIGN(height, tile_height);
}
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
unsigned int size = 0;
int i;
for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
return size;
}
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
unsigned int size = 0;
int i;
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
return size;
}
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 9)
return 256 * 1024;
else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return 128 * 1024;
else if (DISPLAY_VER(dev_priv) >= 4)
return 4 * 1024;
else
return 0;
}
static bool has_async_flips(struct drm_i915_private *i915)
{
return DISPLAY_VER(i915) >= 5;
}
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
if (intel_fb_uses_dpt(fb))
return 512 * 4096;
/* AUX_DIST needs only 4K alignment */
if (is_ccs_plane(fb, color_plane))
return 4096;
if (is_semiplanar_uv_plane(fb, color_plane)) {
/*
* TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
* alignment for linear UV planes on all platforms.
*/
if (DISPLAY_VER(dev_priv) >= 12) {
if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return intel_linear_alignment(dev_priv);
return intel_tile_row_size(fb, color_plane);
}
return 4096;
}
drm_WARN_ON(&dev_priv->drm, color_plane != 0);
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
if (has_async_flips(dev_priv))
return 256 * 1024;
return 0;
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
MISSING_CASE(fb->modifier);
return 0;
}
}
static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return DISPLAY_VER(dev_priv) < 4 ||
(plane->has_fbc &&
plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags,
struct i915_address_space *vm)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_vma *vma;
u32 alignment;
int ret;
if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
alignment = 4096 * 512;
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
goto err;
}
vma = i915_vma_instance(obj, vm, view);
if (IS_ERR(vma))
goto err;
if (i915_vma_misplaced(vma, 0, alignment, 0)) {
ret = i915_vma_unbind(vma);
if (ret) {
vma = ERR_PTR(ret);
goto err;
}
}
ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
if (ret) {
vma = ERR_PTR(ret);
goto err;
}
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
i915_gem_object_flush_if_display(obj);
i915_vma_get(vma);
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
return vma;
}
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
bool phys_cursor,
const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
intel_wakeref_t wakeref;
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
unsigned int pinctl;
u32 alignment;
int ret;
if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
if (phys_cursor)
alignment = intel_cursor_alignment(dev_priv);
else
alignment = intel_surf_alignment(fb, 0);
if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
return ERR_PTR(-EINVAL);
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
* we should always have valid PTE following the scanout preventing
* the VT-d warning.
*/
if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
alignment = 256 * 1024;
/*
* Global gtt pte registers are special registers which actually forward
* writes to a chunk of system memory, which means that there is no risk
* that the register values disappear as soon as we call
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
/*
* Valleyview is definitely limited to scanning out the first
* 512MiB. Let's presume this behaviour was inherited from the
* g4x display engine and that all earlier gen are similarly
* limited. Testing suggests that it is a little more
* complicated than this. For example, Cherryview appears quite
* happy to scanout from anywhere within its global aperture.
*/
pinctl = 0;
if (HAS_GMCH(dev_priv))
pinctl |= PIN_MAPPABLE;
i915_gem_ww_ctx_init(&ww, true);
retry:
ret = i915_gem_object_lock(obj, &ww);
if (!ret && phys_cursor)
ret = i915_gem_object_attach_phys(obj, alignment);
else if (!ret && HAS_LMEM(dev_priv))
ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
/* TODO: Do we need to sync when migration becomes async? */
if (!ret)
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
if (!ret) {
vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
view, pinctl);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unpin;
}
}
if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
/*
* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always, when
* possible, install a fence as the cost is not that onerous.
*
* If we fail to fence the tiled scanout, then either the
* modeset will reject the change (which is highly unlikely as
* the affected systems, all but one, do not have unmappable
* space) or we will not be able to enable full powersaving
* techniques (also likely not to apply due to various limits
* FBC and the like impose on the size of the buffer, which
* presumably we violated anyway with this unmappable buffer).
* Anyway, it is presumably better to stumble onwards with
* something and try to run the system in a "less than optimal"
* mode that matches the user configuration.
*/
ret = i915_vma_pin_fence(vma);
if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
i915_vma_unpin(vma);
goto err_unpin;
}
ret = 0;
if (vma->fence)
*out_flags |= PLANE_HAS_FENCE;
}
i915_vma_get(vma);
err_unpin:
i915_gem_object_unpin_pages(obj);
err:
if (ret == -EDEADLK) {
ret = i915_gem_ww_ctx_backoff(&ww);
if (!ret)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
if (ret)
vma = ERR_PTR(ret);
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
}
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
i915_vma_unpin(vma);
i915_vma_put(vma);
}
/*
* Convert the x/y offsets into a linear offset.
* Only valid with 0/180 degree rotation, which is fine since linear
* offset is only used with linear buffers on pre-hsw and tiled buffers
* with gen2/3, and 90/270 degree rotations aren't supported on any of them.
*/
u32 intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int color_plane)
{
const struct drm_framebuffer *fb = state->hw.fb;
unsigned int cpp = fb->format->cpp[color_plane];
unsigned int pitch = state->view.color_plane[color_plane].stride;
return y * pitch + x * cpp;
}
/*
* Add the x/y offsets derived from fb->offsets[] to the user
* specified plane src x/y offsets. The resulting x/y offsets
* specify the start of scanout from the beginning of the gtt mapping.
*/
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state,
int color_plane)
{
*x += state->view.color_plane[color_plane].x;
*y += state->view.color_plane[color_plane].y;
}
static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
switch (fb_modifier) {
case I915_FORMAT_MOD_X_TILED:
return I915_TILING_X;
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return I915_TILING_Y;
default:
return I915_TILING_NONE;
}
}
/*
* From the Sky Lake PRM:
* "The Color Control Surface (CCS) contains the compression status of
* the cache-line pairs. The compression state of the cache-line pair
* is specified by 2 bits in the CCS. Each CCS cache-line represents
* an area on the main surface of 16 x 16 sets of 128 byte Y-tiled
* cache-line-pairs. CCS is always Y tiled."
*
* Since cache line pairs refers to horizontally adjacent cache lines,
* each cache line in the CCS corresponds to an area of 32x16 cache
* lines on the main surface. Since each pixel is 4 bytes, this gives
* us a ratio of one byte in the CCS for each 8x16 pixels in the
* main surface.
*/
static const struct drm_format_info skl_ccs_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
/*
* Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
* main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
* in the main surface. With 4 byte pixels and each Y-tile having dimensions of
* 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
* the main surface.
*/
static const struct drm_format_info gen12_ccs_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV12, .num_planes = 4,
.char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P010, .num_planes = 4,
.char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P012, .num_planes = 4,
.char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P016, .num_planes = 4,
.char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
.hsub = 2, .vsub = 2, .is_yuv = true },
};
/*
* Same as gen12_ccs_formats[] above, but with additional surface used
* to pass Clear Color information in plane 2 with 64 bits of data.
*/
static const struct drm_format_info gen12_ccs_cc_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
.char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
.char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
.char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
.char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
};
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
{
int i;
for (i = 0; i < num_formats; i++) {
if (formats[i].format == format)
return &formats[i];
}
return NULL;
}
static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
switch (cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
return lookup_format_info(skl_ccs_formats,
ARRAY_SIZE(skl_ccs_formats),
cmd->pixel_format);
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return lookup_format_info(gen12_ccs_formats,
ARRAY_SIZE(gen12_ccs_formats),
cmd->pixel_format);
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return lookup_format_info(gen12_ccs_cc_formats,
ARRAY_SIZE(gen12_ccs_cc_formats),
cmd->pixel_format);
default:
return NULL;
}
}
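/*
 * Gen12 CCS AUX stride: 64 bytes of CCS data cover 512 bytes of main
 * surface pitch, so the AUX plane stride is the main plane stride / 8,
 * rounded up to a multiple of 64 bytes.
 */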
static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
512) * 64;
}
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
struct intel_crtc *crtc;
struct intel_plane *plane;
if (!HAS_DISPLAY(dev_priv))
return 0;
/*
* We assume the primary plane for pipe A has
* the highest stride limits of them all;
* if pipe A is disabled, use the first pipe from pipe_mask instead.
*/
crtc = intel_get_first_crtc(dev_priv);
if (!crtc)
return 0;
plane = to_intel_plane(crtc->base.primary);
return plane->max_stride(plane, pixel_format, modifier,
DRM_MODE_ROTATE_0);
}
static
u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
/*
* Arbitrary limit for gen4+ chosen to match the
* render engine max stride.
*
* The new CCS hash mode makes remapping impossible
*/
if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
intel_modifier_uses_dpt(dev_priv, modifier))
return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
else if (DISPLAY_VER(dev_priv) >= 7)
return 256 * 1024;
else
return 128 * 1024;
}
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
u32 tile_width;
if (is_surface_linear(fb, color_plane)) {
u32 max_stride = intel_plane_fb_max_stride(dev_priv,
fb->format->format,
fb->modifier);
/*
* To make remapping with linear generally feasible
* we need the stride to be page aligned.
*/
if (fb->pitches[color_plane] > max_stride &&
!is_ccs_modifier(fb->modifier))
return intel_tile_size(dev_priv);
else
return 64;
}
tile_width = intel_tile_width_bytes(fb, color_plane);
if (is_ccs_modifier(fb->modifier)) {
/*
* Display WA #0531: skl,bxt,kbl,glk
*
* Render decompression and plane width > 3840
* combined with horizontal panning requires the
* plane stride to be a multiple of 4. We'll just
* require the entire fb to accommodate that to avoid
* potential runtime errors at plane configuration time.
*/
if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
color_plane == 0 && fb->width > 3840)
tile_width *= 4;
/*
* The main surface pitch must be padded to a multiple of four
* tile widths.
*/
else if (DISPLAY_VER(dev_priv) >= 12)
tile_width *= 4;
}
return tile_width;
}
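/*
 * Try to take over the firmware/BIOS framebuffer: wrap the preallocated
 * range of stolen memory described by @plane_config in a GEM object and
 * pin it at its current GGTT offset so scanout continues uninterrupted.
 */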
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 base, size;
if (plane_config->size == 0)
return NULL;
base = round_down(plane_config->base,
I915_GTT_MIN_ALIGNMENT);
size = round_up(plane_config->base + plane_config->size,
I915_GTT_MIN_ALIGNMENT);
size -= base;
/*
* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features.
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
size * 2 > i915->stolen_usable_size)
return NULL;
obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
if (IS_ERR(obj))
return NULL;
/*
* Mark it WT ahead of time to avoid changing the
* cache_level during fbdev initialization. The
* unbind there would get stuck waiting for rcu.
*/
i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
I915_CACHE_WT : I915_CACHE_NONE);
switch (plane_config->tiling) {
case I915_TILING_NONE:
break;
case I915_TILING_X:
case I915_TILING_Y:
obj->tiling_and_stride =
plane_config->fb->base.pitches[0] |
plane_config->tiling;
break;
default:
MISSING_CASE(plane_config->tiling);
goto err_obj;
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
goto err_obj;
if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
goto err_obj;
if (i915_gem_object_is_tiled(obj) &&
!i915_vma_is_map_and_fenceable(vma))
goto err_obj;
return vma;
err_obj:
i915_gem_object_put(obj);
return NULL;
}
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
struct i915_vma *vma;
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
break;
default:
drm_dbg(&dev_priv->drm,
"Unsupported modifier for initial FB: 0x%llx\n",
fb->modifier);
return false;
}
vma = initial_plane_vma(dev_priv, plane_config);
if (!vma)
return false;
mode_cmd.pixel_format = fb->format->format;
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
mode_cmd.pitches[0] = fb->pitches[0];
mode_cmd.modifier[0] = fb->modifier;
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
vma->obj, &mode_cmd)) {
drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
goto err_vma;
}
plane_config->vma = vma;
return true;
err_vma:
i915_vma_put(vma);
return false;
}
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
bool visible)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
plane_state->uapi.visible = visible;
if (visible)
crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
else
crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
/*
* Active_planes aliases if multiple "primary" or cursor planes
* have been used on the same (or wrong) pipe. plane_mask uses
* unique ids, hence we can use that to reconstruct active_planes.
*/
crtc_state->enabled_planes = 0;
crtc_state->active_planes = 0;
drm_for_each_plane_mask(plane, &dev_priv->drm,
crtc_state->uapi.plane_mask) {
crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
}
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
drm_dbg_kms(&dev_priv->drm,
"Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
plane->base.base.id, plane->base.name,
crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_plane_bitmasks(crtc_state);
crtc_state->data_rate[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
if (plane->id == PLANE_PRIMARY)
hsw_disable_ips(crtc_state);
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
* moment. So to make sure the plane gets truly disabled, disable
* first the self-refresh mode. The self-refresh enable bit in turn
* will be checked/applied by the HW only at the next frame start
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(dev_priv) &&
intel_set_memory_cxsr(dev_priv, false))
intel_wait_for_vblank(dev_priv, crtc->pipe);
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
intel_disable_plane(plane, crtc_state);
intel_wait_for_vblank(dev_priv, crtc->pipe);
}
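/*
 * Pin the DPT page table object into the mappable GGTT (or local memory)
 * and keep a CPU iomap of it around so the PTE write helpers above can
 * update it directly.
 */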
static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
{
struct drm_i915_private *i915 = vm->i915;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *iomem;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
atomic_inc(&i915->gpu_error.pending_fb_pin);
vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096,
HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
if (IS_ERR(vma))
goto err;
iomem = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(iomem)) {
vma = ERR_CAST(iomem);
goto err;
}
dpt->vma = vma;
dpt->iomem = iomem;
i915_vma_get(vma);
err:
atomic_dec(&i915->gpu_error.pending_fb_pin);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return vma;
}
static void intel_dpt_unpin(struct i915_address_space *vm)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
i915_vma_unpin_iomap(dpt->vma);
i915_vma_put(dpt->vma);
}
static bool
intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
const struct intel_initial_plane_config *plane_config,
struct drm_framebuffer **fb,
struct i915_vma **vma)
{
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
if (!crtc_state->uapi.active)
continue;
if (!plane_state->ggtt_vma)
continue;
if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
*fb = plane_state->hw.fb;
*vma = plane_state->ggtt_vma;
return true;
}
}
return false;
}
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
struct drm_framebuffer *fb;
struct i915_vma *vma;
/*
* TODO:
* Disable planes if get_initial_plane_config() failed.
* Make sure things work if the surface base is not page aligned.
*/
if (!plane_config->fb)
return;
if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
fb = &plane_config->fb->base;
vma = plane_config->vma;
goto valid_fb;
}
/*
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
goto valid_fb;
/*
* We've failed to reconstruct the BIOS FB. Current display state
* indicates that the primary plane is visible, but has a NULL FB,
* which will lead to problems later if we don't fix it up. The
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
intel_plane_disable_noatomic(crtc, plane);
if (crtc_state->bigjoiner) {
struct intel_crtc *slave =
crtc_state->bigjoiner_linked_crtc;
intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
}
return;
valid_fb:
plane_state->uapi.rotation = plane_config->rotation;
intel_fb_fill_view(to_intel_framebuffer(fb),
plane_state->uapi.rotation, &plane_state->view);
__i915_vma_pin(vma);
plane_state->ggtt_vma = i915_vma_get(vma);
if (intel_plane_uses_fence(plane_state) &&
i915_vma_pin_fence(vma) == 0 && vma->fence)
plane_state->flags |= PLANE_HAS_FENCE;
plane_state->uapi.src_x = 0;
plane_state->uapi.src_y = 0;
plane_state->uapi.src_w = fb->width << 16;
plane_state->uapi.src_h = fb->height << 16;
plane_state->uapi.crtc_x = 0;
plane_state->uapi.crtc_y = 0;
plane_state->uapi.crtc_w = fb->width;
plane_state->uapi.crtc_h = fb->height;
if (plane_config->tiling)
dev_priv->preserve_bios_swizzle = true;
plane_state->uapi.fb = fb;
drm_framebuffer_get(fb);
plane_state->uapi.crtc = &crtc->base;
intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
}
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
int x = 0, y = 0;
intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
plane_state->view.color_plane[0].offset, 0);
return y;
}
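/*
 * Re-read the current hardware state and then, if a duplicated atomic
 * state was saved before the reset, commit it to restore the previous
 * display configuration.
 */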
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int i, ret;
intel_modeset_setup_hw_state(dev, ctx);
intel_vga_redisable(to_i915(dev));
if (!state)
return 0;
/*
* We've duplicated the state; pointers to the old state are now invalid.
*
* Don't attempt to use the old state until we commit the duplicated state.
*/
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
/*
* Force recalculation even if we restore the
* current state. With fast modeset this may not result
* in a modeset when the state is compatible.
*/
crtc_state->mode_changed = true;
}
/* ignore any reset values/BIOS leftovers in the WM registers */
if (!HAS_GMCH(to_i915(dev)))
to_intel_atomic_state(state)->skip_intermediate_wm = true;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
drm_WARN_ON(dev, ret == -EDEADLK);
return ret;
}
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
intel_has_gpu_reset(&dev_priv->gt));
}
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
struct drm_atomic_state *state;
int ret;
if (!HAS_DISPLAY(dev_priv))
return;
/* reset doesn't touch the display */
if (!dev_priv->params.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
/* We have a modeset vs reset deadlock, defensively unbreak it. */
set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
smp_mb__after_atomic();
wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
drm_dbg_kms(&dev_priv->drm,
"Modeset potentially stuck, unbreaking through wedging\n");
intel_gt_set_wedged(&dev_priv->gt);
}
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
*/
mutex_lock(&dev->mode_config.mutex);
drm_modeset_acquire_init(ctx, 0);
while (1) {
ret = drm_modeset_lock_all_ctx(dev, ctx);
if (ret != -EDEADLK)
break;
drm_modeset_backoff(ctx);
}
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
state = drm_atomic_helper_duplicate_state(dev, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
ret);
return;
}
ret = drm_atomic_helper_disable_all(dev, ctx);
if (ret) {
drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
ret);
drm_atomic_state_put(state);
return;
}
dev_priv->modeset_restore_state = state;
state->acquire_ctx = ctx;
}
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
struct drm_atomic_state *state;
int ret;
if (!HAS_DISPLAY(dev_priv))
return;
/* reset doesn't touch the display */
if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
return;
state = fetch_and_zero(&dev_priv->modeset_restore_state);
if (!state)
goto unlock;
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
/* for testing only restore the display */
ret = __intel_display_resume(dev, state, ctx);
if (ret)
drm_err(&dev_priv->drm,
"Restoring old state failed with %i\n", ret);
} else {
/*
* The display has been reset as well,
* so need a full re-initialization.
*/
intel_pps_unlock_regs_wa(dev_priv);
intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
intel_hpd_init(dev_priv);
ret = __intel_display_resume(dev, state, ctx);
if (ret)
drm_err(&dev_priv->drm,
"Restoring old state failed with %i\n", ret);
intel_hpd_poll_disable(dev_priv);
}
drm_atomic_state_put(state);
unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
{
if (crtc_state->pch_pfit.enabled &&
(crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
return false;
if (crtc_state->dsc.compression_enable)
return false;
if (crtc_state->has_psr2)
return false;
if (crtc_state->splitter.enable)
return false;
return true;
}
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tmp;
tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
/*
* Display WA #1153: icl
* enable hardware to bypass the alpha math
* and rounding for per-pixel values 00 and 0xff
*/
tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
/*
* Display WA # 1605353570: icl
* Set the pixel rounding bit to 1 for allowing
* passthrough of Frame buffer pixels unmodified
* across pipe
*/
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
if (IS_DG2(dev_priv)) {
/*
* Underrun recovery must always be disabled on DG2. However
* the chicken bit meaning is inverted compared to other
* platforms.
*/
tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
} else if (DISPLAY_VER(dev_priv) >= 13) {
if (underrun_recovery_supported(crtc_state))
tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
else
tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
}
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
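/*
 * Returns true if some CRTC still has a commit whose cleanup (and thus its
 * fb unpin) hasn't completed; one vblank wait is thrown in to give the
 * commit a chance to finish before we report the unpin as pending.
 */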
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc;
bool cleanup_done;
drm_for_each_crtc(crtc, &dev_priv->drm) {
struct drm_crtc_commit *commit;
spin_lock(&crtc->commit_lock);
commit = list_first_entry_or_null(&crtc->commit_list,
struct drm_crtc_commit, commit_entry);
cleanup_done = commit ?
try_wait_for_completion(&commit->cleanup_done) : true;
spin_unlock(&crtc->commit_lock);
if (cleanup_done)
continue;
drm_crtc_wait_one_vblank(crtc);
return true;
}
return false;
}
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;
intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
mutex_lock(&dev_priv->sb_lock);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
mutex_unlock(&dev_priv->sb_lock);
}
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
lpt_disable_iclkip(dev_priv);
/* The iCLK virtual clock root frequency is in MHz,
* but the adjusted_mode->crtc_clock is in KHz. To get the
* divisors, it is necessary to divide one by another, so we
* convert the virtual clock precision to KHz here for higher
* precision.
*/
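/*
 * Worked example (hypothetical 108 MHz pixel clock, i.e. clock = 108000):
 * with auxdiv = 0, desired_divisor = 172800000 / 108000 = 1600, giving
 * divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0; divsel fits
 * the 7-bit field, so auxdiv = 0 is used.
 */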
for (auxdiv = 0; auxdiv < 2; auxdiv++) {
u32 iclk_virtual_root_freq = 172800 * 1000;
u32 iclk_pi_range = 64;
u32 desired_divisor;
desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
clock << auxdiv);
divsel = (desired_divisor / iclk_pi_range) - 2;
phaseinc = desired_divisor % iclk_pi_range;
/*
* Near 20MHz is a corner case which is
* out of range for the 7-bit divisor
*/
if (divsel <= 0x7f)
break;
}
/* This should not happen with any sane values */
drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
drm_dbg_kms(&dev_priv->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, auxdiv, divsel, phasedir, phaseinc);
mutex_lock(&dev_priv->sb_lock);
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
mutex_unlock(&dev_priv->sb_lock);
/* Wait for initialization time */
udelay(24);
intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
u32 divsel, phaseinc, auxdiv;
u32 iclk_virtual_root_freq = 172800 * 1000;
u32 iclk_pi_range = 64;
u32 desired_divisor;
u32 temp;
if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
mutex_lock(&dev_priv->sb_lock);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
if (temp & SBI_SSCCTL_DISABLE) {
mutex_unlock(&dev_priv->sb_lock);
return 0;
}
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
mutex_unlock(&dev_priv->sb_lock);
desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
desired_divisor << auxdiv);
}
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
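/*
 * The PCH transcoder must run with timings identical to those of the CPU
 * transcoder feeding it, hence the verbatim 1:1 register copy above.
 */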
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
u32 temp;
temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
FDI_RX_ENABLE);
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
FDI_RX_ENABLE);
temp &= ~FDI_BC_BIFURCATION_SELECT;
if (enable)
temp |= FDI_BC_BIFURCATION_SELECT;
drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
enable ? "en" : "dis");
intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
switch (crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (crtc_state->fdi_lanes > 2)
cpt_set_fdi_bc_bifurcation(dev_priv, false);
else
cpt_set_fdi_bc_bifurcation(dev_priv, true);
break;
case PIPE_C:
cpt_set_fdi_bc_bifurcation(dev_priv, true);
break;
default:
BUG();
}
}
/*
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
*/
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_connector_state *connector_state;
const struct drm_connector *connector;
struct intel_encoder *encoder = NULL;
int num_encoders = 0;
int i;
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
encoder = to_intel_encoder(connector_state->best_encoder);
num_encoders++;
}
/* 'encoder' may be NULL if no connector matched; don't dereference it for the WARN */
drm_WARN(state->base.dev, num_encoders != 1,
"%d encoders for pipe %c\n",
num_encoders, pipe_name(crtc->pipe));
return encoder;
}
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
* - FDI training & RX/TX
* - update transcoder timings
* - DP transcoding bits
* - transcoder
*/
static void ilk_pch_enable(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
u32 temp;
assert_pch_transcoder_disabled(dev_priv, pipe);
if (IS_IVYBRIDGE(dev_priv))
ivb_update_fdi_bc_bifurcation(crtc_state);
/* Write the TU size bits before fdi link training, so that error
* detection works. */
intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc, crtc_state);
/* We need to program the right clock selection before writing the pixel
* multiplier into the DPLL. */
if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
if (crtc_state->shared_dpll ==
intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
temp |= sel;
else
temp &= ~sel;
intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
}
/* XXX: PCH PLLs can be enabled any time before we enable the PCH
* transcoder, and we actually should do this to avoid upsetting any PCH
* transcoder that already uses the clock when we share it.
*
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
ilk_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev_priv) &&
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
temp = intel_de_read(dev_priv, reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
TRANS_DP_BPC_MASK);
temp |= TRANS_DP_OUTPUT_ENABLE;
temp |= bpc << 9; /* same format but at 11:9 */
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
port = intel_get_crtc_new_encoder(state, crtc_state)->port;
drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
temp |= TRANS_DP_PORT_SEL(port);
intel_de_write(dev_priv, reg, temp);
}
ilk_enable_pch_transcoder(crtc_state);
}
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
assert_pch_transcoder_disabled(dev_priv, PIPE_A);
lpt_program_iclkip(crtc_state);
/* Set transcoder timing. */
ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
temp = intel_de_read(dev_priv, dslreg);
udelay(500);
if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
drm_err(&dev_priv->drm,
"mode set failed: pipe %c stuck\n",
pipe_name(pipe));
}
}
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
enum pipe pipe = crtc->pipe;
int width = drm_rect_width(dst);
int height = drm_rect_height(dst);
int x = dst->x1;
int y = dst->y1;
if (!crtc_state->pch_pfit.enabled)
return;
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
else
intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
PF_FILTER_MED_3x3);
intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
if (!crtc_state->ips_enabled)
return;
/*
* We can only enable IPS after we enable a plane and wait for a vblank
* This function is called from post_plane_update, which is run after
* a vblank wait.
*/
drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
IPS_ENABLE | IPS_PCODE_CONTROL));
/* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
* so we need to just enable it and continue on.
*/
} else {
intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
/* The bit only becomes 1 in the next vblank, so this wait here
* is essentially intel_wait_for_vblank. If we don't have this
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
drm_err(&dev_priv->drm,
"Timed out waiting for IPS enable\n");
}
}
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
if (!crtc_state->ips_enabled)
return;
if (IS_BROADWELL(dev_priv)) {
drm_WARN_ON(dev,
sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec-specified
* 42ms timeout value leads to occasional timeouts, so use 100ms
* instead.
*/
if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
drm_err(&dev_priv->drm,
"Timed out waiting for IPS disable\n");
} else {
intel_de_write(dev_priv, IPS_CTL, 0);
intel_de_posting_read(dev_priv, IPS_CTL);
}
/* We need to wait for a vblank before we can disable the plane. */
intel_wait_for_vblank(dev_priv, crtc->pipe);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
if (crtc->overlay)
(void) intel_overlay_switch_off(crtc->overlay);
/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
*/
}
static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!old_crtc_state->ips_enabled)
return false;
if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
* Workaround: Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*
* Disable IPS before we program the LUT.
*/
if (IS_HASWELL(dev_priv) &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
return !new_crtc_state->ips_enabled;
}
static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!new_crtc_state->ips_enabled)
return false;
if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
* Workaround: Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*
* Re-enable IPS after the LUT has been programmed.
*/
if (IS_HASWELL(dev_priv) &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
/*
* We can't read out IPS on broadwell, assume the worst and
* forcibly enable IPS on the first fastset.
*/
if (new_crtc_state->update_pipe && old_crtc_state->inherited)
return true;
return !old_crtc_state->ips_enabled;
}
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (!crtc_state->nv12_planes)
return false;
/* WA Display #0827: Gen9:all */
if (DISPLAY_VER(dev_priv) == 9)
return true;
return false;
}
static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/* Wa_2006604312:icl,ehl */
if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
return true;
return false;
}
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
new_crtc_state->active_planes;
}
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
return old_crtc_state->active_planes &&
(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
}
static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
intel_update_watermarks(crtc);
if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
hsw_enable_ips(new_crtc_state);
intel_fbc_post_update(state, crtc);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
skl_wa_827(dev_priv, pipe, false);
if (needs_scalerclk_wa(old_crtc_state) &&
!needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, false);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
u8 update_planes = crtc_state->update_planes;
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
int i;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->enable_flip_done &&
plane->pipe == crtc->pipe &&
update_planes & BIT(plane->id))
plane->enable_flip_done(plane);
}
}
static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
u8 update_planes = crtc_state->update_planes;
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
int i;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->disable_flip_done &&
plane->pipe == crtc->pipe &&
update_planes & BIT(plane->id))
plane->disable_flip_done(plane);
}
}
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
u8 update_planes = new_crtc_state->update_planes;
const struct intel_plane_state *old_plane_state;
struct intel_plane *plane;
bool need_vbl_wait = false;
int i;
for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
if (plane->need_async_flip_disable_wa &&
plane->pipe == crtc->pipe &&
update_planes & BIT(plane->id)) {
/*
* Apart from the async flip bit we want to
* preserve the old state for the plane.
*/
plane->async_flip(plane, old_crtc_state,
old_plane_state, false);
need_vbl_wait = true;
}
}
if (need_vbl_wait)
intel_wait_for_vblank(i915, crtc->pipe);
}
static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
hsw_disable_ips(old_crtc_state);
if (intel_fbc_pre_update(state, crtc))
intel_wait_for_vblank(dev_priv, pipe);
/* Display WA 827 */
if (!needs_nv12_wa(old_crtc_state) &&
needs_nv12_wa(new_crtc_state))
skl_wa_827(dev_priv, pipe, true);
/* Wa_2006604312:icl,ehl */
if (!needs_scalerclk_wa(old_crtc_state) &&
needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, true);
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
* moment. So to make sure the plane gets truly disabled, first
* disable self-refresh mode. The self-refresh enable bit in turn
* will be checked/applied by the HW only at the next frame start
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
intel_wait_for_vblank(dev_priv, pipe);
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
* when scaling is disabled.
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (old_crtc_state->hw.active &&
new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
intel_wait_for_vblank(dev_priv, pipe);
/*
* If we're doing a modeset we don't need to do any
* pre-vblank watermark programming here.
*/
if (!intel_crtc_needs_modeset(new_crtc_state))