/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
"VERDE",
"OLAND",
"HAINAN",
"BONAIRE",
"KAVERI",
"KABINI",
"HAWAII",
"MULLINS",
"TOPAZ",
"TONGA",
"FIJI",
"CARRIZO",
"STONEY",
"POLARIS10",
"POLARIS11",
"POLARIS12",
"VEGAM",
"VEGA10",
"VEGA12",
"VEGA20",
"RAVEN",
"ARCTURUS",
"RENOIR",
"ALDEBARAN",
"NAVI10",
"CYAN_SKILLFISH",
"NAVI14",
"NAVI12",
"SIENNA_CICHLID",
"NAVY_FLOUNDER",
"VANGOGH",
"DIMGREY_CAVEFISH",
"BEIGE_GOBY",
"YELLOW_CARP",
"IP DISCOVERY",
"LAST",
};
/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
return sysfs_emit(buf, "%llu\n", cnt);
}
static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
amdgpu_device_get_pcie_replay_count, NULL);
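/*
 * Usage sketch (hypothetical sysfs path, assuming the GPU is enumerated
 * as card0):
 *
 *   $ cat /sys/class/drm/card0/device/pcie_replay_count
 *   0
 */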
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */
static ssize_t amdgpu_device_get_product_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
return sysfs_emit(buf, "%s\n", adev->product_name);
}
static DEVICE_ATTR(product_name, S_IRUGO,
amdgpu_device_get_product_name, NULL);
/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */
static ssize_t amdgpu_device_get_product_number(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
return sysfs_emit(buf, "%s\n", adev->product_number);
}
static DEVICE_ATTR(product_number, S_IRUGO,
amdgpu_device_get_product_number, NULL);
/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */
static ssize_t amdgpu_device_get_serial_number(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
return sysfs_emit(buf, "%s\n", adev->serial);
}
static DEVICE_ATTR(serial_number, S_IRUGO,
amdgpu_device_get_serial_number, NULL);
/**
* amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
*
* @dev: drm_device pointer
*
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
*/
bool amdgpu_device_supports_px(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
return true;
return false;
}
/**
* amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
*
* @dev: drm_device pointer
*
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
*/
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
if (adev->has_pr3 ||
((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
return true;
return false;
}
/**
* amdgpu_device_supports_baco - Does the device support BACO
*
* @dev: drm_device pointer
*
 * Returns true if the device supports BACO,
 * otherwise returns false.
*/
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_asic_supports_baco(adev);
}
/**
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
*
* @dev: drm_device pointer
*
* Returns true if the device is a dGPU with Smart Shift support,
* otherwise returns false.
*/
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
return (amdgpu_device_supports_boco(dev) &&
amdgpu_acpi_is_power_shift_control_supported());
}
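/*
 * Minimal caller sketch (hypothetical, not part of the driver): pick a
 * runtime power-off strategy based on the helpers above. The function
 * name and the returned strings are illustrative only.
 */
static inline const char *amdgpu_example_pick_rpm_mode(struct drm_device *dev)
{
	if (amdgpu_device_supports_px(dev))
		return "ATPX";	/* dGPU power controlled via ATPX */
	if (amdgpu_device_supports_boco(dev))
		return "BOCO";	/* bus off, chip off via ACPI */
	if (amdgpu_device_supports_baco(dev))
		return "BACO";	/* bus active, chip off */
	return "none";
}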
/*
* VRAM access helper functions
*/
/**
* amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
* @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must point to at least @size bytes
* @write: true - write to vram, otherwise - read from vram
*/
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
void *buf, size_t size, bool write)
{
unsigned long flags;
uint32_t hi = ~0, tmp = 0;
uint32_t *data = buf;
uint64_t last;
int idx;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return;
BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
for (last = pos + size; pos < last; pos += 4) {
tmp = pos >> 31;
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
if (tmp != hi) {
WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
hi = tmp;
}
if (write)
WREG32_NO_KIQ(mmMM_DATA, *data++);
else
*data++ = RREG32_NO_KIQ(mmMM_DATA);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
drm_dev_exit(idx);
}
/**
 * amdgpu_device_aper_access - access vram via the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must point to at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
void __iomem *addr;
size_t count = 0;
uint64_t last;
if (!adev->mman.aper_base_kaddr)
return 0;
last = min(pos + size, adev->gmc.visible_vram_size);
if (last > pos) {
addr = adev->mman.aper_base_kaddr + pos;
count = last - pos;
if (write) {
memcpy_toio(addr, buf, count);
mb();
amdgpu_device_flush_hdp(adev, NULL);
} else {
amdgpu_device_invalidate_hdp(adev, NULL);
mb();
memcpy_fromio(buf, addr, count);
}
}
return count;
#else
return 0;
#endif
}
/**
* amdgpu_device_vram_access - read/write a buffer in vram
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
* @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must point to at least @size bytes
* @write: true - write to vram, otherwise - read from vram
*/
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
void *buf, size_t size, bool write)
{
size_t count;
/* try to use the VRAM aperture to access VRAM first */
count = amdgpu_device_aper_access(adev, pos, buf, size, write);
size -= count;
if (size) {
/* use MM access for the rest of VRAM */
pos += count;
buf += count;
amdgpu_device_mm_access(adev, pos, buf, size, write);
}
}
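/*
 * Usage sketch (hypothetical helper): read the first 16 bytes of VRAM into
 * a stack buffer. @pos and @size must stay dword aligned so that the
 * MM_INDEX/MM_DATA fallback in amdgpu_device_mm_access() is legal.
 */
static inline void amdgpu_example_peek_vram(struct amdgpu_device *adev)
{
	u32 data[4] = {};

	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
	dev_dbg(adev->dev, "vram[0] = 0x%08x\n", data[0]);
}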
/*
* register access helper functions.
*/
/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
if (adev->no_hw_access)
return true;
#ifdef CONFIG_LOCKDEP
/*
 * This is a bit complicated to understand, so worth a comment. What we assert
 * here is that the GPU reset is not running on another thread in parallel.
 *
 * For this we trylock the read side of the reset semaphore; if that succeeds
 * we know that the reset is not running in parallel.
 *
 * If the trylock fails we assert that we are either already holding the read
 * side of the lock or are the reset thread itself and hold the write side of
 * the lock.
 */
if (in_task()) {
if (down_read_trylock(&adev->reset_sem))
up_read(&adev->reset_sem);
else
lockdep_assert_held(&adev->reset_sem);
}
#endif
return false;
}
/**
* amdgpu_device_rreg - read a memory mapped IO or indirect register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
* @acc_flags: access flags which require special behavior
*
* Returns the 32 bit value from the offset specified.
*/
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t acc_flags)
{
uint32_t ret;
if (amdgpu_device_skip_hw_access(adev))
return 0;
if ((reg * 4) < adev->rmmio_size) {
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
down_read_trylock(&adev->reset_sem)) {
ret = amdgpu_kiq_rreg(adev, reg);
up_read(&adev->reset_sem);
} else {
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
}
} else {
ret = adev->pcie_rreg(adev, reg * 4);
}
trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
return ret;
}
/*
 * MMIO register read with byte offset helper functions
 * @offset: byte offset from MMIO start
 */
/**
* amdgpu_mm_rreg8 - read a memory mapped IO register
*
* @adev: amdgpu_device pointer
* @offset: byte aligned register offset
*
* Returns the 8 bit value from the offset specified.
*/
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
if (amdgpu_device_skip_hw_access(adev))
return 0;
if (offset < adev->rmmio_size)
return (readb(adev->rmmio + offset));
BUG();
}
/*
 * MMIO register write with byte offset helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
*
* @adev: amdgpu_device pointer
* @offset: byte aligned register offset
* @value: 8 bit value to write
*
* Writes the value specified to the offset specified.
*/
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
if (amdgpu_device_skip_hw_access(adev))
return;
if (offset < adev->rmmio_size)
writeb(value, adev->rmmio + offset);
else
BUG();
}
/**
* amdgpu_device_wreg - write to a memory mapped IO or indirect register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
* @v: 32 bit value to write to the register
* @acc_flags: access flags which require special behavior
*
* Writes the value specified to the offset specified.
*/
void amdgpu_device_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t v,
uint32_t acc_flags)
{
if (amdgpu_device_skip_hw_access(adev))
return;
if ((reg * 4) < adev->rmmio_size) {
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
down_read_trylock(&adev->reset_sem)) {
amdgpu_kiq_wreg(adev, reg, v);
up_read(&adev->reset_sem);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
} else {
adev->pcie_wreg(adev, reg * 4, v);
}
trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
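/*
 * Usage sketch (hypothetical register offset and mask): a plain
 * read-modify-write through the helpers above. Driver code normally goes
 * through the RREG32()/WREG32() wrappers, which expand to these functions.
 */
static inline void amdgpu_example_set_bits(struct amdgpu_device *adev,
					   uint32_t reg, uint32_t mask)
{
	uint32_t v = amdgpu_device_rreg(adev, reg, 0);

	amdgpu_device_wreg(adev, reg, v | mask, 0);
}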
/*
 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
uint32_t reg, uint32_t v)
{
if (amdgpu_device_skip_hw_access(adev))
return;
if (amdgpu_sriov_fullaccess(adev) &&
adev->gfx.rlc.funcs &&
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
}
/**
* amdgpu_mm_rdoorbell - read a doorbell dword
*
* @adev: amdgpu_device pointer
* @index: doorbell index
*
* Returns the value in the doorbell aperture at the
* requested doorbell index (CIK).
*/
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
if (amdgpu_device_skip_hw_access(adev))
return 0;
if (index < adev->doorbell.num_doorbells) {
return readl(adev->doorbell.ptr + index);
} else {
DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
}
}
/**
* amdgpu_mm_wdoorbell - write a doorbell dword
*
* @adev: amdgpu_device pointer
* @index: doorbell index
* @v: value to write
*
* Writes @v to the doorbell aperture at the
* requested doorbell index (CIK).
*/
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
if (amdgpu_device_skip_hw_access(adev))
return;
if (index < adev->doorbell.num_doorbells) {
writel(v, adev->doorbell.ptr + index);
} else {
DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
}
/**
* amdgpu_mm_rdoorbell64 - read a doorbell Qword
*
* @adev: amdgpu_device pointer
* @index: doorbell index
*
* Returns the value in the doorbell aperture at the
* requested doorbell index (VEGA10+).
*/
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
if (amdgpu_device_skip_hw_access(adev))
return 0;
if (index < adev->doorbell.num_doorbells) {
return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
} else {
DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
}
}
/**
* amdgpu_mm_wdoorbell64 - write a doorbell Qword
*
* @adev: amdgpu_device pointer
* @index: doorbell index
* @v: value to write
*
* Writes @v to the doorbell aperture at the
* requested doorbell index (VEGA10+).
*/
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
if (amdgpu_device_skip_hw_access(adev))
return;
if (index < adev->doorbell.num_doorbells) {
atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
} else {
DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
}
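/*
 * Usage sketch (hypothetical index and value): ring a queue's doorbell
 * with its new write pointer. A real caller would take the index from
 * adev->doorbell_index rather than pass an arbitrary one.
 */
static inline void amdgpu_example_ring_doorbell(struct amdgpu_device *adev,
						u32 index, u64 wptr)
{
	if (adev->asic_type >= CHIP_VEGA10)
		amdgpu_mm_wdoorbell64(adev, index, wptr);
	else
		amdgpu_mm_wdoorbell(adev, index, (u32)wptr);
}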
/**
* amdgpu_device_indirect_rreg - read an indirect register
*
* @adev: amdgpu_device pointer
* @pcie_index: mmio register offset
* @pcie_data: mmio register offset
* @reg_addr: indirect register address to read from
*
* Returns the value of indirect register @reg_addr
*/
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
u32 pcie_index, u32 pcie_data,
u32 reg_addr)
{
unsigned long flags;
u32 r;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
writel(reg_addr, pcie_index_offset);
readl(pcie_index_offset);
r = readl(pcie_data_offset);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
return r;
}
/**
 * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
*
* @adev: amdgpu_device pointer
* @pcie_index: mmio register offset
* @pcie_data: mmio register offset
* @reg_addr: indirect register address to read from
*
* Returns the value of indirect register @reg_addr
*/
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
u32 pcie_index, u32 pcie_data,
u32 reg_addr)
{
unsigned long flags;
u64 r;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
/* read low 32 bits */
writel(reg_addr, pcie_index_offset);
readl(pcie_index_offset);
r = readl(pcie_data_offset);
/* read high 32 bits */
writel(reg_addr + 4, pcie_index_offset);
readl(pcie_index_offset);
r |= ((u64)readl(pcie_data_offset) << 32);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
return r;
}
/**
 * amdgpu_device_indirect_wreg - write an indirect register
*
* @adev: amdgpu_device pointer
* @pcie_index: mmio register offset
* @pcie_data: mmio register offset
* @reg_addr: indirect register offset
* @reg_data: indirect register data
*
*/
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
u32 pcie_index, u32 pcie_data,
u32 reg_addr, u32 reg_data)
{
unsigned long flags;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
writel(reg_addr, pcie_index_offset);
readl(pcie_index_offset);
writel(reg_data, pcie_data_offset);
readl(pcie_data_offset);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
/**
 * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register
*
* @adev: amdgpu_device pointer
* @pcie_index: mmio register offset
* @pcie_data: mmio register offset
* @reg_addr: indirect register offset
* @reg_data: indirect register data
*
*/
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
u32 pcie_index, u32 pcie_data,
u32 reg_addr, u64 reg_data)
{
unsigned long flags;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
/* write low 32 bits */
writel(reg_addr, pcie_index_offset);
readl(pcie_index_offset);
writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
readl(pcie_data_offset);
/* write high 32 bits */
writel(reg_addr + 4, pcie_index_offset);
readl(pcie_index_offset);
writel((u32)(reg_data >> 32), pcie_data_offset);
readl(pcie_data_offset);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
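/*
 * Usage sketch (hypothetical index/data pair and mask): a read-modify-write
 * of an indirect register. On real asics the @pcie_index/@pcie_data offsets
 * come from the NBIO block; the parameters here are placeholders.
 */
static inline void amdgpu_example_indirect_rmw(struct amdgpu_device *adev,
					       u32 pcie_index, u32 pcie_data,
					       u32 reg_addr, u32 mask)
{
	u32 v = amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data, reg_addr);

	amdgpu_device_indirect_wreg(adev, pcie_index, pcie_data, reg_addr, v | mask);
}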
/**
* amdgpu_invalid_rreg - dummy reg read function
*
* @adev: amdgpu_device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
* that certain asics don't have (all asics).
* Returns the value in the register.
*/
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
BUG();
return 0;
}
/**
* amdgpu_invalid_wreg - dummy reg write function
*
* @adev: amdgpu_device pointer
* @reg: offset of register
* @v: value to write to the register
*
 * Dummy register write function. Used for register blocks
* that certain asics don't have (all asics).
*/
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
reg, v);
BUG();
}
/**
* amdgpu_invalid_rreg64 - dummy 64 bit reg read function
*
* @adev: amdgpu_device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
* that certain asics don't have (all asics).
* Returns the value in the register.
*/
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
BUG();
return 0;
}
/**
* amdgpu_invalid_wreg64 - dummy reg write function
*
* @adev: amdgpu_device pointer
* @reg: offset of register
* @v: value to write to the register
*
 * Dummy register write function. Used for register blocks
* that certain asics don't have (all asics).
*/
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
reg, v);
BUG();
}
/**
* amdgpu_block_invalid_rreg - dummy reg read function
*
* @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
* that certain asics don't have (all asics).
* Returns the value in the register.
*/
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
uint32_t block, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
reg, block);
BUG();
return 0;
}
/**
* amdgpu_block_invalid_wreg - dummy reg write function
*
* @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
* @v: value to write to the register
*
 * Dummy register write function. Used for register blocks
* that certain asics don't have (all asics).
*/
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
uint32_t block,
uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
reg, block, v);
BUG();
}
/**
* amdgpu_device_asic_init - Wrapper for atom asic_init
*
* @adev: amdgpu_device pointer
*
* Does any asic specific work and then calls atom asic init.
*/
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
amdgpu_asic_pre_asic_init(adev);
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}
/**
* amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
*
* @adev: amdgpu_device pointer
*
* Allocates a scratch page of VRAM for use by various things in the
* driver.
*/
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&adev->vram_scratch.robj,
&adev->vram_scratch.gpu_addr,
(void **)&adev->vram_scratch.ptr);
}
/**
* amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
*
* @adev: amdgpu_device pointer
*
* Frees the VRAM scratch page.
*/
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
* amdgpu_device_program_register_sequence - program an array of registers.
*
* @adev: amdgpu_device pointer
* @registers: pointer to the register array
* @array_size: size of the register array
*
 * Programs an array of registers with AND and OR masks.
* This is a helper for setting golden registers.
*/
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size)
{
u32 tmp, reg, and_mask, or_mask;
int i;
if (array_size % 3)
return;
for (i = 0; i < array_size; i += 3) {
reg = registers[i + 0];
and_mask = registers[i + 1];
or_mask = registers[i + 2];
if (and_mask == 0xffffffff) {
tmp = or_mask;
} else {
tmp = RREG32(reg);
tmp &= ~and_mask;
if (adev->family >= AMDGPU_FAMILY_AI)
tmp |= (or_mask & and_mask);
else
tmp |= or_mask;
}
WREG32(reg, tmp);
}
}
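/*
 * Usage sketch (hypothetical offsets and masks): golden register tables are
 * flat arrays of {reg, and_mask, or_mask} triples. An and_mask of
 * 0xffffffff writes or_mask verbatim; anything else performs the
 * read-modify-write shown above.
 */
static const u32 example_golden_settings[] = {
	/* reg     and_mask     or_mask */
	0x2000, 0xffffffff, 0x00000042,	/* full overwrite */
	0x2004, 0x0000ff00, 0x00001200,	/* RMW of one byte-wide field */
};

static inline void amdgpu_example_init_golden(struct amdgpu_device *adev)
{
	amdgpu_device_program_register_sequence(adev,
						example_golden_settings,
						ARRAY_SIZE(example_golden_settings));
}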
/**
* amdgpu_device_pci_config_reset - reset the GPU
*
* @adev: amdgpu_device pointer
*
* Resets the GPU using the pci config reset sequence.
* Only applicable to asics prior to vega10.
*/
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
/**
* amdgpu_device_pci_reset - reset the GPU using generic PCI means
*
* @adev: amdgpu_device pointer
*
* Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
*/
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
return pci_reset_function(adev->pdev);
}
/*
 * GPU doorbell aperture helper functions.
 */
/**
* amdgpu_device_doorbell_init - Init doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
adev->doorbell.size = 0;
adev->doorbell.num_doorbells = 0;
adev->doorbell.ptr = NULL;
return 0;
}
if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
return -EINVAL;
amdgpu_asic_init_doorbell_index(adev);
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
adev->doorbell_index.max_assignment+1);
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
/* For Vega, reserve and map two pages on doorbell BAR since SDMA
 * paging queue doorbells use the second page. The
 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
 * doorbells are in the first page. So with the paging queue enabled,
 * num_doorbells needs one extra page (0x400 in dwords).
 */
if (adev->asic_type >= CHIP_VEGA10)
adev->doorbell.num_doorbells += 0x400;
adev->doorbell.ptr = ioremap(adev->doorbell.base,
adev->doorbell.num_doorbells *
sizeof(u32));
if (adev->doorbell.ptr == NULL)
return -ENOMEM;
return 0;
}
/**
* amdgpu_device_doorbell_fini - Tear down doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Tear down doorbell driver information (CIK)
*/
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
iounmap(adev->doorbell.ptr);
adev->doorbell.ptr = NULL;
}
/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */
/**
* amdgpu_device_wb_fini - Disable Writeback and free memory
*
* @adev: amdgpu_device pointer
*
* Disables Writeback and frees the Writeback memory (all asics).
* Used at driver shutdown.
*/
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
amdgpu_bo_free_kernel(&adev->wb.wb_obj,
&adev->wb.gpu_addr,
(void **)&adev->wb.wb);
adev->wb.wb_obj = NULL;
}
}
/**
* amdgpu_device_wb_init- Init Writeback driver info and allocate memory
*
* @adev: amdgpu_device pointer
*
* Initializes writeback and allocates writeback memory (all asics).
* Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
*/
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
int r;
if (adev->wb.wb_obj == NULL) {
/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->wb.wb_obj, &adev->wb.gpu_addr,
(void **)&adev->wb.wb);
if (r) {
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
return r;
}
adev->wb.num_wb = AMDGPU_MAX_WB;
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
/* clear wb memory */
memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
}
return 0;
}
/**
* amdgpu_device_wb_get - Allocate a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
*
* Allocate a wb slot for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
*wb = offset << 3; /* convert to dw offset */
return 0;
} else {
return -EINVAL;
}
}
/**
* amdgpu_device_wb_free - Free a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
*
* Free a wb slot allocated for use by the driver (all asics)
*/
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
wb >>= 3;
if (wb < adev->wb.num_wb)
__clear_bit(wb, adev->wb.used);
}
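/*
 * Usage sketch (hypothetical round trip): grab a writeback slot, note that
 * the returned index is a dword offset valid for both the CPU map
 * (adev->wb.wb[wb]) and the GPU address (adev->wb.gpu_addr + wb * 4), then
 * release it again.
 */
static inline int amdgpu_example_wb_roundtrip(struct amdgpu_device *adev)
{
	u32 wb;
	int r = amdgpu_device_wb_get(adev, &wb);

	if (r)
		return r;
	dev_dbg(adev->dev, "wb slot %u = 0x%08x\n", wb, adev->wb.wb[wb]);
	amdgpu_device_wb_free(adev, wb);
	return 0;
}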
/**
* amdgpu_device_resize_fb_bar - try to resize FB BAR
*
* @adev: amdgpu_device pointer
*
* Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
* driver loading by returning -ENODEV.
*/
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
struct pci_bus *root;
struct resource *res;
unsigned i;
u16 cmd;
int r;
/* Bypass for VF */
if (amdgpu_sriov_vf(adev))
return 0;
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
(pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
return 0;
/* Check if the root BUS has 64bit memory resources */
root = adev->pdev->bus;
while (root->parent)
root = root->parent;
pci_bus_for_each_resource(root, res, i) {
if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
res->start > 0x100000000ull)
break;
}
/* Trying to resize is pointless without a root hub window above 4GB */
if (!res)
return 0;
/* Limit the BAR size to what is available */
rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
rbar_size);
/* Disable memory decoding while we change the BAR addresses and size */
pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
pci_write_config_word(adev->pdev, PCI_COMMAND,
cmd & ~PCI_COMMAND_MEMORY);
/* Free the VRAM and doorbell BAR, we most likely need to move both. */
amdgpu_device_doorbell_fini(adev);
if (adev->asic_type >= CHIP_BONAIRE)
pci_release_resource(adev->pdev, 2);
pci_release_resource(adev->pdev, 0);
r = pci_resize_resource(adev->pdev, 0, rbar_size);
if (r == -ENOSPC)
DRM_INFO("Not enough PCI address space for a large BAR.");
else if (r && r != -ENOTSUPP)
DRM_ERROR("Problem resizing BAR0 (%d).", r);
pci_assign_unassigned_bus_resources(adev->pdev->bus);
/* When the doorbell or fb BAR isn't available we have no chance of
* using the device.
*/
r = amdgpu_device_doorbell_init(adev);
if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
return -ENODEV;
pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
return 0;
}
/*
* GPU helpers function.
*/
/**
 * amdgpu_device_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
uint32_t reg;
if (amdgpu_sriov_vf(adev))
return false;
if (amdgpu_passthrough(adev)) {
/* for FIJI: In the whole-GPU pass-through virtualization case, after VM reboot
 * some old SMC firmware still needs the driver to do a vPost, otherwise the
 * GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so we
 * force vPost for SMC versions below 22.15.
 */
if (adev->asic_type == CHIP_FIJI) {
int err;
uint32_t fw_ver;
err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
/* force vPost if an error occurred */
if (err)
return true;
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
if (fw_ver < 0x00160e00)
return true;
}
}
/* Don't post if we need to reset whole hive on init */
if (adev->gmc.xgmi.pending_reset)
return false;
if (adev->has_hw_reset) {
adev->has_hw_reset = false;
return true;
}
/* bios scratch used on CIK+ */
if (adev->asic_type >= CHIP_BONAIRE)
return amdgpu_atombios_scratch_need_asic_init(adev);
/* check MEM_SIZE for older asics */
reg = amdgpu_asic_get_config_memsize(adev);
if ((reg != 0) && (reg != 0xffffffff))
return false;
return true;
}
/* if we get transitioned to only one device, take VGA back */
/**
* amdgpu_device_vga_set_decode - enable/disable vga decode
*
* @pdev: PCI device pointer
* @state: enable/disable vga decode
*
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
bool state)
{
struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
amdgpu_asic_set_vga_state(adev, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
* amdgpu_device_check_block_size - validate the vm block size
*
* @adev: amdgpu_device pointer
*
* Validates the vm block size specified via module parameter.
* The vm block size defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory.
*/
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (amdgpu_vm_block_size == -1)
return;
if (amdgpu_vm_block_size < 9) {
dev_warn(adev->dev, "VM page table size (%d) too small\n",
amdgpu_vm_block_size);
amdgpu_vm_block_size = -1;
}
}
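/*
 * Worked example: with 4KB pages (12 offset bits) and the minimum
 * amdgpu_vm_block_size of 9, each page table holds 2^9 entries and so
 * covers 2^(12+9) = 2MB of address space; all remaining address bits
 * select page directory entries.
 */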
/**
* amdgpu_device_check_vm_size - validate the vm size
*
* @adev: amdgpu_device pointer
*
* Validates the vm size in GB specified via module parameter.
* The VM size is the size of the GPU virtual memory space in GB.
*/
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
/* no need to check the default value */
if (amdgpu_vm_size == -1)
return;
if (amdgpu_vm_size < 1) {
dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
amdgpu_vm_size);
amdgpu_vm_size = -1;
}
}
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
struct sysinfo si;
bool is_os_64 = (sizeof(void *) == 8);
uint64_t total_memory;
uint64_t dram_size_seven_GB = 0x1B8000000;
uint64_t dram_size_three_GB = 0xB8000000;
if (amdgpu_smu_memory_pool_size == 0)
return;
if (!is_os_64) {
DRM_WARN("Not 64-bit OS, feature not supported\n");
goto def_value;
}
si_meminfo(&si);
total_memory = (uint64_t)si.totalram * si.mem_unit;
if ((amdgpu_smu_memory_pool_size == 1) ||
(amdgpu_smu_memory_pool_size == 2)) {
if (total_memory < dram_size_three_GB)
goto def_value1;
} else if ((amdgpu_smu_memory_pool_size == 4) ||
(amdgpu_smu_memory_pool_size == 8)) {
if (total_memory < dram_size_seven_GB)
goto def_value1;
} else {
DRM_WARN("Smu memory pool size not supported\n");
goto def_value;
}
adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
return;
def_value1:
DRM_WARN("No enough system memory\n");
def_value:
adev->pm.smu_prv_buffer_size = 0;
}
static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
if (!(adev->flags & AMD_IS_APU) ||
adev->asic_type < CHIP_RAVEN)
return 0;
switch (adev->asic_type) {
case CHIP_RAVEN:
if (adev->pdev->device == 0x15dd)
adev->apu_flags |= AMD_APU_IS_RAVEN;
if (adev->pdev->device == 0x15d8)
adev->apu_flags |= AMD_APU_IS_PICASSO;
break;
case CHIP_RENOIR:
if ((adev->pdev->device == 0x1636) ||
(adev->pdev->device == 0x164c))
adev->apu_flags |= AMD_APU_IS_RENOIR;
else
adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
break;
case CHIP_VANGOGH:
adev->apu_flags |= AMD_APU_IS_VANGOGH;
break;
case CHIP_YELLOW_CARP:
break;
case CHIP_CYAN_SKILLFISH:
if (adev->pdev->device == 0x13FE)
adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
break;
default:
return -EINVAL;
}
return 0;
}
/**
* amdgpu_device_check_arguments - validate module params
*
* @adev: amdgpu_device pointer
*
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = 4;
} else if (!is_power_of_2(amdgpu_sched_jobs)) {
dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
}
if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
/* gart size must be greater or equal to 32M */
dev_warn(adev->dev, "gart size (%d) too small\n",
amdgpu_gart_size);
amdgpu_gart_size = -1;
}
if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
/* gtt size must be greater or equal to 32M */
dev_warn(adev->dev, "gtt size (%d) too small\n",
amdgpu_gtt_size);
amdgpu_gtt_size = -1;
}
/* valid range is between 4 and 9 inclusive */
if (amdgpu_vm_fragment_size != -1 &&
(amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
dev_warn(adev->dev, "valid range is between 4 and 9\n");
amdgpu_vm_fragment_size = -1;
}
if (amdgpu_sched_hw_submission < 2) {
dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
amdgpu_sched_hw_submission);
amdgpu_sched_hw_submission = 2;
} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
amdgpu_sched_hw_submission);
amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
}
amdgpu_device_check_smu_prv_buffer_size(adev);
amdgpu_device_check_vm_size(adev);
amdgpu_device_check_block_size(adev);
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
amdgpu_gmc_tmz_set(adev);
amdgpu_gmc_noretry_set(adev);
return 0;
}
/**
* amdgpu_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
* @state: vga_switcheroo state
*
 * Callback for the switcheroo driver. Suspends or resumes
 * the asic before or after it is powered up using ACPI methods.
*/
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
pr_info("switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
pci_set_power_state(pdev, PCI_D0);
amdgpu_device_load_pci_state(pdev);
r = pci_enable_device(pdev);
if (r)
DRM_WARN("pci_enable_device failed (%d)\n", r);
amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_info("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_suspend(dev, true);
amdgpu_device_cache_pci_state(pdev);
/* Shut down the device */
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3cold);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
/**
* amdgpu_switcheroo_can_switch - see if switcheroo state can change
*
* @pdev: pci dev pointer
*
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
* Returns true if the state can be changed, false if not.
*/
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
.set_gpu_state = amdgpu_switcheroo_set_state,
.reprobe = NULL,
.can_switch = amdgpu_switcheroo_can_switch,
};
/**
* amdgpu_device_ip_set_clockgating_state - set the CG state
*
* @dev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
* @state: clockgating state (gate or ungate)
*
* Sets the requested clockgating state for all instances of
* the hardware IP specified.
* Returns the error code from the last instance.
*/
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = dev;
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type != block_type)
continue;
if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
(void *)adev, state);
if (r)
DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
/**
* amdgpu_device_ip_set_powergating_state - set the PG state
*
* @dev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
* @state: powergating state (gate or ungate)
*
* Sets the requested powergating state for all instances of
* the hardware IP specified.
* Returns the error code from the last instance.
*/
int amdgpu_device_ip_set_powergating_state(void *dev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = dev;
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type != block_type)
continue;
if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
(void *)adev, state);
if (r)
DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
/**
* amdgpu_device_ip_get_clockgating_state - get the CG state
*
* @adev: amdgpu_device pointer
* @flags: clockgating feature flags
*
* Walks the list of IPs on the device and updates the clockgating
* flags for each IP.
* Updates @flags with the feature flags for each hardware IP where
* clockgating is enabled.
*/
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
u32 *flags)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
}
}
/**
* amdgpu_device_ip_wait_for_idle - wait for idle
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
*
 * Waits for the requested hardware IP to be idle.
* Returns 0 for success or a negative error code on failure.
*/
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type == block_type) {
r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
if (r)
return r;
break;
}
}
return 0;
}
/**
* amdgpu_device_ip_is_idle - is the hardware IP idle
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
*
* Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
*/
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type == block_type)
return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
}
return true;
}
/**
* amdgpu_device_ip_get_ip_block - get a hw IP pointer
*
* @adev: amdgpu_device pointer
* @type: Type of hardware IP (SMU, GFX, UVD, etc.)
*
* Returns a pointer to the hardware IP block structure
* if it exists for the asic, otherwise NULL.
*/
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
enum amd_ip_block_type type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].version->type == type)
return &adev->ip_blocks[i];
return NULL;
}
/**
* amdgpu_device_ip_block_version_cmp
*
* @adev: amdgpu_device pointer
* @type: enum amd_ip_block_type
* @major: major version
* @minor: minor version
*
 * Returns 0 if the IP block version is equal to or greater than the
 * given version, or 1 if it is smaller or the ip_block doesn't exist.
*/
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor)
{
struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
if (ip_block && ((ip_block->version->major > major) ||
((ip_block->version->major == major) &&
(ip_block->version->minor >= minor))))
return 0;
return 1;
}
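/*
 * Usage sketch (hypothetical check): gate a code path on the asic having
 * GFX IP version 9.0 or newer.
 */
static inline bool amdgpu_example_has_gfx9(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
						  9, 0) == 0;
}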
/**
* amdgpu_device_ip_block_add
*
* @adev: amdgpu_device pointer
* @ip_block_version: pointer to the IP to add
*
* Adds the IP block driver information to the collection of IPs
* on the asic.
*/
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
const struct amdgpu_ip_block_version *ip_block_version)
{
if (!ip_block_version)
return -EINVAL;
switch (ip_block_version->type) {
case AMD_IP_BLOCK_TYPE_VCN:
if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
return 0;
break;
case AMD_IP_BLOCK_TYPE_JPEG:
if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
return 0;
break;
default:
break;
}
DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
return 0;
}
/**
* amdgpu_device_enable_virtual_display - enable virtual display feature
*
* @adev: amdgpu_device pointer
*
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
*/
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
const char *pci_address_name = pci_name(adev->pdev);
char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
pciaddstr_tmp = pciaddstr;
while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
pciaddname = strsep(&pciaddname_tmp, ",");
if (!strcmp("all", pciaddname)
|| !strcmp(pci_address_name, pciaddname)) {
long num_crtc;
int res = -1;
adev->enable_virtual_display = true;
if (pciaddname_tmp)
res = kstrtol(pciaddname_tmp, 10,
&num_crtc);
if (!res) {
if (num_crtc < 1)
num_crtc = 1;
if (num_crtc > 6)
num_crtc = 6;
adev->mode_info.num_crtc = num_crtc;
} else {
adev->mode_info.num_crtc = 1;
}
break;
}
}
DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
amdgpu_virtual_display, pci_address_name,
adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
}
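/*
 * Example (hypothetical PCI address): the virtual_display parameter parsed
 * above is a semicolon-separated list of "<pci address>,<num crtcs>"
 * entries, with "all" matching every device:
 *
 *   modprobe amdgpu virtual_display=0000:04:00.0,2
 */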
/**
* amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
*
* @adev: amdgpu_device pointer
*
* Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
* the asic.
* Returns 0 on success, -EINVAL on failure.
*/
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
int err;
const struct gpu_info_firmware_header_v1_0 *hdr;
adev->firmware.gpu_info_fw = NULL;
if (adev->mman.discovery_bin) {
amdgpu_discovery_get_gfx_info(adev);
/*
* FIXME: The bounding box is still needed by Navi12, so
 * temporarily read it from gpu_info firmware. Should be dropped
* when DAL no longer needs it.
*/
if (adev->asic_type != CHIP_NAVI12)
return 0;
}
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_OLAND:
case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
#endif
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_VEGA20:
case CHIP_ALDEBARAN:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
default:
return 0;
case CHIP_VEGA10:
chip_name = "vega10";
break;
case CHIP_VEGA12:
chip_name = "vega12";
break;
case CHIP_RAVEN:
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
break;
case CHIP_ARCTURUS:
chip_name = "arcturus";
break;
case CHIP_RENOIR:
if (adev->apu_flags & AMD_APU_IS_RENOIR)
chip_name = "renoir";
else
chip_name = "green_sardine";
break;
case CHIP_NAVI10:
chip_name = "navi10";
break;
case CHIP_NAVI14:
chip_name = "navi14";
break;
case CHIP_NAVI12:
chip_name = "navi12";
break;
case CHIP_VANGOGH:
chip_name = "vangogh";
break;
case CHIP_YELLOW_CARP:
chip_name = "yellow_carp";
break;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
if (err) {
dev_err(adev->dev,
"Failed to load gpu_info firmware \"%s\"\n",
fw_name);
goto out;
}
err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
if (err) {
dev_err(adev->dev,
"Failed to validate gpu_info firmware \"%s\"\n",
fw_name);
goto out;
}
hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
switch (hdr->version_major) {
case 1:
{
const struct gpu_info_firmware_v1_0 *gpu_info_fw =
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
/*
 * Should be dropped when DAL no longer needs it.
*/
if (adev->asic_type == CHIP_NAVI12)
goto parse_soc_bounding_box;
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
adev->gfx.config.max_texture_channel_caches =
le32_to_cpu(gpu_info_fw->gc_num_tccs);
adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf =
le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd =
le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu =
le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
if (hdr->version_minor >= 1) {
const struct gpu_info_firmware_v1_1 *gpu_info_fw =
(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
adev->gfx.config.num_sc_per_sh =
le32_to_cpu(gpu_info_fw->num_sc_per_sh);
adev->gfx.config.num_packer_per_sc =
le32_to_cpu(gpu_info_fw->num_packer_per_sc);
}
parse_soc_bounding_box:
/*
 * SoC bounding box info is not integrated into the discovery table, so
 * we always need to parse it from the gpu_info firmware when needed.
*/
if (hdr->version_minor == 2) {
const struct gpu_info_firmware_v1_2 *gpu_info_fw =
(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
}
break;
}
default:
dev_err(adev->dev,
"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
err = -EINVAL;
goto out;
}
out:
return err;
}
/**
* amdgpu_device_ip_early_init - run early init for hardware IPs
*
* @adev: amdgpu_device pointer
*
* Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
* is the first stage in initializing the asic.
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
int i, r;
amdgpu_device_enable_virtual_display(adev);
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return r;
}
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_OLAND:
case CHIP_HAINAN:
adev->family = AMDGPU_FAMILY_SI;
r = si_set_ip_blocks(adev);
if (r)
return r;
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
if (adev->flags & AMD_IS_APU)
adev->family = AMDGPU_FAMILY_KV;
else
adev->family = AMDGPU_FAMILY_CI;
r = cik_set_ip_blocks(adev);
if (r)
return r;
break;
#endif
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->flags & AMD_IS_APU)
adev->family = AMDGPU_FAMILY_CZ;
else
adev->family = AMDGPU_FAMILY_VI;
r = vi_set_ip_blocks(adev);
if (r)
return r;
break;
default:
r = amdgpu_discovery_set_ip_blocks(adev);
if (r)
return r;
break;
}
amdgpu_amdkfd_device_probe(adev);
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d <%s>\n",
i, adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
} else {
if (adev->ip_blocks[i].version->funcs->early_init) {
r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
if (r == -ENOENT) {
adev->ip_blocks[i].status.valid = false;
} else if (r) {
DRM_ERROR("early_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
} else {
adev->ip_blocks[i].status.valid = true;
}
} else {
adev->ip_blocks[i].status.valid = true;
}
}
/* get the vbios after the asic_funcs are set up */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
r = amdgpu_device_parse_gpu_info_fw(adev);
if (r)
return r;
/* Read BIOS */
if (!amdgpu_get_bios(adev))
return -EINVAL;
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
return r;
}
/* get pf2vf msg info at its earliest time */
if (amdgpu_sriov_vf(adev))
amdgpu_virt_init_data_exchange(adev);
}
}
adev->cg_flags &= amdgpu_cg_mask;
adev->pg_flags &= amdgpu_pg_mask;
return 0;
}
static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.sw)
continue;
if (adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
(amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
r = adev->ip_blocks[i].version->funcs->hw_init(adev);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_blocks[i].status.hw = true;
}
}
return 0;
}
static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.sw)
continue;
if (adev->ip_blocks[i].status.hw)
continue;
r = adev->ip_blocks[i].version->funcs->hw_init(adev);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_blocks[i].status.hw = true;
}
return 0;
}
static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
int r = 0;
int i;
uint32_t smu_version;
if (adev->asic_type >= CHIP_VEGA10) {
for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
continue;
if (!adev->ip_blocks[i].status.sw)
continue;
/* no need to do the fw loading again if already done */
if (adev->ip_blocks[i].status.hw)
break;
if (amdgpu_in_reset(adev) || adev->in_suspend) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
DRM_ERROR("resume of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
} else {
r = adev->ip_blocks[i].version->funcs->hw_init(adev);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
adev->ip_blocks[i].status.hw = true;
break;
}
}
if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
return r;
}
/**
* amdgpu_device_ip_init - run init for hardware IPs
*
* @adev: amdgpu_device pointer
*
* Main initialization pass for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the sw_init and hw_init callbacks
* are run. sw_init initializes the software state associated with each IP
* and hw_init initializes the hardware associated with each IP.
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
int i, r;
r = amdgpu_ras_init(adev);
if (r)
return r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
if (r) {
DRM_ERROR("sw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
goto init_failed;
}
adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
goto init_failed;
}
r = amdgpu_device_wb_init(adev);
if (r) {
DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
/* right after GMC hw init, we create CSA */
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("allocate CSA failed %d\n", r);
goto init_failed;
}
}
}
}
if (amdgpu_sriov_vf(adev))
amdgpu_virt_init_data_exchange(adev);
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
goto init_failed;
}
r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init is complete */
if (r)
goto init_failed;
r = amdgpu_device_ip_hw_init_phase1(adev);
if (r)
goto init_failed;
r = amdgpu_device_fw_loading(adev);
if (r)
goto init_failed;
r = amdgpu_device_ip_hw_init_phase2(adev);
if (r)
goto init_failed;
/*
* Retired pages will be loaded from eeprom and reserved here.
* This must be called after amdgpu_device_ip_hw_init_phase2, since
* for some ASICs the RAS EEPROM code relies on the SMU being fully
* functional for I2C communication, which is only true at this point.
*
* amdgpu_ras_recovery_init may fail, but the upper level only cares
* about failures caused by a bad GPU state, and stops the amdgpu init
* process accordingly. For other failures it still releases all the
* resources and prints an error message, rather than returning a
* negative value to the upper level.
*
* Note: theoretically, this should be called before all VRAM
* allocations, to protect retired pages from being misused.
*/
r = amdgpu_ras_recovery_init(adev);
if (r)
goto init_failed;
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(adev);
/* Don't init kfd if the whole hive needs to be reset during init */
if (!adev->gmc.xgmi.pending_reset)
amdgpu_amdkfd_device_init(adev);
amdgpu_fru_get_product_info(adev);
init_failed:
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, true);
return r;
}
/**
* amdgpu_device_fill_reset_magic - cache reset magic from the gart pointer
*
* @adev: amdgpu_device pointer
*
* Caches the current contents at the gart pointer in VRAM as a reset magic
* value. The driver calls this function before a GPU reset. If the value in
* VRAM is retained after the reset, VRAM has not been lost. Some GPU resets
* may destroy VRAM contents.
*/
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}
/**
* amdgpu_device_check_vram_lost - check if vram is valid
*
* @adev: amdgpu_device pointer
*
* Checks the reset magic value written to the gart pointer in VRAM.
* The driver calls this after a GPU reset to see if the contents of
* VRAM were lost or not.
* Returns true if VRAM is lost, false if not.
*/
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
if (memcmp(adev->gart.ptr, adev->reset_magic,
AMDGPU_RESET_MAGIC_NUM))
return true;
if (!amdgpu_in_reset(adev))
return false;
/*
* For all ASICs with baco/mode1 reset, the VRAM is
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
return true;
default:
return false;
}
}
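/*
* Illustrative sketch only (assumed flow, not the actual reset path):
* the two helpers above are intended to pair up across a reset roughly
* like this:
*
*	amdgpu_device_fill_reset_magic(adev);
*	... perform the ASIC reset ...
*	if (amdgpu_device_check_vram_lost(adev))
*		... restore buffers that lived in VRAM ...
*/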
/**
* amdgpu_device_set_cg_state - set clockgating for amdgpu device
*
* @adev: amdgpu_device pointer
* @state: clockgating state (gate or ungate)
*
* The list of all the hardware IPs that make up the asic is walked and the
* set_clockgating_state callbacks are run.
* On the late init pass this enables clockgating for hardware IPs; on
* the fini or suspend pass it disables clockgating.
* Returns 0 on success, negative error code on failure.
*/
int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
int i, j, r;
if (amdgpu_emu_mode == 1)
return 0;
for (j = 0; j < adev->num_ip_blocks; j++) {
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
/* skip CG for GFX on S0ix */
if (adev->in_s0ix &&
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
state);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
}
return 0;
}
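/**
* amdgpu_device_set_pg_state - set powergating for amdgpu device
*
* @adev: amdgpu_device pointer
* @state: powergating state (gate or ungate)
*
* The list of all the hardware IPs that make up the asic is walked and the
* set_powergating_state callbacks are run. As with clockgating above, the
* late init pass enables powergating for hardware IPs and the fini or
* suspend pass disables it.
* Returns 0 on success, negative error code on failure.
*/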
int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
enum amd_powergating_state state)
{
int i, j, r;
if (amdgpu_emu_mode == 1)
return 0;
for (j = 0; j < adev->num_ip_blocks; j++) {
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
/* skip PG for GFX on S0ix */
if (adev->in_s0ix &&
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
continue;
/* skip PG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
state);
if (r) {
DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
}
return 0;
}
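/**
* amdgpu_device_enable_mgpu_fan_boost - enable fan boost on multi-dGPU systems
*
* Walks all registered GPU instances and enables the mGPU fan boost
* feature on every dGPU that does not have it enabled yet. Does nothing
* unless at least two dGPUs are present in the system.
* Returns 0 on success, negative error code on failure.
*/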
static int amdgpu_device_enable_mgpu_fan_boost(void)
{
struct amdgpu_gpu_instance *gpu_ins;
struct amdgpu_device *adev;
int i, ret = 0;
mutex_lock(&mgpu_info.mutex);
/*
* MGPU fan boost feature should be enabled
* only when there are two or more dGPUs in
* the system
*/
if (mgpu_info.num_dgpu < 2)
goto out;
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
if (!(adev->flags & AMD_IS_APU) &&
!gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
break;
gpu_ins->mgpu_fan_enabled = 1;
}
}
out:
mutex_unlock(&mgpu_info.mutex);
return ret;
}
/**
* amdgpu_device_ip_late_init - run late init for hardware IPs
*
* @adev: amdgpu_device pointer
*
* Late initialization pass for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the late_init callbacks are run.
* late_init covers any special initialization that an IP requires
* after all of the IPs have been initialized or something that needs to happen
* late in the init process.
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
struct amdgpu_gpu_instance *gpu_instance;
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
if (r) {
DRM_ERROR("late_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
adev->ip_blocks[i].status.late_initialized = true;
}
amdgpu_ras_set_error_query_ready(adev, true);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
amdgpu_device_fill_reset_magic(adev);
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
/* For XGMI + passthrough configuration on arcturus, enable light SBR */
if (adev->asic_type == CHIP_ARCTURUS &&
amdgpu_passthrough(adev) &&
adev->gmc.xgmi.num_physical_nodes > 1)
smu_set_light_sbr(&adev->smu, true);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
mutex_lock(&mgpu_info.mutex);
/*
* Reset the device p-state to low, as it boots in the high state.
*
* This should be performed only after all devices from the same
* hive have been initialized.
*
* However, the number of devices in a hive is not known in
* advance; it is counted one by one as the devices initialize.
*
* So we wait until all XGMI-interlinked devices have been
* initialized. This may add some delay, as those devices may come
* from different hives. But that should be OK.
*/
if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
for (i = 0; i < mgpu_info.num_gpu; i++) {
gpu_instance = &(mgpu_info.gpu_ins[i]);
if (gpu_instance->adev->flags & AMD_IS_APU)
continue;
r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
AMDGPU_XGMI_PSTATE_MIN);
if (r) {
DRM_ERROR("pstate setting failed (%d).\n", r);
break;
}
}
}
mutex_unlock(&mgpu_info.mutex);
}
return 0;
}
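/**
* amdgpu_device_ip_fini_early - run early fini for hardware IPs
*
* @adev: amdgpu_device pointer
*
* Early teardown pass. Runs the early_fini callbacks, ungates power and
* clock gating, then runs hw_fini for every IP block (SMC first, the rest
* in reverse order). Software state is torn down later, in
* amdgpu_device_ip_fini.
*/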
static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].version->funcs->early_fini)
continue;
r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
if (r) {
DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
}
amdgpu_amdkfd_suspend(adev, false);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
adev->ip_blocks[i].status.hw = false;
break;
}
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.hw)
continue;
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
adev->ip_blocks[i].status.hw = false;
}
if (amdgpu_sriov_vf(adev)) {
if (amdgpu_virt_release_full_gpu(adev, false))
DRM_ERROR("failed to release exclusive mode on fini\n");
}
return 0;
}
/**
* amdgpu_device_ip_fini - run fini for hardware IPs
*
* @adev: amdgpu_device pointer
*
* Main teardown pass for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
* are run. hw_fini tears down the hardware associated with each IP
* and sw_fini tears down any software state associated with each IP.
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
amdgpu_virt_release_ras_err_handler_data(adev);
amdgpu_ras_pre_fini(adev);
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
amdgpu_amdkfd_device_fini_sw(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.sw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_ucode_free_bo(adev);
amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
amdgpu_device_vram_scratch_fini(adev);
amdgpu_ib_pool_fini(adev);
}
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
adev->ip_blocks[i].status.sw = false;
adev->ip_blocks[i].status.valid = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.late_initialized)
continue;
if (adev->ip_blocks[i].version->funcs->late_fini)
adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
adev->ip_blocks[i].status.late_initialized = false;
}
amdgpu_ras_fini(adev);
return 0;
}
/**
* amdgpu_device_delayed_init_work_handler - work handler for IB tests
*
* @work: work_struct.
*/
static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, delayed_init_work.work);
int r;
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
}
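/**
* amdgpu_device_delay_enable_gfx_off - delayed work handler entering GFXOFF
*
* @work: work_struct.
*
* Asks the SMU to powergate the GFX block so the GPU can enter the GFXOFF
* state. The WARN_ON_ONCE checks guard against running with GFXOFF already
* enabled or with GFXOFF disable requests still pending.
*/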
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
WARN_ON_ONCE(adev->gfx.gfx_off_state);
WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
adev->gfx.gfx_off_state = true;
}
/**
* amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
*
* @adev: amdgpu_device pointer
*
* Main suspend function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked, clockgating is disabled and the
* suspend callbacks are run. suspend puts the hardware and software state
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
/* displays are handled separately */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
continue;
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
if (r) {
DRM_ERROR("suspend of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_blocks[i].status.hw = false;
}
return 0;
}
/**
* amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
*
* @adev: amdgpu_device pointer
*
* Main suspend function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked, clockgating is disabled and the
* suspend callbacks are run. suspend puts the hardware and software state
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
int i, r;
if (adev->in_s0ix)
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
/* displays are handled in phase1 */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
continue;
/* PSP lost connection when err_event_athub occurs */
if (amdgpu_ras_intr_triggered() &&
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
adev->ip_blocks[i].status.hw = false;
continue;
}
/* skip unnecessary suspend if we have not initialized them yet */
if (adev->gmc.xgmi.pending_reset &&
!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
adev->ip_blocks[i].status.hw = false;
continue;
}
/* skip suspend of gfx and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always-on hardware,
* so there is no need to suspend it.
*/
if (adev->in_s0ix &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
adev