author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-18 14:01:08 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-18 14:01:08 -0700
commit | 0a693ab6b6b2af0a230e98323023acd3678b0f81 (patch)
tree | 9ffd73de7e6d342a106b1414dacef8115a3e209d /drivers
parent | 7a62711aacda8887d94c40daa199b37abb1d54e1 (diff)
parent | 3668f0df6e62cd989909f40669bbe585e8dd51ae (diff)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
"You'll be terribly disappointed in this, I'm not trying to sneak any
features in or anything, its mostly radeon and intel fixes, a couple
of ARM driver fixes"
* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (34 commits)
drm/radeon/dpm: add debugfs support for RS780/RS880 (v3)
drm/radeon/dpm/atom: fix broken gcc harder
drm/radeon/dpm/atom: restructure logic to work around a compiler bug
drm/radeon/dpm: fix atom vram table parsing
drm/radeon: fix an endian bug in atom table parsing
drm/radeon: add a module parameter to disable aspm
drm/rcar-du: Use the GEM PRIME helpers
drm/shmobile: Use the GEM PRIME helpers
uvesafb: Really allow mtrr being 0, as documented and warn()ed
radeon kms: do not flush uninitialized hotplug work
drm/radeon/dpm/sumo: handle boost states properly when forcing a perf level
drm/radeon: align VM PTBs (Page Table Blocks) to 32K
drm/radeon: allow selection of alignment in the sub-allocator
drm/radeon: never unpin UVD bo v3
drm/radeon: fix UVD fence emit
drm/radeon: add fault decode function for CIK
drm/radeon: add fault decode function for SI (v2)
drm/radeon: add fault decode function for cayman/TN (v2)
drm/radeon: use radeon device for request firmware
drm/radeon: add missing ttm_eu_backoff_reservation to radeon_bo_list_validate
...
Diffstat (limited to 'drivers')
39 files changed, 918 insertions, 277 deletions
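Note on the new ASPM knob listed in the shortlog ("drm/radeon: add a module parameter to disable aspm"): the radeon_drv.c and evergreen.c hunks below wire it up with the usual module_param pattern, sketched here for orientation only (identifiers are taken from the diff; the static qualifier and the trimmed function body are simplifications for a self-contained fragment). Users toggle it as radeon.aspm=0/1/-1 on the kernel command line or via modprobe.

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* mirrors the radeon_drv.c hunk: -1 = auto (default), 0 = disable, 1 = enable */
    static int radeon_aspm = -1;
    module_param_named(aspm, radeon_aspm, int, 0444);
    MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");

    /* consumers such as evergreen_program_aspm() bail out early when forced off */
    static void program_aspm_example(void)
    {
            if (radeon_aspm == 0)
                    return;
            /* ... ASPM programming continues as before ... */
    }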
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 062cbda1bf4..f4af1ca0fb6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -123,10 +123,10 @@ module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 060 MODULE_PARM_DESC(preliminary_hw_support, "Enable preliminary hardware support. (default: false)"); -int i915_disable_power_well __read_mostly = 0; +int i915_disable_power_well __read_mostly = 1; module_param_named(disable_power_well, i915_disable_power_well, int, 0600); MODULE_PARM_DESC(disable_power_well, - "Disable the power well when possible (default: false)"); + "Disable the power well when possible (default: true)"); int i915_enable_ips __read_mostly = 1; module_param_named(enable_ips, i915_enable_ips, int, 0600); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4200c32407e..97afd2639fb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1880,6 +1880,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, u32 seqno = intel_ring_get_seqno(ring); BUG_ON(ring == NULL); + if (obj->ring != ring && obj->last_write_seqno) { + /* Keep the seqno relative to the current ring */ + obj->last_write_seqno = seqno; + } obj->ring = ring; /* Add a reference if we're newly entering the active list. */ @@ -2653,7 +2657,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, drm_i915_private_t *dev_priv = dev->dev_private; int fence_reg; int fence_pitch_shift; - uint64_t val; if (INTEL_INFO(dev)->gen >= 6) { fence_reg = FENCE_REG_SANDYBRIDGE_0; @@ -2663,8 +2666,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, fence_pitch_shift = I965_FENCE_PITCH_SHIFT; } + fence_reg += reg * 8; + + /* To w/a incoherency with non-atomic 64-bit register updates, + * we split the 64-bit update into two 32-bit writes. In order + * for a partial fence not to be evaluated between writes, we + * precede the update with write to turn off the fence register, + * and only enable the fence as the last step. + * + * For extra levels of paranoia, we make sure each step lands + * before applying the next step. 
+ */ + I915_WRITE(fence_reg, 0); + POSTING_READ(fence_reg); + if (obj) { u32 size = obj->gtt_space->size; + uint64_t val; val = (uint64_t)((obj->gtt_offset + size - 4096) & 0xfffff000) << 32; @@ -2673,12 +2691,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; - } else - val = 0; - fence_reg += reg * 8; - I915_WRITE64(fence_reg, val); - POSTING_READ(fence_reg); + I915_WRITE(fence_reg + 4, val >> 32); + POSTING_READ(fence_reg + 4); + + I915_WRITE(fence_reg + 0, val); + POSTING_READ(fence_reg); + } else { + I915_WRITE(fence_reg + 4, 0); + POSTING_READ(fence_reg + 4); + } } static void i915_write_fence_reg(struct drm_device *dev, int reg, @@ -2796,56 +2818,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv, return fence - dev_priv->fence_regs; } -struct write_fence { - struct drm_device *dev; - struct drm_i915_gem_object *obj; - int fence; -}; - -static void i915_gem_write_fence__ipi(void *data) -{ - struct write_fence *args = data; - - /* Required for SNB+ with LLC */ - wbinvd(); - - /* Required for VLV */ - i915_gem_write_fence(args->dev, args->fence, args->obj); -} - static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *fence, bool enable) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - struct write_fence args = { - .dev = obj->base.dev, - .fence = fence_number(dev_priv, fence), - .obj = enable ? obj : NULL, - }; - - /* In order to fully serialize access to the fenced region and - * the update to the fence register we need to take extreme - * measures on SNB+. In theory, the write to the fence register - * flushes all memory transactions before, and coupled with the - * mb() placed around the register write we serialise all memory - * operations with respect to the changes in the tiler. Yet, on - * SNB+ we need to take a step further and emit an explicit wbinvd() - * on each processor in order to manually flush all memory - * transactions before updating the fence register. - * - * However, Valleyview complicates matter. There the wbinvd is - * insufficient and unlike SNB/IVB requires the serialising - * register write. (Note that that register write by itself is - * conversely not sufficient for SNB+.) To compromise, we do both. - */ - if (INTEL_INFO(args.dev)->gen >= 6) - on_each_cpu(i915_gem_write_fence__ipi, &args, 1); - else - i915_gem_write_fence(args.dev, args.fence, args.obj); + int reg = fence_number(dev_priv, fence); + + i915_gem_write_fence(obj->base.dev, reg, enable ? 
obj : NULL); if (enable) { - obj->fence_reg = args.fence; + obj->fence_reg = reg; fence->obj = obj; list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); } else { @@ -4611,7 +4594,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) if (obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; - list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list) + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) if (obj->pin_count == 0 && obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index b7397123401..26e162bb3a5 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -75,7 +75,12 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) case DP_LINK_BW_1_62: case DP_LINK_BW_2_7: break; + case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ + max_link_bw = DP_LINK_BW_2_7; + break; default: + WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", + max_link_bw); max_link_bw = DP_LINK_BW_1_62; break; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ccbdd83f522..d10e6735771 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5500,9 +5500,38 @@ void intel_gt_init(struct drm_device *dev) if (IS_VALLEYVIEW(dev)) { dev_priv->gt.force_wake_get = vlv_force_wake_get; dev_priv->gt.force_wake_put = vlv_force_wake_put; - } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev)) { dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; + } else if (IS_IVYBRIDGE(dev)) { + u32 ecobus; + + /* IVB configs may use multi-threaded forcewake */ + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. 
+ */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + dev_priv->gt.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->gt.force_wake_put = + __gen6_gt_force_wake_mt_put; + } else { + DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); + DRM_INFO("when using vblank-synced partial screen updates.\n"); + dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; + dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; + } } else if (IS_GEN6(dev)) { dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e51ab552046..664118d8c1d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -379,6 +379,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) return I915_READ(acthd_reg); } +static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + u32 addr; + + addr = dev_priv->status_page_dmah->busaddr; + if (INTEL_INFO(ring->dev)->gen >= 4) + addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; + I915_WRITE(HWS_PGA, addr); +} + static int init_ring_common(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; @@ -390,6 +401,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) if (HAS_FORCE_WAKE(dev)) gen6_gt_force_wake_get(dev_priv); + if (I915_NEED_GFX_HWS(dev)) + intel_ring_setup_status_page(ring); + else + ring_setup_phys_status_page(ring); + /* Stop the ring if it's running. 
*/ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); @@ -518,9 +534,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) struct pipe_control *pc = ring->private; struct drm_i915_gem_object *obj; - if (!ring->private) - return; - obj = pc->obj; kunmap(sg_page(obj->pages->sgl)); @@ -528,7 +541,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) drm_gem_object_unreference(&obj->base); kfree(pc); - ring->private = NULL; } static int init_render_ring(struct intel_ring_buffer *ring) @@ -601,7 +613,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) if (HAS_BROKEN_CS_TLB(dev)) drm_gem_object_unreference(to_gem_object(ring->private)); - cleanup_pipe_control(ring); + if (INTEL_INFO(dev)->gen >= 5) + cleanup_pipe_control(ring); + + ring->private = NULL; } static void @@ -1223,7 +1238,6 @@ static int init_status_page(struct intel_ring_buffer *ring) ring->status_page.obj = obj; memset(ring->status_page.page_addr, 0, PAGE_SIZE); - intel_ring_setup_status_page(ring); DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr); @@ -1237,10 +1251,9 @@ err: return ret; } -static int init_phys_hws_pga(struct intel_ring_buffer *ring) +static int init_phys_status_page(struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; - u32 addr; if (!dev_priv->status_page_dmah) { dev_priv->status_page_dmah = @@ -1249,11 +1262,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring) return -ENOMEM; } - addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(ring->dev)->gen >= 4) - addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; - I915_WRITE(HWS_PGA, addr); - ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; memset(ring->status_page.page_addr, 0, PAGE_SIZE); @@ -1281,7 +1289,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, return ret; } else { BUG_ON(ring->id != RCS); - ret = init_phys_hws_pga(ring); + ret = init_phys_status_page(ring); if (ret) return ret; } @@ -1893,7 +1901,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) } if (!I915_NEED_GFX_HWS(dev)) { - ret = init_phys_hws_pga(ring); + ret = init_phys_status_page(ring); if (ret) return ret; } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ed1d9102592..6dacec4e209 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -22,7 +22,6 @@ * Authors: Alex Deucher */ #include <linux/firmware.h> -#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> #include "drmP.h" @@ -742,7 +741,6 @@ static int ci_mc_load_microcode(struct radeon_device *rdev) */ static int cik_init_microcode(struct radeon_device *rdev) { - struct platform_device *pdev; const char *chip_name; size_t pfp_req_size, me_req_size, ce_req_size, mec_req_size, rlc_req_size, mc_req_size, @@ -752,13 +750,6 @@ static int cik_init_microcode(struct radeon_device *rdev) DRM_DEBUG("\n"); - pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); - err = IS_ERR(pdev); - if (err) { - printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); - return -EINVAL; - } - switch (rdev->family) { case CHIP_BONAIRE: chip_name = "BONAIRE"; @@ -794,7 +785,7 @@ static int cik_init_microcode(struct radeon_device *rdev) DRM_INFO("Loading %s Microcode\n", chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); - err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); if (err) goto 
out; if (rdev->pfp_fw->size != pfp_req_size) { @@ -806,7 +797,7 @@ static int cik_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); - err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->me_fw->size != me_req_size) { @@ -817,7 +808,7 @@ static int cik_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); - err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->ce_fw->size != ce_req_size) { @@ -828,7 +819,7 @@ static int cik_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); - err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->mec_fw->size != mec_req_size) { @@ -839,7 +830,7 @@ static int cik_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); - err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->rlc_fw->size != rlc_req_size) { @@ -850,7 +841,7 @@ static int cik_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); - err = request_firmware(&rdev->sdma_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->sdma_fw->size != sdma_req_size) { @@ -863,7 +854,7 @@ static int cik_init_microcode(struct radeon_device *rdev) /* No MC ucode on APUs */ if (!(rdev->flags & RADEON_IS_IGP)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); - err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->mc_fw->size != mc_req_size) { @@ -875,8 +866,6 @@ static int cik_init_microcode(struct radeon_device *rdev) } out: - platform_device_unregister(pdev); - if (err) { if (err != -EINVAL) printk(KERN_ERR @@ -4453,6 +4442,29 @@ void cik_vm_fini(struct radeon_device *rdev) } /** + * cik_vm_decode_fault - print human readable fault info + * + * @rdev: radeon_device pointer + * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value + * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value + * + * Print human readable fault information (CIK). + */ +static void cik_vm_decode_fault(struct radeon_device *rdev, + u32 status, u32 addr, u32 mc_client) +{ + u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; + u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; + u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; + char *block = (char *)&mc_client; + + printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", + protections, vmid, addr, + (status & MEMORY_CLIENT_RW_MASK) ? 
"write" : "read", + block, mc_id); +} + +/** * cik_vm_flush - cik vm flush using the CP * * @rdev: radeon_device pointer @@ -5507,6 +5519,7 @@ int cik_irq_process(struct radeon_device *rdev) u32 ring_index; bool queue_hotplug = false; bool queue_reset = false; + u32 addr, status, mc_client; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; @@ -5742,11 +5755,15 @@ restart_ih: break; case 146: case 147: + addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); + status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); + mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); + addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); + status); + cik_vm_decode_fault(rdev, status, addr, mc_client); /* reset addr and status */ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; @@ -6961,7 +6978,7 @@ int cik_uvd_resume(struct radeon_device *rdev) /* programm the VCPU memory controller bits 0-27 */ addr = rdev->uvd.gpu_addr >> 3; - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; WREG32(UVD_VCPU_CACHE_OFFSET0, addr); WREG32(UVD_VCPU_CACHE_SIZE0, size); diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 63514b95889..7e9275eaef8 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -136,6 +136,22 @@ #define VM_INVALIDATE_RESPONSE 0x147c #define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC +#define PROTECTIONS_MASK (0xf << 0) +#define PROTECTIONS_SHIFT 0 + /* bit 0: range + * bit 1: pde0 + * bit 2: valid + * bit 3: read + * bit 4: write + */ +#define MEMORY_CLIENT_ID_MASK (0xff << 12) +#define MEMORY_CLIENT_ID_SHIFT 12 +#define MEMORY_CLIENT_RW_MASK (1 << 24) +#define MEMORY_CLIENT_RW_SHIFT 24 +#define FAULT_VMID_MASK (0xf << 25) +#define FAULT_VMID_SHIFT 25 + +#define VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT 0x14E4 #define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e49059dc9b8..038dcac7670 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -139,6 +139,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev); void evergreen_program_aspm(struct radeon_device *rdev); extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, int ring, u32 cp_int_cntl); +extern void cayman_vm_decode_fault(struct radeon_device *rdev, + u32 status, u32 addr); static const u32 evergreen_golden_registers[] = { @@ -4586,6 +4588,7 @@ int evergreen_irq_process(struct radeon_device *rdev) bool queue_hotplug = false; bool queue_hdmi = false; bool queue_thermal = false; + u32 status, addr; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; @@ -4872,11 +4875,14 @@ restart_ih: break; case 146: case 147: + addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); + status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); + addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); + status); + cayman_vm_decode_fault(rdev, status, addr); /* reset addr and status */ 
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; @@ -5509,6 +5515,9 @@ void evergreen_program_aspm(struct radeon_device *rdev) */ bool fusion_platform = false; + if (radeon_aspm == 0) + return; + if (!(rdev->flags & RADEON_IS_PCIE)) return; diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index b9c6f7675e5..b0d3fb34141 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -177,6 +177,9 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode uint32_t offset; ssize_t err; + if (!dig || !dig->afmt) + return; + /* Silent, r600_hdmi_enable will raise WARN for us */ if (!dig->afmt->enabled) return; @@ -280,6 +283,9 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + if (!dig || !dig->afmt) + return; + /* Silent, r600_hdmi_enable will raise WARN for us */ if (enable && dig->afmt->enabled) return; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index f30127cb30e..56bd4f3be4f 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -22,7 +22,6 @@ * Authors: Alex Deucher */ #include <linux/firmware.h> -#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> #include <drm/drmP.h> @@ -684,7 +683,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev) int ni_init_microcode(struct radeon_device *rdev) { - struct platform_device *pdev; const char *chip_name; const char *rlc_chip_name; size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; @@ -694,13 +692,6 @@ int ni_init_microcode(struct radeon_device *rdev) DRM_DEBUG("\n"); - pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); - err = IS_ERR(pdev); - if (err) { - printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); - return -EINVAL; - } - switch (rdev->family) { case CHIP_BARTS: chip_name = "BARTS"; @@ -753,7 +744,7 @@ int ni_init_microcode(struct radeon_device *rdev) DRM_INFO("Loading %s Microcode\n", chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); - err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->pfp_fw->size != pfp_req_size) { @@ -765,7 +756,7 @@ int ni_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); - err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->me_fw->size != me_req_size) { @@ -776,7 +767,7 @@ int ni_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); - err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->rlc_fw->size != rlc_req_size) { @@ -789,7 +780,7 @@ int ni_init_microcode(struct radeon_device *rdev) /* no MC ucode on TN */ if (!(rdev->flags & RADEON_IS_IGP)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); - err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->mc_fw->size != mc_req_size) { @@ -802,7 +793,7 @@ int ni_init_microcode(struct radeon_device *rdev) if ((rdev->family >= CHIP_BARTS) && (rdev->family <= 
CHIP_CAYMAN)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); - err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->smc_fw->size != smc_req_size) { @@ -814,8 +805,6 @@ int ni_init_microcode(struct radeon_device *rdev) } out: - platform_device_unregister(pdev); - if (err) { if (err != -EINVAL) printk(KERN_ERR @@ -2461,6 +2450,167 @@ void cayman_vm_fini(struct radeon_device *rdev) { } +/** + * cayman_vm_decode_fault - print human readable fault info + * + * @rdev: radeon_device pointer + * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value + * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value + * + * Print human readable fault information (cayman/TN). + */ +void cayman_vm_decode_fault(struct radeon_device *rdev, + u32 status, u32 addr) +{ + u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; + u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; + u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; + char *block; + + switch (mc_id) { + case 32: + case 16: + case 96: + case 80: + case 160: + case 144: + case 224: + case 208: + block = "CB"; + break; + case 33: + case 17: + case 97: + case 81: + case 161: + case 145: + case 225: + case 209: + block = "CB_FMASK"; + break; + case 34: + case 18: + case 98: + case 82: + case 162: + case 146: + case 226: + case 210: + block = "CB_CMASK"; + break; + case 35: + case 19: + case 99: + case 83: + case 163: + case 147: + case 227: + case 211: + block = "CB_IMMED"; + break; + case 36: + case 20: + case 100: + case 84: + case 164: + case 148: + case 228: + case 212: + block = "DB"; + break; + case 37: + case 21: + case 101: + case 85: + case 165: + case 149: + case 229: + case 213: + block = "DB_HTILE"; + break; + case 38: + case 22: + case 102: + case 86: + case 166: + case 150: + case 230: + case 214: + block = "SX"; + break; + case 39: + case 23: + case 103: + case 87: + case 167: + case 151: + case 231: + case 215: + block = "DB_STEN"; + break; + case 40: + case 24: + case 104: + case 88: + case 232: + case 216: + case 168: + case 152: + block = "TC_TFETCH"; + break; + case 41: + case 25: + case 105: + case 89: + case 233: + case 217: + case 169: + case 153: + block = "TC_VFETCH"; + break; + case 42: + case 26: + case 106: + case 90: + case 234: + case 218: + case 170: + case 154: + block = "VC"; + break; + case 112: + block = "CP"; + break; + case 113: + case 114: + block = "SH"; + break; + case 115: + block = "VGT"; + break; + case 178: + block = "IH"; + break; + case 51: + block = "RLC"; + break; + case 55: + block = "DMA"; + break; + case 56: + block = "HDP"; + break; + default: + block = "unknown"; + break; + } + + printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", + protections, vmid, addr, + (status & MEMORY_CLIENT_RW_MASK) ? 
"write" : "read", + block, mc_id); +} + #define R600_ENTRY_VALID (1 << 0) #define R600_PTE_SYSTEM (1 << 1) #define R600_PTE_SNOOPED (1 << 2) diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index fe24a93542e..22421bc80c0 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -133,6 +133,22 @@ #define VM_CONTEXT1_CNTL2 0x1434 #define VM_INVALIDATE_REQUEST 0x1478 #define VM_INVALIDATE_RESPONSE 0x147c +#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC +#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC +#define PROTECTIONS_MASK (0xf << 0) +#define PROTECTIONS_SHIFT 0 + /* bit 0: range + * bit 2: pde0 + * bit 3: valid + * bit 4: read + * bit 5: write + */ +#define MEMORY_CLIENT_ID_MASK (0xff << 12) +#define MEMORY_CLIENT_ID_SHIFT 12 +#define MEMORY_CLIENT_RW_MASK (1 << 24) +#define MEMORY_CLIENT_RW_SHIFT 24 +#define FAULT_VMID_MASK (0x7 << 25) +#define FAULT_VMID_SHIFT 25 #define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 #define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c9affefd79f..75349cdaa84 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -39,7 +39,6 @@ #include "atom.h" #include <linux/firmware.h> -#include <linux/platform_device.h> #include <linux/module.h> #include "r100_reg_safe.h" @@ -989,18 +988,11 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) /* Load the microcode for the CP */ static int r100_cp_init_microcode(struct radeon_device *rdev) { - struct platform_device *pdev; const char *fw_name = NULL; int err; DRM_DEBUG_KMS("\n"); - pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); - err = IS_ERR(pdev); - if (err) { - printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); - return -EINVAL; - } if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || (rdev->family == CHIP_RS200)) { @@ -1042,8 +1034,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) fw_name = FIRMWARE_R520; } - err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); - platform_device_unregister(pdev); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); if (err) { printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", fw_name); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2d3655f7f41..393880a0941 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -28,7 +28,6 @@ #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/firmware.h> -#include <linux/platform_device.h> #include <linux/module.h> #include <drm/drmP.h> #include <drm/radeon_drm.h> @@ -2144,7 +2143,6 @@ void r600_cp_stop(struct radeon_device *rdev) int r600_init_microcode(struct radeon_device *rdev) { - struct platform_device *pdev; const char *chip_name; const char *rlc_chip_name; const char *smc_chip_name = "RV770"; @@ -2154,13 +2152,6 @@ int r600_init_microcode(struct radeon_device *rdev) DRM_DEBUG("\n"); - pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); - err = IS_ERR(pdev); - if (err) { - printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); - return -EINVAL; - } - switch (rdev->family) { case CHIP_R600: chip_name = "R600"; @@ -2272,7 +2263,7 @@ int r600_init_microcode(struct radeon_device *rdev) DRM_INFO("Loading %s Microcode\n", chip_name); snprintf(fw_name, sizeof(fw_name), 
"radeon/%s_pfp.bin", chip_name); - err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->pfp_fw->size != pfp_req_size) { @@ -2284,7 +2275,7 @@ int r600_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); - err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->me_fw->size != me_req_size) { @@ -2295,7 +2286,7 @@ int r600_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); - err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->rlc_fw->size != rlc_req_size) { @@ -2307,7 +2298,7 @@ int r600_init_microcode(struct radeon_device *rdev) if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); - err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->smc_fw->size != smc_req_size) { @@ -2319,8 +2310,6 @@ int r600_init_microcode(struct radeon_device *rdev) } out: - platform_device_unregister(pdev); - if (err) { if (err != -EINVAL) printk(KERN_ERR @@ -3019,7 +3008,7 @@ void r600_uvd_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) { struct radeon_ring *ring = &rdev->ring[fence->ring]; - uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr; + uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); radeon_ring_write(ring, fence->seq); @@ -3145,6 +3134,87 @@ int r600_copy_blit(struct radeon_device *rdev, } /** + * r600_copy_cpdma - copy pages using the CP DMA engine + * + * @rdev: radeon_device pointer + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @num_gpu_pages: number of GPU pages to xfer + * @fence: radeon fence object + * + * Copy GPU paging using the CP DMA engine (r6xx+). + * Used by the radeon ttm implementation to move pages if + * registered as the asic copy callback. 
+ */ +int r600_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence **fence) +{ + struct radeon_semaphore *sem = NULL; + int ring_index = rdev->asic->copy.blit_ring_index; + struct radeon_ring *ring = &rdev->ring[ring_index]; + u32 size_in_bytes, cur_size_in_bytes, tmp; + int i, num_loops; + int r = 0; + + r = radeon_semaphore_create(rdev, &sem); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + return r; + } + + size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); + num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); + r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + radeon_semaphore_free(rdev, &sem, NULL); + return r; + } + + if (radeon_fence_need_sync(*fence, ring->idx)) { + radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, + ring->idx); + radeon_fence_note_sync(*fence, ring->idx); + } else { + radeon_semaphore_free(rdev, &sem, NULL); + } + + for (i = 0; i < num_loops; i++) { + cur_size_in_bytes = size_in_bytes; + if (cur_size_in_bytes > 0x1fffff) + cur_size_in_bytes = 0x1fffff; + size_in_bytes -= cur_size_in_bytes; + tmp = upper_32_bits(src_offset) & 0xff; + if (size_in_bytes == 0) + tmp |= PACKET3_CP_DMA_CP_SYNC; + radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4)); + radeon_ring_write(ring, src_offset & 0xffffffff); + radeon_ring_write(ring, tmp); + radeon_ring_write(ring, dst_offset & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); + radeon_ring_write(ring, cur_size_in_bytes); + src_offset += cur_size_in_bytes; + dst_offset += cur_size_in_bytes; + } + radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); + radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); + + r = radeon_fence_emit(rdev, fence, ring->idx); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return r; + } + + radeon_ring_unlock_commit(rdev, ring); + radeon_semaphore_free(rdev, &sem, *fence); + + return r; +} + +/** * r600_copy_dma - copy pages using the DMA engine * * @rdev: radeon_device pointer diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index e73b2a73494..f48240bb8c5 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -266,6 +266,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod uint32_t offset; ssize_t err; + if (!dig || !dig->afmt) + return; + /* Silent, r600_hdmi_enable will raise WARN for us */ if (!dig->afmt->enabled) return; @@ -448,6 +451,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; u32 hdmi = HDMI0_ERROR_ACK; + if (!dig || !dig->afmt) + return; + /* Silent, r600_hdmi_enable will raise WARN for us */ if (enable && dig->afmt->enabled) return; diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index f1b3084d8f5..8e3fe815eda 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -602,6 +602,7 @@ #define L2_BUSY (1 << 0) #define WAIT_UNTIL 0x8040 +#define WAIT_CP_DMA_IDLE_bit (1 << 8) #define WAIT_2D_IDLE_bit (1 << 14) #define WAIT_3D_IDLE_bit (1 << 15) #define WAIT_2D_IDLECLEAN_bit (1 << 16) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9b7025d02cd..2f08219c39b 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -97,6 
+97,7 @@ extern int radeon_msi; extern int radeon_lockup_timeout; extern int radeon_fastfb; extern int radeon_dpm; +extern int radeon_aspm; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -455,6 +456,7 @@ struct radeon_sa_manager { uint64_t gpu_addr; void *cpu_ptr; uint32_t domain; + uint32_t align; }; struct radeon_sa_bo; @@ -783,6 +785,11 @@ struct radeon_mec { /* number of entries in page table */ #define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) +/* PTBs (Page Table Blocks) need to be aligned to 32K */ +#define RADEON_VM_PTB_ALIGN_SIZE 32768 +#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1) +#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK) + struct radeon_vm { struct list_head list; struct list_head va; @@ -1460,6 +1467,8 @@ struct radeon_uvd { struct radeon_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; + void *saved_bo; + unsigned fw_size; atomic_t handles[RADEON_MAX_UVD_HANDLES]; struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; struct delayed_work idle_work; @@ -2054,7 +2063,6 @@ struct radeon_device { const struct firmware *rlc_fw; /* r6/700 RLC firmware */ const struct firmware *mc_fw; /* NI MC firmware */ const struct firmware *ce_fw; /* SI CE firmware */ - const struct firmware *uvd_fw; /* UVD firmware */ const struct firmware *mec_fw; /* CIK MEC firmware */ const struct firmware *sdma_fw; /* CIK SDMA firmware */ const struct firmware *smc_fw; /* SMC firmware */ diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 097077499cc..78bec1a58ed 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1026,8 +1026,8 @@ static struct radeon_asic r600_asic = { .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r600_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, - .copy = &r600_copy_dma, - .copy_ring_index = R600_RING_TYPE_DMA_INDEX, + .copy = &r600_copy_cpdma, + .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, .surface = { .set_reg = r600_set_surface_reg, @@ -1119,8 +1119,8 @@ static struct radeon_asic rv6xx_asic = { .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r600_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, - .copy = &r600_copy_dma, - .copy_ring_index = R600_RING_TYPE_DMA_INDEX, + .copy = &r600_copy_cpdma, + .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, .surface = { .set_reg = r600_set_surface_reg, @@ -1229,8 +1229,8 @@ static struct radeon_asic rs780_asic = { .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r600_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, - .copy = &r600_copy_dma, - .copy_ring_index = R600_RING_TYPE_DMA_INDEX, + .copy = &r600_copy_cpdma, + .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, .surface = { .set_reg = r600_set_surface_reg, @@ -1270,6 +1270,7 @@ static struct radeon_asic rs780_asic = { .get_sclk = &rs780_dpm_get_sclk, .get_mclk = &rs780_dpm_get_mclk, .print_power_state = &rs780_dpm_print_power_state, + .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, }, .pflip = { .pre_page_flip = &rs600_pre_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 45d0693cddd..ca189570990 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -340,6 +340,9 @@ int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, 
uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence); +int r600_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, struct radeon_fence **fence); int r600_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence); @@ -430,6 +433,8 @@ u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low); u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low); void rs780_dpm_print_power_state(struct radeon_device *rdev, struct radeon_ps *ps); +void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m); /* uvd */ int r600_uvd_init(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index fbdaff55556..e3f3e884178 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -3513,7 +3513,6 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev, u8 frev, crev, i; u16 data_offset, size; union vram_info *vram_info; - u8 *p; memset(mem_info, 0, sizeof(struct atom_memory_info)); @@ -3529,13 +3528,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev, if (module_index < vram_info->v1_3.ucNumOfVRAMModule) { ATOM_VRAM_MODULE_V3 *vram_module = (ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo; - p = (u8 *)vram_info->v1_3.aVramInfo; for (i = 0; i < module_index; i++) { - vram_module = (ATOM_VRAM_MODULE_V3 *)p; if (le16_to_cpu(vram_module->usSize) == 0) return -EINVAL; - p += le16_to_cpu(vram_module->usSize); + vram_module = (ATOM_VRAM_MODULE_V3 *) + ((u8 *)vram_module + le16_to_cpu(vram_module->usSize)); } mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf; mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0; @@ -3547,13 +3545,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev, if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { ATOM_VRAM_MODULE_V4 *vram_module = (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; - p = (u8 *)vram_info->v1_4.aVramInfo; for (i = 0; i < module_index; i++) { - vram_module = (ATOM_VRAM_MODULE_V4 *)p; if (le16_to_cpu(vram_module->usModuleSize) == 0) return -EINVAL; - p += le16_to_cpu(vram_module->usModuleSize); + vram_module = (ATOM_VRAM_MODULE_V4 *) + ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize)); } mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; mem_info->mem_type = vram_module->ucMemoryType & 0xf0; @@ -3572,13 +3569,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev, if (module_index < vram_info->v2_1.ucNumOfVRAMModule) { ATOM_VRAM_MODULE_V7 *vram_module = (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo; - p = (u8 *)vram_info->v2_1.aVramInfo; for (i = 0; i < module_index; i++) { - vram_module = (ATOM_VRAM_MODULE_V7 *)p; if (le16_to_cpu(vram_module->usModuleSize) == 0) return -EINVAL; - p += le16_to_cpu(vram_module->usModuleSize); + vram_module = (ATOM_VRAM_MODULE_V7 *) + ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize)); } mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; mem_info->mem_type = vram_module->ucMemoryType & 0xf0; @@ -3628,21 +3624,19 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev, if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { ATOM_VRAM_MODULE_V4 *vram_module = (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; - ATOM_MEMORY_TIMING_FORMAT *format; - p = (u8 *)vram_info->v1_4.aVramInfo; for (i = 0; i < 
module_index; i++) { - vram_module = (ATOM_VRAM_MODULE_V4 *)p; if (le16_to_cpu(vram_module->usModuleSize) == 0) return -EINVAL; - p += le16_to_cpu(vram_module->usModuleSize); + vram_module = (ATOM_VRAM_MODULE_V4 *) + ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize)); } mclk_range_table->num_entries = (u8) - ((vram_module->usModuleSize - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) / + ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) / mem_timing_size); - p = (u8 *)vram_module->asMemTiming; + p = (u8 *)&vram_module->asMemTiming[0]; for (i = 0; i < mclk_range_table->num_entries; i++) { - format = (ATOM_MEMORY_TIMING_FORMAT *)p; + ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p; mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange); p += mem_timing_size; } @@ -3705,17 +3699,21 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev, (ATOM_MEMORY_SETTING_DATA_BLOCK *) ((u8 *)reg_block + (2 * sizeof(u16)) + le16_to_cpu(reg_block->usRegIndexTblSize)); + ATOM_INIT_REG_INDEX_FORMAT *format = ®_block->asRegIndexBuf[0]; num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) / sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1; if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE) return -EINVAL; - while (!(reg_block->asRegIndexBuf[i].ucPreRegDataLength & ACCESS_PLACEHOLDER) && - (i < num_entries)) { + while (i < num_entries) { + if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER) + break; reg_table->mc_reg_address[i].s1 = - (u16)(le16_to_cpu(reg_block->asRegIndexBuf[i].usRegIndex)); + (u16)(le16_to_cpu(format->usRegIndex)); reg_table->mc_reg_address[i].pre_reg_data = - (u8)(reg_block->asRegIndexBuf[i].ucPreRegDataLength); + (u8)(format->ucPreRegDataLength); i++; + format = (ATOM_INIT_REG_INDEX_FORMAT *) + ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT)); } reg_table->last = i; while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) && diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e5419b35017..29876b1be8e 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -167,6 +167,7 @@ int radeon_msi = -1; int radeon_lockup_timeout = 10000; int radeon_fastfb = 0; int radeon_dpm = -1; +int radeon_aspm = -1; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -225,6 +226,9 @@ module_param_named(fastfb, radeon_fastfb, int, 0444); MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); module_param_named(dpm, radeon_dpm, int, 0444); +MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)"); +module_param_named(aspm, radeon_aspm, int, 0444); + static struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index ddb8f8e04eb..7ddb0efe240 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) } else { /* put fence directly behind firmware */ - index = ALIGN(rdev->uvd_fw->size, 8); + index = ALIGN(rdev->uvd.fw_size, 8); rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; } diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 43ec4a401f0..d9d31a38327 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ 
b/drivers/gpu/drm/radeon/radeon_gart.c @@ -466,7 +466,8 @@ int radeon_vm_manager_init(struct radeon_device *rdev) size += rdev->vm_manager.max_pfn * 8; size *= 2; r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, - RADEON_GPU_PAGE_ALIGN(size), + RADEON_VM_PTB_ALIGN(size), + RADEON_VM_PTB_ALIGN_SIZE, RADEON_GEM_DOMAIN_VRAM); if (r) { dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", @@ -620,10 +621,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) } retry: - pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); + pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev)); r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->page_directory, pd_size, - RADEON_GPU_PAGE_SIZE, false); + RADEON_VM_PTB_ALIGN_SIZE, false); if (r == -ENOMEM) { r = radeon_vm_evict(rdev, vm); if (r) @@ -952,8 +953,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev, retry: r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->page_tables[pt_idx], - RADEON_VM_PTE_COUNT * 8, - RADEON_GPU_PAGE_SIZE, false); + RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8), + RADEON_VM_PTB_ALIGN_SIZE, false); if (r == -ENOMEM) { r = radeon_vm_evict(rdev, vm); diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index bcdefd1dcd4..081886b0642 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -260,10 +260,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev) { int r = 0; - INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); - INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); - INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); - spin_lock_init(&rdev->irq.lock); r = drm_vblank_init(rdev->ddev, rdev->num_crtc); if (r) { @@ -285,6 +281,11 @@ int radeon_irq_kms_init(struct radeon_device *rdev) rdev->irq.installed = false; return r; } + + INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); + INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); + DRM_INFO("radeon: irq initialized.\n"); return 0; } @@ -304,8 +305,8 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) rdev->irq.installed = false; if (rdev->msi_enabled) pci_disable_msi(rdev->pdev); + flush_work(&rdev->hotplug_work); } - flush_work(&rdev->hotplug_work); } /** diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 0219d263e2d..2020bf4a383 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -377,6 +377,7 @@ int radeon_bo_list_validate(struct ww_acquire_ctx *ticket, domain = lobj->alt_domain; goto retry; } + ttm_eu_backoff_reservation(ticket, head); return r; } } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 91519a5622b..49c82c48001 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -174,7 +174,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo) extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager, - unsigned size, u32 domain); + unsigned size, u32 align, u32 domain); extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager); extern int radeon_sa_bo_manager_start(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 5f1c51a776e..fb5ea620897 
100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -224,6 +224,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) } r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, RADEON_IB_POOL_SIZE*64*1024, + RADEON_GPU_PAGE_SIZE, RADEON_GEM_DOMAIN_GTT); if (r) { return r; diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index 0abe5a9431b..f0bac68254b 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager); int radeon_sa_bo_manager_init(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager, - unsigned size, u32 domain) + unsigned size, u32 align, u32 domain) { int i, r; @@ -57,13 +57,14 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev, sa_manager->bo = NULL; sa_manager->size = size; sa_manager->domain = domain; + sa_manager->align = align; sa_manager->hole = &sa_manager->olist; INIT_LIST_HEAD(&sa_manager->olist); for (i = 0; i < RADEON_NUM_RINGS; ++i) { INIT_LIST_HEAD(&sa_manager->flist[i]); } - r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, + r = radeon_bo_create(rdev, size, align, true, domain, NULL, &sa_manager->bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); @@ -317,7 +318,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev, unsigned tries[RADEON_NUM_RINGS]; int i, r; - BUG_ON(align > RADEON_GPU_PAGE_SIZE); + BUG_ON(align > sa_manager->align); BUG_ON(size > sa_manager->size); *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 41efcec28cd..414fd145d20 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -56,20 +56,13 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work); int radeon_uvd_init(struct radeon_device *rdev) { - struct platform_device *pdev; + const struct firmware *fw; unsigned long bo_size; const char *fw_name; int i, r; INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); - pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0); - r = IS_ERR(pdev); - if (r) { - dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n"); - return -EINVAL; - } - switch (rdev->family) { case CHIP_RV710: case CHIP_RV730: @@ -112,17 +105,14 @@ int radeon_uvd_init(struct radeon_device *rdev) return -EINVAL; } - r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev); + r = request_firmware(&fw, fw_name, rdev->dev); if (r) { dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", fw_name); - platform_device_unregister(pdev); return r; } - platform_device_unregister(pdev); - - bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + + bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) + RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); @@ -131,16 +121,35 @@ int radeon_uvd_init(struct radeon_device *rdev) return r; } - r = radeon_uvd_resume(rdev); - if (r) + r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); + if (r) { + radeon_bo_unref(&rdev->uvd.vcpu_bo); + dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); return r; + } - memset(rdev->uvd.cpu_addr, 0, bo_size); - memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); + r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, + &rdev->uvd.gpu_addr); + if (r) { + 
radeon_bo_unreserve(rdev->uvd.vcpu_bo); + radeon_bo_unref(&rdev->uvd.vcpu_bo); + dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); + return r; + } - r = radeon_uvd_suspend(rdev); - if (r) + r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); + if (r) { + dev_err(rdev->dev, "(%d) UVD map failed\n", r); return r; + } + + radeon_bo_unreserve(rdev->uvd.vcpu_bo); + + rdev->uvd.fw_size = fw->size; + memset(rdev->uvd.cpu_addr, 0, bo_size); + memcpy(rdev->uvd.cpu_addr, fw->data, fw->size); + + release_firmware(fw); for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { atomic_set(&rdev->uvd.handles[i], 0); @@ -152,71 +161,47 @@ int radeon_uvd_init(struct radeon_device *rdev) void radeon_uvd_fini(struct radeon_device *rdev) { - radeon_uvd_suspend(rdev); - radeon_bo_unref(&rdev->uvd.vcpu_bo); -} - -int radeon_uvd_suspend(struct radeon_device *rdev) -{ int r; if (rdev->uvd.vcpu_bo == NULL) - return 0; + return; r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); if (!r) { radeon_bo_kunmap(rdev->uvd.vcpu_bo); radeon_bo_unpin(rdev->uvd.vcpu_bo); - rdev->uvd.cpu_addr = NULL; - if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) { - radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); - } radeon_bo_unreserve(rdev->uvd.vcpu_bo); - - if (rdev->uvd.cpu_addr) { - radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); - } else { - rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL; - } } - return r; + + radeon_bo_unref(&rdev->uvd.vcpu_bo); } -int radeon_uvd_resume(struct radeon_device *rdev) +int radeon_uvd_suspend(struct radeon_device *rdev) { - int r; + unsigned size; if (rdev->uvd.vcpu_bo == NULL) - return -EINVAL; + return 0; - r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); - if (r) { - radeon_bo_unref(&rdev->uvd.vcpu_bo); - dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); - return r; - } + size = radeon_bo_size(rdev->uvd.vcpu_bo); + rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); + memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size); - /* Have been pin in cpu unmap unpin */ - radeon_bo_kunmap(rdev->uvd.vcpu_bo); - radeon_bo_unpin(rdev->uvd.vcpu_bo); + return 0; +} - r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, - &rdev->uvd.gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->uvd.vcpu_bo); - radeon_bo_unref(&rdev->uvd.vcpu_bo); - dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); - return r; - } +int radeon_uvd_resume(struct radeon_device *rdev) +{ + if (rdev->uvd.vcpu_bo == NULL) + return -EINVAL; - r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); - if (r) { - dev_err(rdev->dev, "(%d) UVD map failed\n", r); - return r; + if (rdev->uvd.saved_bo != NULL) { + unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo); + memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size); + kfree(rdev->uvd.saved_bo); + rdev->uvd.saved_bo = NULL; } - radeon_bo_unreserve(rdev->uvd.vcpu_bo); - return 0; } diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index bef832a62fe..d1a1ce73bd4 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c @@ -28,6 +28,7 @@ #include "r600_dpm.h" #include "rs780_dpm.h" #include "atom.h" +#include <linux/seq_file.h> static struct igp_ps *rs780_get_ps(struct radeon_ps *rps) { @@ -961,3 +962,27 @@ u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low) return pi->bootup_uma_clk; } + +void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m) +{ + struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct igp_ps *ps = 
rs780_get_ps(rps); + u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK; + u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL); + u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1; + u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 + + ((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1; + u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) / + (post_div * ref_div); + + seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + + /* guess based on the current sclk */ + if (sclk < (ps->sclk_low + 500)) + seq_printf(m, "power level 0 sclk: %u vddc_index: %d\n", + ps->sclk_low, ps->min_voltage); + else + seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", + ps->sclk_high, ps->max_voltage); +} diff --git a/drivers/gpu/drm/radeon/rs780d.h b/drivers/gpu/drm/radeon/rs780d.h index b1142ed1c62..cfbe9a43d97 100644 --- a/drivers/gpu/drm/radeon/rs780d.h +++ b/drivers/gpu/drm/radeon/rs780d.h @@ -28,6 +28,7 @@ # define SPLL_SLEEP (1 << 1) # define SPLL_REF_DIV(x) ((x) << 2) # define SPLL_REF_DIV_MASK (7 << 2) +# define SPLL_REF_DIV_SHIFT 2 # define SPLL_FB_DIV(x) ((x) << 5) # define SPLL_FB_DIV_MASK (0xff << 2) # define SPLL_FB_DIV_SHIFT 2 @@ -36,8 +37,10 @@ # define SPLL_PULSENUM_MASK (3 << 14) # define SPLL_SW_HILEN(x) ((x) << 16) # define SPLL_SW_HILEN_MASK (0xf << 16) +# define SPLL_SW_HILEN_SHIFT 16 # define SPLL_SW_LOLEN(x) ((x) << 20) # define SPLL_SW_LOLEN_MASK (0xf << 20) +# define SPLL_SW_LOLEN_SHIFT 20 # define SPLL_DIVEN (1 << 24) # define SPLL_BYPASS_EN (1 << 25) # define SPLL_CHG_STATUS (1 << 29) diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 8303de267ee..65e33f38734 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c @@ -1763,12 +1763,14 @@ void rv6xx_setup_asic(struct radeon_device *rdev) { r600_enable_acpi_pm(rdev); - if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) - rv6xx_enable_l0s(rdev); - if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) - rv6xx_enable_l1(rdev); - if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) - rv6xx_enable_pll_sleep_in_l1(rdev); + if (radeon_aspm != 0) { + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) + rv6xx_enable_l0s(rdev); + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) + rv6xx_enable_l1(rdev); + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) + rv6xx_enable_pll_sleep_in_l1(rdev); + } } void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4a62ad2e539..30ea14e8854 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev) /* programm the VCPU memory controller bits 0-27 */ addr = rdev->uvd.gpu_addr >> 3; - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; WREG32(UVD_VCPU_CACHE_OFFSET0, addr); WREG32(UVD_VCPU_CACHE_SIZE0, size); diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index d914e04ea39..2d347925f77 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2099,12 +2099,14 @@ void rv770_dpm_setup_asic(struct radeon_device *rdev) rv770_enable_acpi_pm(rdev); - if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) - 
rv770_enable_l0s(rdev); - if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) - rv770_enable_l1(rdev); - if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) - rv770_enable_pll_sleep_in_l1(rdev); + if (radeon_aspm != 0) { + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) + rv770_enable_l0s(rdev); + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) + rv770_enable_l1(rdev); + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) + rv770_enable_pll_sleep_in_l1(rdev); + } } void rv770_dpm_display_configuration_changed(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 23490670906..d325280e2f9 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -22,7 +22,6 @@ * Authors: Alex Deucher */ #include <linux/firmware.h> -#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> #include <drm/drmP.h> @@ -1541,7 +1540,6 @@ static int si_mc_load_microcode(struct radeon_device *rdev) static int si_init_microcode(struct radeon_device *rdev) { - struct platform_device *pdev; const char *chip_name; const char *rlc_chip_name; size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; @@ -1551,13 +1549,6 @@ static int si_init_microcode(struct radeon_device *rdev) DRM_DEBUG("\n"); - pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); - err = IS_ERR(pdev); - if (err) { - printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); - return -EINVAL; - } - switch (rdev->family) { case CHIP_TAHITI: chip_name = "TAHITI"; @@ -1615,7 +1606,7 @@ static int si_init_microcode(struct radeon_device *rdev) DRM_INFO("Loading %s Microcode\n", chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); - err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->pfp_fw->size != pfp_req_size) { @@ -1627,7 +1618,7 @@ static int si_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); - err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->me_fw->size != me_req_size) { @@ -1638,7 +1629,7 @@ static int si_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); - err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->ce_fw->size != ce_req_size) { @@ -1649,7 +1640,7 @@ static int si_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); - err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->rlc_fw->size != rlc_req_size) { @@ -1660,7 +1651,7 @@ static int si_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); - err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->mc_fw->size != mc_req_size) { @@ -1671,7 +1662,7 @@ static int si_init_microcode(struct radeon_device *rdev) } snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); - err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); + err = 
request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->smc_fw->size != smc_req_size) { @@ -1682,8 +1673,6 @@ static int si_init_microcode(struct radeon_device *rdev) } out: - platform_device_unregister(pdev); - if (err) { if (err != -EINVAL) printk(KERN_ERR @@ -4401,6 +4390,270 @@ void si_vm_fini(struct radeon_device *rdev) } /** + * si_vm_decode_fault - print human readable fault info + * + * @rdev: radeon_device pointer + * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value + * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value + * + * Print human readable fault information (SI). + */ +static void si_vm_decode_fault(struct radeon_device *rdev, + u32 status, u32 addr) +{ + u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; + u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; + u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; + char *block; + + if (rdev->family == CHIP_TAHITI) { + switch (mc_id) { + case 160: + case 144: + case 96: + case 80: + case 224: + case 208: + case 32: + case 16: + block = "CB"; + break; + case 161: + case 145: + case 97: + case 81: + case 225: + case 209: + case 33: + case 17: + block = "CB_FMASK"; + break; + case 162: + case 146: + case 98: + case 82: + case 226: + case 210: + case 34: + case 18: + block = "CB_CMASK"; + break; + case 163: + case 147: + case 99: + case 83: + case 227: + case 211: + case 35: + case 19: + block = "CB_IMMED"; + break; + case 164: + case 148: + case 100: + case 84: + case 228: + case 212: + case 36: + case 20: + block = "DB"; + break; + case 165: + case 149: + case 101: + case 85: + case 229: + case 213: + case 37: + case 21: + block = "DB_HTILE"; + break; + case 167: + case 151: + case 103: + case 87: + case 231: + case 215: + case 39: + case 23: + block = "DB_STEN"; + break; + case 72: + case 68: + case 64: + case 8: + case 4: + case 0: + case 136: + case 132: + case 128: + case 200: + case 196: + case 192: + block = "TC"; + break; + case 112: + case 48: + block = "CP"; + break; + case 49: + case 177: + case 50: + case 178: + block = "SH"; + break; + case 53: + case 190: + block = "VGT"; + break; + case 117: + block = "IH"; + break; + case 51: + case 115: + block = "RLC"; + break; + case 119: + case 183: + block = "DMA0"; + break; + case 61: + block = "DMA1"; + break; + case 248: + case 120: + block = "HDP"; + break; + default: + block = "unknown"; + break; + } + } else { + switch (mc_id) { + case 32: + case 16: + case 96: + case 80: + case 160: + case 144: + case 224: + case 208: + block = "CB"; + break; + case 33: + case 17: + case 97: + case 81: + case 161: + case 145: + case 225: + case 209: + block = "CB_FMASK"; + break; + case 34: + case 18: + case 98: + case 82: + case 162: + case 146: + case 226: + case 210: + block = "CB_CMASK"; + break; + case 35: + case 19: + case 99: + case 83: + case 163: + case 147: + case 227: + case 211: + block = "CB_IMMED"; + break; + case 36: + case 20: + case 100: + case 84: + case 164: + case 148: + case 228: + case 212: + block = "DB"; + break; + case 37: + case 21: + case 101: + case 85: + case 165: + case 149: + case 229: + case 213: + block = "DB_HTILE"; + break; + case 39: + case 23: + case 103: + case 87: + case 167: + case 151: + case 231: + case 215: + block = "DB_STEN"; + break; + case 72: + case 68: + case 8: + case 4: + case 136: + case 132: + case 200: + case 196: + block = "TC"; + break; + case 112: + case 48: + block = "CP"; + break; + case 49: + case 177: + case 50: + case 178: + block = "SH"; 
+ break; + case 53: + block = "VGT"; + break; + case 117: + block = "IH"; + break; + case 51: + case 115: + block = "RLC"; + break; + case 119: + case 183: + block = "DMA0"; + break; + case 61: + block = "DMA1"; + break; + case 248: + case 120: + block = "HDP"; + break; + default: + block = "unknown"; + break; + } + } + + printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", + protections, vmid, addr, + (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", + block, mc_id); +} + +/** * si_vm_set_page - update the page tables using the CP * * @rdev: radeon_device pointer @@ -5766,6 +6019,7 @@ int si_irq_process(struct radeon_device *rdev) u32 ring_index; bool queue_hotplug = false; bool queue_thermal = false; + u32 status, addr; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; @@ -6001,11 +6255,14 @@ restart_ih: break; case 146: case 147: + addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); + status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); + addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); + status); + si_vm_decode_fault(rdev, status, addr); /* reset addr and status */ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; @@ -6796,6 +7053,9 @@ static void si_program_aspm(struct radeon_device *rdev) bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; bool disable_clkreq = false; + if (radeon_aspm == 0) + return; + if (!(rdev->flags & RADEON_IS_PCIE)) return; diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 12a20eb77d0..2c8da27a929 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -367,6 +367,20 @@ #define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC #define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC +#define PROTECTIONS_MASK (0xf << 0) +#define PROTECTIONS_SHIFT 0 + /* bit 0: range + * bit 1: pde0 + * bit 2: valid + * bit 3: read + * bit 4: write + */ +#define MEMORY_CLIENT_ID_MASK (0xff << 12) +#define MEMORY_CLIENT_ID_SHIFT 12 +#define MEMORY_CLIENT_RW_MASK (1 << 24) +#define MEMORY_CLIENT_RW_SHIFT 24 +#define FAULT_VMID_MASK (0xf << 25) +#define FAULT_VMID_SHIFT 25 #define VM_INVALIDATE_REQUEST 0x1478 #define VM_INVALIDATE_RESPONSE 0x147c diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 11b6b9924f1..c0a85031990 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1732,7 +1732,13 @@ int sumo_dpm_init(struct radeon_device *rdev) pi->enable_sclk_ds = true; pi->enable_dynamic_m3_arbiter = false; pi->enable_dynamic_patch_ps = true; - pi->enable_gfx_power_gating = true; + /* Some PALM chips don't seem to properly ungate gfx when UVD is in use; + * for now just disable gfx PG. 
+ */ + if (rdev->family == CHIP_PALM) + pi->enable_gfx_power_gating = false; + else + pi->enable_gfx_power_gating = true; pi->enable_gfx_clock_gating = true; pi->enable_mg_clock_gating = true; pi->enable_auto_thermal_throttling = true; @@ -1845,6 +1851,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev, return 0; if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { + if (pi->enable_boost) + sumo_enable_boost(rdev, rps, false); sumo_power_level_enable(rdev, ps->num_levels - 1, true); sumo_set_forced_level(rdev, ps->num_levels - 1); sumo_set_forced_mode_enabled(rdev); @@ -1855,6 +1863,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev, sumo_set_forced_mode_enabled(rdev); sumo_set_forced_mode(rdev, false); } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { + if (pi->enable_boost) + sumo_enable_boost(rdev, rps, false); sumo_power_level_enable(rdev, 0, true); sumo_set_forced_level(rdev, 0); sumo_set_forced_mode_enabled(rdev); @@ -1868,6 +1878,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev, for (i = 0; i < ps->num_levels; i++) { sumo_power_level_enable(rdev, i, true); } + if (pi->enable_boost) + sumo_enable_boost(rdev, rps, true); } rdev->pm.dpm.forced_level = level; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index ff82877de87..dc0fe09b2ba 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -249,8 +249,13 @@ static struct drm_driver rcar_du_driver = { .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import = drm_gem_cma_dmabuf_import, - .gem_prime_export = drm_gem_cma_dmabuf_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, .dumb_create = rcar_du_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, .dumb_destroy = drm_gem_cma_dumb_destroy, diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index edc10181f55..5f83f9a3ef5 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -276,8 +276,13 @@ static struct drm_driver shmob_drm_driver = { .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import = drm_gem_cma_dmabuf_import, - .gem_prime_export = drm_gem_cma_dmabuf_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, .dumb_destroy = drm_gem_cma_dumb_destroy, diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index b963ea12d17..7aec6f39fdd 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -1891,7 +1891,7 @@ static int uvesafb_setup(char *options) } } - if (mtrr != 3 && mtrr != 1) + if (mtrr != 3 && mtrr != 0) pr_warn("uvesafb: mtrr should be set 
to 0 or 3; %d is unsupported", mtrr); return 0; |