Diffstat (limited to 'drivers/gpu/drm/radeon/cik_sdma.c')
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 235
1 file changed, 160 insertions, 75 deletions
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index b6286068e11..8e9d0f1d858 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "cikd.h"
 
 /* sdma */
@@ -51,6 +52,75 @@ u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
  */
 
 /**
+ * cik_sdma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	u32 rptr, reg;
+
+	if (rdev->wb.enabled) {
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	} else {
+		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
+		else
+			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;
+
+		rptr = RREG32(reg);
+	}
+
+	return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+	else
+		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+	return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (CIK+).
+ */
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+	else
+		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
  * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
  *
  * @rdev: radeon_device pointer
@@ -71,7 +141,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
 		next_rptr += 4;
 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
 		radeon_ring_write(ring, 1); /* number of DWs to follow */
 		radeon_ring_write(ring, next_rptr);
 	}
@@ -81,12 +151,41 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
 	while ((ring->wptr & 7) != 4)
 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
-	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
 	radeon_ring_write(ring, ib->length_dw);
 }
 
 /**
+ * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
+					 int ridx)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+	u32 ref_and_mask;
+
+	if (ridx == R600_RING_TYPE_DMA_INDEX)
+		ref_and_mask = SDMA0;
+	else
+		ref_and_mask = SDMA1;
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+	radeon_ring_write(ring, ref_and_mask); /* reference */
+	radeon_ring_write(ring, ref_and_mask); /* mask */
+	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
+/**
  * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
  *
  * @rdev: radeon_device pointer
@@ -101,29 +200,16 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
 {
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
-			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
-	u32 ref_and_mask;
-
-	if (fence->ring == R600_RING_TYPE_DMA_INDEX)
-		ref_and_mask = SDMA0;
-	else
-		ref_and_mask = SDMA1;
 
 	/* write the fence */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
-	radeon_ring_write(ring, addr & 0xffffffff);
-	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, upper_32_bits(addr));
 	radeon_ring_write(ring, fence->seq);
 	/* generate an interrupt */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
 	/* flush HDP */
-	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
-	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
-	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
-	radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
-	radeon_ring_write(ring, ref_and_mask); /* MASK */
-	radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
 }
 
 /**
@@ -137,7 +223,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
  * Add a DMA semaphore packet to the ring wait on or signal
  * other rings (CIK).
  */
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_ring *ring,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait)
@@ -147,7 +233,9 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
 	radeon_ring_write(ring, addr & 0xfffffff8);
-	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	radeon_ring_write(ring, upper_32_bits(addr));
+
+	return true;
 }
 
 /**
@@ -162,7 +250,9 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
 	u32 rb_cntl, reg_offset;
 	int i;
 
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
 	for (i = 0; i < 2; i++) {
 		if (i == 0)
@@ -174,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
 		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
 		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
 	}
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
 }
 
 /**
@@ -201,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
 	u32 me_cntl, reg_offset;
 	int i;
 
+	if (enable == false) {
+		cik_sdma_gfx_stop(rdev);
+		cik_sdma_rlc_stop(rdev);
+	}
+
 	for (i = 0; i < 2; i++) {
 		if (i == 0)
 			reg_offset = SDMA0_REGISTER_OFFSET;
@@ -272,8 +369,6 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
 		ring->wptr = 0;
 		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
 
-		ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
-
 		/* enable DMA RB */
 		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
 
@@ -293,7 +388,9 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
 		}
 	}
 
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
 	return 0;
 }
@@ -328,10 +425,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
 	if (!rdev->sdma_fw)
 		return -EINVAL;
 
-	/* stop the gfx rings and rlc compute queues */
-	cik_sdma_gfx_stop(rdev);
-	cik_sdma_rlc_stop(rdev);
-
 	/* halt the MEs */
 	cik_sdma_enable(rdev, false);
 
@@ -400,9 +493,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
  */
 void cik_sdma_fini(struct radeon_device *rdev)
 {
-	/* stop the gfx rings and rlc compute queues */
-	cik_sdma_gfx_stop(rdev);
-	cik_sdma_rlc_stop(rdev);
 	/* halt the MEs */
 	cik_sdma_enable(rdev, false);
 	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
@@ -450,13 +540,8 @@ int cik_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_bytes = size_in_bytes;
@@ -466,10 +551,10 @@ int cik_copy_dma(struct radeon_device *rdev,
 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
 		radeon_ring_write(ring, cur_size_in_bytes);
 		radeon_ring_write(ring, 0); /* src/dst endian swap */
-		radeon_ring_write(ring, src_offset & 0xffffffff);
-		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
-		radeon_ring_write(ring, dst_offset & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
+		radeon_ring_write(ring, lower_32_bits(src_offset));
+		radeon_ring_write(ring, upper_32_bits(src_offset));
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
+		radeon_ring_write(ring, upper_32_bits(dst_offset));
 		src_offset += cur_size_in_bytes;
 		dst_offset += cur_size_in_bytes;
 	}
@@ -477,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev,
 	r = radeon_fence_emit(rdev, fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
 		return r;
 	}
 
@@ -512,14 +598,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
 	tmp = 0xCAFEDEAD;
 	writel(tmp, ptr);
 
-	r = radeon_ring_lock(rdev, ring, 4);
+	r = radeon_ring_lock(rdev, ring, 5);
 	if (r) {
 		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
 		return r;
 	}
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
-	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
+	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
 	radeon_ring_write(ring, 1); /* number of DWs to follow */
 	radeon_ring_write(ring, 0xDEADBEEF);
 	radeon_ring_unlock_commit(rdev, ring);
@@ -574,7 +660,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 
 	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
+	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
@@ -626,11 +712,9 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		mask = RADEON_RESET_DMA1;
 
 	if (!(reset_mask & mask)) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
@@ -653,11 +737,31 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 			  uint64_t addr, unsigned count,
 			  uint32_t incr, uint32_t flags)
 {
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 	uint64_t value;
 	unsigned ndw;
 
-	if (flags & RADEON_VM_PAGE_SYSTEM) {
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if (flags == R600_PTE_GART) {
+		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+		while (count) {
+			unsigned bytes = count * 8;
+			if (bytes > 0x1FFFF8)
+				bytes = 0x1FFFF8;
+
+			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+			ib->ptr[ib->length_dw++] = bytes;
+			ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+			ib->ptr[ib->length_dw++] = lower_32_bits(src);
+			ib->ptr[ib->length_dw++] = upper_32_bits(src);
+			ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+			pe += bytes;
+			src += bytes;
+			count -= bytes / 8;
+		}
+	} else if (flags & R600_PTE_SYSTEM) {
 		while (count) {
 			ndw = count * 2;
 			if (ndw > 0xFFFFE)
@@ -669,16 +773,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 			ib->ptr[ib->length_dw++] = ndw;
 			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
 				addr += incr;
-				value |= r600_flags;
+				value |= flags;
 				ib->ptr[ib->length_dw++] = value;
 				ib->ptr[ib->length_dw++] = upper_32_bits(value);
 			}
@@ -689,7 +787,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 			if (ndw > 0x7FFFF)
 				ndw = 0x7FFFF;
 
-			if (flags & RADEON_VM_PAGE_VALID)
+			if (flags & R600_PTE_VALID)
 				value = addr;
 			else
 				value = 0;
@@ -697,7 +795,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
 			ib->ptr[ib->length_dw++] = pe; /* dst addr */
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+			ib->ptr[ib->length_dw++] = flags; /* mask */
 			ib->ptr[ib->length_dw++] = 0;
 			ib->ptr[ib->length_dw++] = value; /* value */
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
@@ -724,18 +822,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
 	struct radeon_ring *ring = &rdev->ring[ridx];
-	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
-			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
-	u32 ref_and_mask;
 
 	if (vm == NULL)
 		return;
 
-	if (ridx == R600_RING_TYPE_DMA_INDEX)
-		ref_and_mask = SDMA0;
-	else
-		ref_and_mask = SDMA1;
-
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 	if (vm->id < 8) {
 		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
@@ -770,12 +860,7 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
 	radeon_ring_write(ring, VMID(0));
 
 	/* flush HDP */
-	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
-	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
-	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
-	radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
-	radeon_ring_write(ring, ref_and_mask); /* MASK */
-	radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+	cik_sdma_hdp_flush_ring_emit(rdev, ridx);
 
 	/* flush TLB */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
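Note on the new ring-pointer callbacks (cik_sdma_get_rptr, cik_sdma_get_wptr, cik_sdma_set_wptr): the SDMA ring-buffer registers hold the ring position as a byte offset in bits 2..17, while the rest of the driver tracks positions in dwords, so reads mask with 0x3fffc and shift right by 2, and writes do the inverse. Below is a minimal standalone C sketch of just that arithmetic; the 0x3fffc mask is taken from the diff above, while the helper names and the test value are made up for illustration.

#include <assert.h>
#include <stdint.h>

/* The SDMA RB pointer registers keep the ring position as a byte
 * offset in bits 2..17 (mask 0x3fffc); the driver works in dwords,
 * so a read shifts right by 2 and a write shifts left by 2. */
static uint32_t reg_to_dw_index(uint32_t reg_val)
{
	return (reg_val & 0x3fffc) >> 2;
}

static uint32_t dw_index_to_reg(uint32_t wptr_dw)
{
	return (wptr_dw << 2) & 0x3fffc;
}

int main(void)
{
	/* a hypothetical wptr of 0x1234 dwords round-trips unchanged */
	assert(reg_to_dw_index(dw_index_to_reg(0x1234)) == 0x1234);
	return 0;
}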
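Likewise, the new R600_PTE_GART path in cik_sdma_vm_set_page no longer builds PTEs on the CPU; it DMA-copies them straight out of the GART table, 8 bytes per entry, with at most 0x1FFFF8 bytes per SDMA copy packet. The following is a rough standalone model of that chunking loop; the 8-byte entry size and the 0x1FFFF8 cap come from the diff, while the function and the example count are hypothetical.

#include <stdio.h>

#define MAX_COPY_BYTES 0x1FFFF8 /* per-packet byte limit used in the diff */

/* Model of the R600_PTE_GART loop: each page-table entry is 8 bytes,
 * so a request for `count` PTEs is split into SDMA copy packets of at
 * most MAX_COPY_BYTES each. Returns the number of packets emitted. */
static unsigned int split_pte_copy(unsigned int count)
{
	unsigned int packets = 0;

	while (count) {
		unsigned int bytes = count * 8;

		if (bytes > MAX_COPY_BYTES)
			bytes = MAX_COPY_BYTES;
		count -= bytes / 8;
		packets++;
	}
	return packets;
}

int main(void)
{
	/* e.g. 512K PTEs (a 4 MB table) fit in 3 copy packets */
	printf("%u\n", split_pte_copy(512 * 1024));
	return 0;
}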
