Diffstat (limited to 'drivers/gpu/drm/radeon/r200.c')
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 74
1 file changed, 43 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index eba4cbfa78f..58f0473aa73 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -25,9 +25,8 @@
* Alex Deucher
* Jerome Glisse
*/
-#include "drmP.h"
-#include "drm.h"
-#include "radeon_drm.h"
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
@@ -85,7 +84,7 @@ int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
@@ -120,7 +119,7 @@ int r200_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring);
return r;
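
(Aside, not part of the diff: a minimal caller-side sketch of the updated prototype. r200_copy_dma() now takes struct radeon_fence ** and radeon_fence_emit() is told which ring the fence belongs to, as shown in the hunks above. The wrapper below is hypothetical; radeon_fence_wait() and radeon_fence_unref() are the existing radeon fence helpers.)

/* Illustrative sketch only -- not part of this commit. */
static int example_dma_copy(struct radeon_device *rdev,
			    uint64_t src, uint64_t dst,
			    unsigned num_gpu_pages)
{
	struct radeon_fence *fence = NULL;
	int r;

	/* The fence is now returned through a double pointer. */
	r = r200_copy_dma(rdev, src, dst, num_gpu_pages, &fence);
	if (r)
		return r;

	/* Block until the GFX ring has executed the copy. */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
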
@@ -154,7 +153,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
@@ -163,7 +162,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -176,30 +175,30 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case R200_PP_TXOFFSET_0:
case R200_PP_TXOFFSET_1:
@@ -208,14 +207,24 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXOFFSET_4:
case R200_PP_TXOFFSET_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R200_TXO_MACRO_TILE;
+ if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R200_TXO_MICRO_TILE;
+
+ tmp = idx_value & ~(0x7 << 2);
+ tmp |= tile_flags;
+ ib[idx] = tmp + ((u32)reloc->gpu_offset);
+ } else
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
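
(Aside, not part of the diff: a hedged restatement of the tiling fixup the hunk above applies to R200_PP_TXOFFSET_n. Unless userspace submitted the CS with RADEON_CS_KEEP_TILING_FLAGS, the checker clears the control bits masked by (0x7 << 2) in the supplied offset, substitutes tiling bits derived from the buffer object's tiling flags, and then adds the relocation's GPU offset. The standalone helper below is hypothetical and only mirrors that inline logic.)

/* Hypothetical helper restating the fixup done inline above. */
static u32 r200_fixup_txoffset(u32 idx_value, u32 bo_tiling_flags,
			       bool keep_user_flags, u32 gpu_offset)
{
	u32 tile_flags = 0;

	if (keep_user_flags)
		/* Trust the tiling bits userspace already encoded. */
		return idx_value + gpu_offset;

	if (bo_tiling_flags & RADEON_TILING_MACRO)
		tile_flags |= R200_TXO_MACRO_TILE;
	if (bo_tiling_flags & RADEON_TILING_MICRO)
		tile_flags |= R200_TXO_MICRO_TILE;

	/* Drop the userspace tiling bits, insert the kernel's view. */
	return ((idx_value & ~(0x7 << 2)) | tile_flags) + gpu_offset;
}
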
@@ -251,15 +260,15 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_CUBIC_OFFSET_F5_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -269,22 +278,25 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_COLOR_TILE_ENABLE;
+ if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ } else
+ ib[idx] = idx_value;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
@@ -343,14 +355,14 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case RADEON_PP_CNTL:
{