Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_tiling.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 148
1 file changed, 94 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 31d334d9d9d..cb150e8b433 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,11 +25,10 @@
  *
  */
 
-#include "linux/string.h"
-#include "linux/bitops.h"
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
 #include "i915_drv.h"
 
 /** @file i915_gem_tiling.c
@@ -88,13 +87,31 @@
 void
 i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
-	if (INTEL_INFO(dev)->gen >= 6) {
+	if (IS_VALLEYVIEW(dev)) {
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		uint32_t dimm_c0, dimm_c1;
+		dimm_c0 = I915_READ(MAD_DIMM_C0);
+		dimm_c1 = I915_READ(MAD_DIMM_C1);
+		dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+		dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+		/* Enable swizzling when the channels are populated with
+		 * identically sized dimms. We don't need to check the 3rd
+		 * channel because no cpu with gpu attached ships in that
+		 * configuration. Also, swizzling only makes sense for 2
+		 * channels anyway. */
+		if (dimm_c0 == dimm_c1) {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		} else {
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+		}
 	} else if (IS_GEN5(dev)) {
 		/* On Ironlake whatever DRAM config, GPU always do
 		 * same swizzling setup.
@@ -107,10 +124,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (IS_MOBILE(dev)) {
+	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
 		uint32_t dcc;
 
-		/* On mobile 9xx chipsets, channel interleave by the CPU is
+		/* On 9xx chipsets, channel interleave by the CPU is
 		 * determined by DCC. For single-channel, neither the CPU
 		 * nor the GPU do swizzling. For dual channel interleaved,
 		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
@@ -200,9 +217,12 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		tile_width = 512;
 
 	/* check maximum stride & object size */
-	if (INTEL_INFO(dev)->gen >= 4) {
-		/* i965 stores the end address of the gtt mapping in the fence
-		 * reg, so dont bother to check the size */
+	/* i965+ stores the end address of the gtt mapping in the fence
+	 * reg, so dont bother to check the size */
+	if (INTEL_INFO(dev)->gen >= 7) {
+		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
+			return false;
+	} else if (INTEL_INFO(dev)->gen >= 4) {
 		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
 			return false;
 	} else {
@@ -218,6 +238,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		}
 	}
 
+	if (stride < tile_width)
+		return false;
+
 	/* 965+ just needs multiples of tile width */
 	if (INTEL_INFO(dev)->gen >= 4) {
 		if (stride & (tile_width - 1))
@@ -226,9 +249,6 @@
 	}
 
 	/* Pre-965 needs power of two tile widths */
-	if (stride < tile_width)
-		return false;
-
 	if (stride & (stride - 1))
 		return false;
 
@@ -248,29 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 		return true;
 
 	if (INTEL_INFO(obj->base.dev)->gen == 3) {
-		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
-		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
 			return false;
 	}
 
-	/*
-	 * Previous chips need to be aligned to the size of the smallest
-	 * fence register that can contain the object.
-	 */
-	if (INTEL_INFO(obj->base.dev)->gen == 3)
-		size = 1024*1024;
-	else
-		size = 512*1024;
-
-	while (size < obj->base.size)
-		size <<= 1;
-
-	if (obj->gtt_space->size != size)
+	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+	if (i915_gem_obj_ggtt_size(obj) != size)
 		return false;
 
-	if (obj->gtt_offset & (size - 1))
+	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
 		return false;
 
 	return true;
@@ -285,7 +294,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		   struct drm_file *file)
 {
 	struct drm_i915_gem_set_tiling *args = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
@@ -299,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	if (obj->pin_count) {
+	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
 		drm_gem_object_unreference_unlocked(&obj->base);
 		return -EBUSY;
 	}
@@ -339,34 +348,59 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		/* We need to rebind the object if its current allocation
 		 * no longer meets the alignment restrictions for its new
 		 * tiling mode. Otherwise we can just leave it alone, but
-		 * need to ensure that any fence register is cleared.
+		 * need to ensure that any fence register is updated before
+		 * the next fenced (either through the GTT or by the BLT unit
+		 * on older GPUs) access.
+		 *
+		 * After updating the tiling parameters, we then flag whether
+		 * we need to update an associated fence register. Note this
+		 * has to also include the unfenced register the GPU uses
+		 * whilst executing a fenced command for an untiled object.
 		 */
-		i915_gem_release_mmap(obj);
 
 		obj->map_and_fenceable =
-			obj->gtt_space == NULL ||
-			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+			!i915_gem_obj_ggtt_bound(obj) ||
+			(i915_gem_obj_ggtt_offset(obj) +
+			 obj->base.size <= dev_priv->gtt.mappable_end &&
 			 i915_gem_object_fence_ok(obj, args->tiling_mode));
 
 		/* Rebind if we need a change of alignment */
 		if (!obj->map_and_fenceable) {
-			u32 unfenced_alignment =
-				i915_gem_get_unfenced_gtt_alignment(dev,
-								    obj->base.size,
-								    args->tiling_mode);
-			if (obj->gtt_offset & (unfenced_alignment - 1))
-				ret = i915_gem_object_unbind(obj);
+			u32 unfenced_align =
+				i915_gem_get_gtt_alignment(dev, obj->base.size,
+							   args->tiling_mode,
+							   false);
+			if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
+				ret = i915_gem_object_ggtt_unbind(obj);
 		}
 
 		if (ret == 0) {
-			obj->tiling_changed = true;
+			obj->fence_dirty =
+				obj->fenced_gpu_access ||
+				obj->fence_reg != I915_FENCE_REG_NONE;
+
 			obj->tiling_mode = args->tiling_mode;
 			obj->stride = args->stride;
+
+			/* Force the fence to be reacquired for GTT access */
+			i915_gem_release_mmap(obj);
 		}
 	}
 
 	/* we have to maintain this existing ABI... */
 	args->stride = obj->stride;
 	args->tiling_mode = obj->tiling_mode;
+
+	/* Try to preallocate memory required to save swizzling on put-pages */
+	if (i915_gem_object_needs_bit17_swizzle(obj)) {
+		if (obj->bit_17 == NULL) {
+			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
+					      sizeof(long), GFP_KERNEL);
+		}
+	} else {
+		kfree(obj->bit_17);
+		obj->bit_17 = NULL;
+	}
+
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -381,7 +415,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 		   struct drm_file *file)
 {
 	struct drm_i915_gem_get_tiling *args = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
@@ -443,31 +477,35 @@ i915_gem_swizzle_page(struct page *page)
 
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	int page_count = obj->base.size >> PAGE_SHIFT;
+	struct sg_page_iter sg_iter;
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
-	for (i = 0; i < page_count; i++) {
-		char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
-			i915_gem_swizzle_page(obj->pages[i]);
-			set_page_dirty(obj->pages[i]);
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
 		}
+		i++;
 	}
 }
 
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+	struct sg_page_iter sg_iter;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (obj->bit_17 == NULL) {
-		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
-				      sizeof(long), GFP_KERNEL);
+		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+				      sizeof(long), GFP_KERNEL);
 		if (obj->bit_17 == NULL) {
 			DRM_ERROR("Failed to allocate memory for bit 17 "
 				  "record\n");
@@ -475,10 +513,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		}
 	}
 
-	for (i = 0; i < page_count; i++) {
-		if (page_to_phys(obj->pages[i]) & (1 << 17))
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
+		i++;
 	}
 }
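The swizzle modes negotiated above describe which higher address bits the memory controller XORs into bit 6 of an address; CPU code that touches tiled pages must apply the same transform. The following is a minimal illustrative sketch, not kernel code: the helper name is invented, only the modes relevant to this file are handled, and for the *_17 modes the caller must supply bit 17 of the backing page's physical address. That physical bit is exactly what the obj->bit_17 bitmap in the diff records, so that pages whose physical address changed across swap can be re-swizzled.

#include <stdint.h>
#include <drm/i915_drm.h>	/* I915_BIT_6_SWIZZLE_*; header path varies by setup */

/* Hypothetical helper: apply the CPU-visible bit-6 swizzle to a byte
 * offset into a tiled buffer. bit17 is bit 17 of the backing page's
 * physical address and is only consulted for the *_17 modes. */
static uint64_t swizzle_bit_6(uint64_t offset, uint32_t mode, int bit17)
{
	uint64_t bit6;

	switch (mode) {
	case I915_BIT_6_SWIZZLE_9:		/* bit6 ^= bit9 */
		bit6 = offset >> 9;
		break;
	case I915_BIT_6_SWIZZLE_9_10:		/* bit6 ^= bit9 ^ bit10 */
		bit6 = (offset >> 9) ^ (offset >> 10);
		break;
	case I915_BIT_6_SWIZZLE_9_10_17:	/* bit6 ^= bit9 ^ bit10 ^ phys bit17 */
		bit6 = (offset >> 9) ^ (offset >> 10) ^ (uint64_t)bit17;
		break;
	default:				/* I915_BIT_6_SWIZZLE_NONE and others */
		return offset;
	}

	return offset ^ ((bit6 & 1) << 6);
}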

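For completeness, the ABI that the "/* we have to maintain this existing ABI... */" comment protects is exercised from userspace roughly as follows. This is a sketch against the long-standing uapi struct drm_i915_gem_set_tiling; fd, handle and stride are placeholders, the header path varies between kernel and libdrm installs, and production code normally issues the ioctl through libdrm's drmIoctl(), which retries on EINTR.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* header path varies with kernel/libdrm headers */

/* Ask the kernel to make a GEM buffer X-tiled. The stride must pass the
 * i915_tiling_ok() checks in the diff above; on success the kernel writes
 * the effective tiling mode, stride and swizzle mode back into the struct. */
static int set_x_tiling(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling set;

	memset(&set, 0, sizeof(set));
	set.handle = handle;
	set.tiling_mode = I915_TILING_X;
	set.stride = stride;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) != 0)
		return -1;	/* e.g. EINVAL for a bad stride, EBUSY if pinned */

	printf("tiling=%u stride=%u swizzle=%u\n",
	       set.tiling_mode, set.stride, set.swizzle_mode);
	return 0;
}

The swizzle_mode reported back is how userspace learns whether it must apply the bit-6 transform sketched earlier when accessing the pages with the CPU.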