Diffstat (limited to 'drivers/gpu/drm/drm_cache.c')
-rw-r--r--  drivers/gpu/drm/drm_cache.c  105
1 file changed, 95 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0e994a0e46d..a6b690626a6 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -28,22 +28,47 @@
  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  */
 
-#include "drmP.h"
+#include <linux/export.h>
+#include <drm/drmP.h>
 
 #if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
+ * in the caller.
+ */
 static void
 drm_clflush_page(struct page *page)
 {
 	uint8_t *page_virtual;
 	unsigned int i;
+	const int size = boot_cpu_data.x86_clflush_size;
 
 	if (unlikely(page == NULL))
 		return;
 
-	page_virtual = kmap_atomic(page, KM_USER0);
-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		clflush(page_virtual + i);
-	kunmap_atomic(page_virtual, KM_USER0);
+	page_virtual = kmap_atomic(page);
+	for (i = 0; i < PAGE_SIZE; i += size)
+		clflushopt(page_virtual + i);
+	kunmap_atomic(page_virtual);
+}
+
+static void drm_cache_flush_clflush(struct page *pages[],
+				    unsigned long num_pages)
+{
+	unsigned long i;
+
+	mb();
+	for (i = 0; i < num_pages; i++)
+		drm_clflush_page(*pages++);
+	mb();
+}
+
+static void
+drm_clflush_ipi_handler(void *null)
+{
+	wbinvd();
 }
 #endif
 
@@ -53,17 +78,77 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 
 #if defined(CONFIG_X86)
 	if (cpu_has_clflush) {
-		unsigned long i;
+		drm_cache_flush_clflush(pages, num_pages);
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+
+#elif defined(__powerpc__)
+	unsigned long i;
+	for (i = 0; i < num_pages; i++) {
+		struct page *page = pages[i];
+		void *page_virtual;
+
+		if (unlikely(page == NULL))
+			continue;
+
+		page_virtual = kmap_atomic(page);
+		flush_dcache_range((unsigned long)page_virtual,
+				   (unsigned long)page_virtual + PAGE_SIZE);
+		kunmap_atomic(page_virtual);
+	}
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_pages);
+
+void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		struct sg_page_iter sg_iter;
 
 		mb();
-		for (i = 0; i < num_pages; ++i)
-			drm_clflush_page(*pages++);
+		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+			drm_clflush_page(sg_page_iter_page(&sg_iter));
 		mb();
 
 		return;
 	}
 
-	wbinvd();
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
 #endif
 }
-EXPORT_SYMBOL(drm_clflush_pages);
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
+drm_clflush_virt_range(void *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		void *end = addr + length;
+
+		mb();
+		for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+			clflushopt(addr);
+		clflushopt(end - 1);
+		mb();
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
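The three exported entry points above cover the common ways a driver holds a buffer: a plain page array (drm_clflush_pages), a scatter/gather table (drm_clflush_sg), and a kernel virtual mapping (drm_clflush_virt_range). Below is a minimal sketch of how a caller might choose between them; the my_bo structure and my_bo_flush_for_device() helper are hypothetical illustrations of the calling convention and are not part of this patch.

#include <linux/scatterlist.h>
#include <drm/drmP.h>

/* Hypothetical buffer object; only the fields used below are shown. */
struct my_bo {
	struct page **pages;	/* backing pages, if page-backed */
	unsigned long num_pages;
	struct sg_table *sgt;	/* scatter/gather view, if any */
	void *vaddr;		/* kernel mapping, if vmapped */
	size_t size;
};

/*
 * Flush CPU caches before the GPU reads the buffer. Per the diff
 * above, each helper does its own mb() fencing around the
 * clflush(opt) fast path and falls back to a wbinvd IPI on x86 CPUs
 * without clflush, so callers need no extra barriers.
 */
static void my_bo_flush_for_device(struct my_bo *bo)
{
	if (bo->vaddr)
		drm_clflush_virt_range(bo->vaddr, bo->size);
	else if (bo->sgt)
		drm_clflush_sg(bo->sgt);
	else
		drm_clflush_pages(bo->pages, bo->num_pages);
}

Note that on architectures other than x86 and powerpc these helpers only print an error and warn once, so a portable driver still needs its own coherency strategy (for example write-combining mappings or the DMA API) rather than relying on them unconditionally.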
