Diffstat (limited to 'drivers/gpu/drm/drm_cache.c')
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 69
1 file changed, 62 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0e3bd5b54b7..a6b690626a6 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -28,22 +28,30 @@
  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  */
 
-#include "drmP.h"
+#include <linux/export.h>
+#include <drm/drmP.h>
 
 #if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
+ * in the caller.
+ */
 static void
 drm_clflush_page(struct page *page)
 {
 	uint8_t *page_virtual;
 	unsigned int i;
+	const int size = boot_cpu_data.x86_clflush_size;
 
 	if (unlikely(page == NULL))
 		return;
 
-	page_virtual = kmap_atomic(page, KM_USER0);
-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		clflush(page_virtual + i);
-	kunmap_atomic(page_virtual, KM_USER0);
+	page_virtual = kmap_atomic(page);
+	for (i = 0; i < PAGE_SIZE; i += size)
+		clflushopt(page_virtual + i);
+	kunmap_atomic(page_virtual);
 }
 
 static void drm_cache_flush_clflush(struct page *pages[],
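The comment introduced above states that clflushopt is unordered and that drm_clflush_page() relies on its caller for fencing. Below is a minimal sketch of that caller-side pattern, mirroring what drm_cache_flush_clflush() in this file does; the function name flush_page_batch is invented for illustration.

/*
 * Sketch of the caller-side fencing the new comment refers to: the
 * per-page helper issues unordered clflushopt instructions, so the
 * caller brackets the whole batch with memory barriers.
 */
static void flush_page_batch(struct page *pages[], unsigned long num_pages)
{
	unsigned long i;

	mb();	/* order earlier stores before the flushes */
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(pages[i]);
	mb();	/* order the flushes before later accesses */
}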
@@ -86,10 +94,10 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 		if (unlikely(page == NULL))
 			continue;
 
-		page_virtual = kmap_atomic(page, KM_USER0);
+		page_virtual = kmap_atomic(page);
 		flush_dcache_range((unsigned long)page_virtual,
 				   (unsigned long)page_virtual + PAGE_SIZE);
-		kunmap_atomic(page_virtual, KM_USER0);
+		kunmap_atomic(page_virtual);
 	}
 #else
 	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
@@ -97,3 +105,50 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_pages);
+
+void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		struct sg_page_iter sg_iter;
+
+		mb();
+		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+			drm_clflush_page(sg_page_iter_page(&sg_iter));
+		mb();
+
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
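A hypothetical caller of the new drm_clflush_sg() helper could look like the sketch below: the CPU writes into pages tracked by an sg_table, then flushes them before a non-coherent device reads them. Apart from drm_clflush_sg() and the standard scatterlist/highmem helpers, all names are invented for illustration.

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <drm/drmP.h>

/* Hypothetical example: fill backing pages from the CPU, then flush them. */
static void example_fill_and_flush(struct sg_table *st, u8 pattern)
{
	struct sg_page_iter iter;

	/* Write the pattern into every backing page via a temporary mapping. */
	for_each_sg_page(st->sgl, &iter, st->nents, 0) {
		void *vaddr = kmap_atomic(sg_page_iter_page(&iter));

		memset(vaddr, pattern, PAGE_SIZE);
		kunmap_atomic(vaddr);
	}

	/* Flush the dirty cachelines before the device reads the pages. */
	drm_clflush_sg(st);
}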
+
+void
+drm_clflush_virt_range(void *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		void *end = addr + length;
+		mb();
+		for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+			clflushopt(addr);
+		clflushopt(end - 1);
+		mb();
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
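Likewise, a hypothetical user of drm_clflush_virt_range() might flush just the sub-range of a kernel virtual mapping that the CPU has updated, for example a small header in a vmap()ed buffer. Only drm_clflush_virt_range() comes from the patch above; the other names are illustrative.

#include <drm/drmP.h>

/* Hypothetical example: make a CPU-written header visible to a non-coherent device. */
static void example_publish_header(void *buf, size_t header_size)
{
	/* CPU stores to buf[0..header_size) happen before this call. */
	drm_clflush_virt_range(buf, header_size);
}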