Diffstat (limited to 'arch/tile/include/asm/cacheflush.h')
-rw-r--r--	arch/tile/include/asm/cacheflush.h	118
1 file changed, 43 insertions, 75 deletions
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index 14a3f8556ac..92ee4c8a4f7 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -20,7 +20,6 @@
 /* Keep includes the same across arches. */
 #include <linux/mm.h>
 #include <linux/cache.h>
-#include <asm/system.h>
 #include <arch/icache.h>
 
 /* Caches are physically-indexed and so don't need special treatment */
@@ -76,23 +75,6 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy((dst), (src), (len))
 
-/*
- * Invalidate a VA range; pads to L2 cacheline boundaries.
- *
- * Note that on TILE64, __inv_buffer() actually flushes modified
- * cache lines in addition to invalidating them, i.e., it's the
- * same as __finv_buffer().
- */
-static inline void __inv_buffer(void *buffer, size_t size)
-{
-	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
-	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
-	while (next < finish) {
-		__insn_inv(next);
-		next += CHIP_INV_STRIDE();
-	}
-}
-
 /* Flush a VA range; pads to L2 cacheline boundaries. */
 static inline void __flush_buffer(void *buffer, size_t size)
 {
@@ -116,77 +98,63 @@ static inline void __finv_buffer(void *buffer, size_t size)
 }
 
 
-/* Invalidate a VA range, then memory fence. */
-static inline void inv_buffer(void *buffer, size_t size)
-{
-	__inv_buffer(buffer, size);
-	mb_incoherent();
-}
-
-/* Flush a VA range, then memory fence. */
-static inline void flush_buffer(void *buffer, size_t size)
+/*
+ * Flush a locally-homecached VA range and wait for the evicted
+ * cachelines to hit memory.
+ */
+static inline void flush_buffer_local(void *buffer, size_t size)
 {
 	__flush_buffer(buffer, size);
 	mb_incoherent();
 }
 
-/* Flush & invalidate a VA range, then memory fence. */
-static inline void finv_buffer(void *buffer, size_t size)
+/*
+ * Flush and invalidate a locally-homecached VA range and wait for the
+ * evicted cachelines to hit memory.
+ */
+static inline void finv_buffer_local(void *buffer, size_t size)
 {
 	__finv_buffer(buffer, size);
 	mb_incoherent();
 }
 
-/*
- * Flush & invalidate a VA range that is homed remotely on a single core,
- * waiting until the memory controller holds the flushed values.
- */
-static inline void finv_buffer_remote(void *buffer, size_t size)
+#ifdef __tilepro__
+/* Invalidate a VA range; pads to L2 cacheline boundaries. */
+static inline void __inv_buffer(void *buffer, size_t size)
 {
-	char *p;
-	int i;
+	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
+	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
+	while (next < finish) {
+		__insn_inv(next);
+		next += CHIP_INV_STRIDE();
+	}
+}
 
-	/*
-	 * Flush and invalidate the buffer out of the local L1/L2
-	 * and request the home cache to flush and invalidate as well.
-	 */
-	__finv_buffer(buffer, size);
+/* Invalidate a VA range and wait for it to be complete. */
+static inline void inv_buffer(void *buffer, size_t size)
+{
+	__inv_buffer(buffer, size);
+	mb();
+}
+#endif
 
-	/*
-	 * Wait for the home cache to acknowledge that it has processed
-	 * all the flush-and-invalidate requests.  This does not mean
-	 * that the flushed data has reached the memory controller yet,
-	 * but it does mean the home cache is processing the flushes.
-	 */
-	__insn_mf();
-
-	/*
-	 * Issue a load to the last cache line, which can't complete
-	 * until all the previously-issued flushes to the same memory
-	 * controller have also completed.  If we weren't striping
-	 * memory, that one load would be sufficient, but since we may
-	 * be, we also need to back up to the last load issued to
-	 * another memory controller, which would be the point where
-	 * we crossed an 8KB boundary (the granularity of striping
-	 * across memory controllers).  Keep backing up and doing this
-	 * until we are before the beginning of the buffer, or have
-	 * hit all the controllers.
-	 */
-	for (i = 0, p = (char *)buffer + size - 1;
-	     i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer;
-	     ++i) {
-		const unsigned long STRIPE_WIDTH = 8192;
-
-		/* Force a load instruction to issue. */
-		*(volatile char *)p;
-
-		/* Jump to end of previous stripe. */
-		p -= STRIPE_WIDTH;
-		p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1));
-	}
+/*
+ * Flush and invalidate a VA range that is homed remotely, waiting
+ * until the memory controller holds the flushed values.  If "hfh" is
+ * true, we will do a more expensive flush involving additional loads
+ * to make sure we have touched all the possible home cpus of a buffer
+ * that is homed with "hash for home".
+ */
+void finv_buffer_remote(void *buffer, size_t size, int hfh);
 
-	/* Wait for the loads (and thus flushes) to have completed. */
-	__insn_mf();
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
 }
 
 #endif /* _ASM_TILE_CACHEFLUSH_H */
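A note on the padding arithmetic shared by __inv_buffer() and the other __*_buffer() helpers above: the start address is rounded down to an L2 cacheline boundary with "& -L2_CACHE_BYTES", and the end is rounded up with L2_CACHE_ALIGN(), so partial lines at either end of the range are still covered. The userspace sketch below reproduces that arithmetic with an assumed 64-byte line size and a made-up buffer address; it is an illustration, not code from this commit.

#include <stdio.h>

/* Assumed example value; on tile hardware this comes from the chip headers. */
#define L2_CACHE_BYTES 64UL
#define L2_CACHE_ALIGN(x) (((x) + L2_CACHE_BYTES - 1) & -L2_CACHE_BYTES)

int main(void)
{
	unsigned long buffer = 0x1003a;  /* arbitrary, deliberately unaligned */
	unsigned long size = 100;

	unsigned long next = buffer & -L2_CACHE_BYTES;         /* round down */
	unsigned long finish = L2_CACHE_ALIGN(buffer + size);  /* round up */

	/* Prints: invalidate [0x10000, 0x100c0): 3 lines */
	printf("invalidate [0x%lx, 0x%lx): %lu lines\n",
	       next, finish, (finish - next) / L2_CACHE_BYTES);
	return 0;
}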
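The long comment deleted above documents the subtlest part of the old inline finv_buffer_remote(): because memory may be striped across controllers in 8KB units, one load per stripe (up to the number of controllers) is needed to force the earlier flushes to every controller to complete. Each step lands on the last byte of the previous stripe by subtracting STRIPE_WIDTH and then OR-ing with STRIPE_WIDTH - 1. Below is a standalone sketch of that pointer walk, using plain unsigned arithmetic instead of real loads, with assumed example values for the buffer address, size, and controller count.

#include <stdio.h>

int main(void)
{
	const unsigned long STRIPE_WIDTH = 8192;
	unsigned long buffer = 0x10000;       /* assumed example VA */
	unsigned long size = 3 * 8192 + 100;  /* spans four stripes */
	int num_mshims = 4;                   /* assume 1 << CHIP_LOG_NUM_MSHIMS() == 4 */
	unsigned long p;
	int i;

	/* Mirrors the removed loop; the real code issues a load at each p. */
	for (i = 0, p = buffer + size - 1;
	     i < num_mshims && p >= buffer;
	     ++i) {
		printf("touch 0x%lx (stripe %lu)\n", p, p / STRIPE_WIDTH);

		/* Jump to the last byte of the previous stripe. */
		p -= STRIPE_WIDTH;
		p |= STRIPE_WIDTH - 1;
	}
	return 0;
}

With these values the walk touches stripes 11, 10, 9, and 8 in turn, one candidate cache line per memory controller, then stops at the start of the buffer.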
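Finally, the user-visible change: finv_buffer_remote() is no longer a static inline in this header but an out-of-line routine taking a third "hfh" argument, so callers must now say whether the buffer is homed "hash for home". A hypothetical caller sketch follows; the function name and the way the flag is obtained are assumptions for illustration, not part of the commit.

#include <asm/cacheflush.h>

/*
 * Hypothetical caller: push a remotely-homed buffer out to memory
 * before a non-coherent consumer reads it.  In real code the
 * hash-for-home property would come from the page's homecache state;
 * here it is simply passed in.
 */
static void example_flush_for_consumer(void *va, size_t len, int is_hash_for_home)
{
	/*
	 * A non-zero third argument requests the more expensive flush
	 * that also touches every possible home cpu of a hash-for-home
	 * buffer.
	 */
	finv_buffer_remote(va, len, is_hash_for_home);
}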
