author    Linus Torvalds <torvalds@linux-foundation.org>    2012-03-21 09:40:26 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-03-21 09:40:26 -0700
commit    9f3938346a5c1fa504647670edb5fea5756cfb00 (patch)
tree      7cf6d24d6b076c8db8571494984924cac03703a2 /drivers
parent    69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff)
parent    317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff)
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic()
  [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...
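For readers skimming the diff below, here is a minimal sketch of the conversion
pattern the series applies. The helper function and its name are hypothetical and
only illustrate the before/after shape of the API; they are not taken from any
file in this merge.

    #include <linux/highmem.h>
    #include <linux/string.h>

    /*
     * Before this series, callers had to name an explicit kmap slot:
     *     dst = kmap_atomic(dst_page, KM_USER0);
     *     ...
     *     kunmap_atomic(dst, KM_USER0);
     * After it, the slot argument is gone; the highmem code tracks
     * nested atomic mappings itself.
     */
    static void copy_one_page(struct page *dst_page, struct page *src_page)
    {
            void *dst = kmap_atomic(dst_page);
            void *src = kmap_atomic(src_page);

            memcpy(dst, src, PAGE_SIZE);

            /* Unmap in reverse order of mapping. */
            kunmap_atomic(src);
            kunmap_atomic(dst);
    }

Because nesting is now tracked implicitly, the per-context slot types
(KM_USER0, KM_IRQ0, and so on) seen on the removed lines below simply disappear.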
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-sff.c | 8
-rw-r--r--  drivers/block/brd.c | 20
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 50
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 4
-rw-r--r--  drivers/block/loop.c | 16
-rw-r--r--  drivers/block/pktcdvd.c | 8
-rw-r--r--  drivers/crypto/hifn_795x.c | 10
-rw-r--r--  drivers/edac/edac_mc.c | 4
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c | 30
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 6
-rw-r--r--  drivers/ide/ide-taskfile.c | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 8
-rw-r--r--  drivers/md/bitmap.c | 42
-rw-r--r--  drivers/md/dm-crypt.c | 8
-rw-r--r--  drivers/media/video/ivtv/ivtv-udma.c | 4
-rw-r--r--  drivers/memstick/host/jmb38x_ms.c | 4
-rw-r--r--  drivers/memstick/host/tifm_ms.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 11
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 4
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 8
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 5
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 5
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 4
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 5
-rw-r--r--  drivers/scsi/gdth.c | 4
-rw-r--r--  drivers/scsi/ips.c | 6
-rw-r--r--  drivers/scsi/isci/request.c | 16
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 8
-rw-r--r--  drivers/scsi/libfc/fc_libfc.c | 8
-rw-r--r--  drivers/scsi/libfc/fc_libfc.h | 2
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 2
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 8
-rw-r--r--  drivers/scsi/megaraid.c | 4
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 4
-rw-r--r--  drivers/scsi/scsi_debug.c | 24
-rw-r--r--  drivers/scsi/scsi_lib.c | 4
-rw-r--r--  drivers/scsi/sd_dif.c | 12
-rw-r--r--  drivers/scsi/storvsc_drv.c | 52
-rw-r--r--  drivers/staging/ramster/xvmalloc.c | 39
-rw-r--r--  drivers/staging/ramster/zcache-main.c | 20
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/cipher.c | 8
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/digest.c | 8
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/internal.h | 17
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/kmap_types.h | 20
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/scatterwalk.c | 19
-rw-r--r--  drivers/staging/zcache/zcache-main.c | 12
-rw-r--r--  drivers/staging/zram/zram_drv.c | 32
-rw-r--r--  drivers/target/target_core_transport.c | 4
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 10
-rw-r--r--  drivers/vhost/vhost.c | 4
54 files changed, 297 insertions, 356 deletions
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 9691dd0966d..d8af325a6bd 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -720,13 +720,13 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
/* FIXME: use a bounce buffer */
local_irq_save(flags);
- buf = kmap_atomic(page, KM_IRQ0);
+ buf = kmap_atomic(page);
/* do the actual data transfer */
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
do_write);
- kunmap_atomic(buf, KM_IRQ0);
+ kunmap_atomic(buf);
local_irq_restore(flags);
} else {
buf = page_address(page);
@@ -865,13 +865,13 @@ next_sg:
/* FIXME: use bounce buffer */
local_irq_save(flags);
- buf = kmap_atomic(page, KM_IRQ0);
+ buf = kmap_atomic(page);
/* do the actual data transfer */
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
count, rw);
- kunmap_atomic(buf, KM_IRQ0);
+ kunmap_atomic(buf);
local_irq_restore(flags);
} else {
buf = page_address(page);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index ec246437f5a..531ceb31d0f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -242,9 +242,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
page = brd_lookup_page(brd, sector);
BUG_ON(!page);
- dst = kmap_atomic(page, KM_USER1);
+ dst = kmap_atomic(page);
memcpy(dst + offset, src, copy);
- kunmap_atomic(dst, KM_USER1);
+ kunmap_atomic(dst);
if (copy < n) {
src += copy;
@@ -253,9 +253,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
page = brd_lookup_page(brd, sector);
BUG_ON(!page);
- dst = kmap_atomic(page, KM_USER1);
+ dst = kmap_atomic(page);
memcpy(dst, src, copy);
- kunmap_atomic(dst, KM_USER1);
+ kunmap_atomic(dst);
}
}
@@ -273,9 +273,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
copy = min_t(size_t, n, PAGE_SIZE - offset);
page = brd_lookup_page(brd, sector);
if (page) {
- src = kmap_atomic(page, KM_USER1);
+ src = kmap_atomic(page);
memcpy(dst, src + offset, copy);
- kunmap_atomic(src, KM_USER1);
+ kunmap_atomic(src);
} else
memset(dst, 0, copy);
@@ -285,9 +285,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
copy = n - copy;
page = brd_lookup_page(brd, sector);
if (page) {
- src = kmap_atomic(page, KM_USER1);
+ src = kmap_atomic(page);
memcpy(dst, src, copy);
- kunmap_atomic(src, KM_USER1);
+ kunmap_atomic(src);
} else
memset(dst, 0, copy);
}
@@ -309,7 +309,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
goto out;
}
- mem = kmap_atomic(page, KM_USER0);
+ mem = kmap_atomic(page);
if (rw == READ) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
@@ -317,7 +317,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
flush_dcache_page(page);
copy_to_brd(brd, mem + off, sector, len);
}
- kunmap_atomic(mem, KM_USER0);
+ kunmap_atomic(mem);
out:
return err;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 912f585a760..3030201c69d 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -289,25 +289,25 @@ static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
return page_nr;
}
-static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
+static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
struct page *page = b->bm_pages[idx];
- return (unsigned long *) kmap_atomic(page, km);
+ return (unsigned long *) kmap_atomic(page);
}
static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
- return __bm_map_pidx(b, idx, KM_IRQ1);
+ return __bm_map_pidx(b, idx);
}
-static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
+static void __bm_unmap(unsigned long *p_addr)
{
- kunmap_atomic(p_addr, km);
+ kunmap_atomic(p_addr);
};
static void bm_unmap(unsigned long *p_addr)
{
- return __bm_unmap(p_addr, KM_IRQ1);
+ return __bm_unmap(p_addr);
}
/* long word offset of _bitmap_ sector */
@@ -543,15 +543,15 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
/* all but last page */
for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
- p_addr = __bm_map_pidx(b, idx, KM_USER0);
+ p_addr = __bm_map_pidx(b, idx);
for (i = 0; i < LWPP; i++)
bits += hweight_long(p_addr[i]);
- __bm_unmap(p_addr, KM_USER0);
+ __bm_unmap(p_addr);
cond_resched();
}
/* last (or only) page */
last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
- p_addr = __bm_map_pidx(b, idx, KM_USER0);
+ p_addr = __bm_map_pidx(b, idx);
for (i = 0; i < last_word; i++)
bits += hweight_long(p_addr[i]);
p_addr[last_word] &= cpu_to_lel(mask);
@@ -559,7 +559,7 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
/* 32bit arch, may have an unused padding long */
if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
p_addr[last_word+1] = 0;
- __bm_unmap(p_addr, KM_USER0);
+ __bm_unmap(p_addr);
return bits;
}
@@ -970,11 +970,11 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
* to use pre-allocated page pool */
void *src, *dest;
page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
- dest = kmap_atomic(page, KM_USER0);
- src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
+ dest = kmap_atomic(page);
+ src = kmap_atomic(b->bm_pages[page_nr]);
memcpy(dest, src, PAGE_SIZE);
- kunmap_atomic(src, KM_USER1);
- kunmap_atomic(dest, KM_USER0);
+ kunmap_atomic(src);
+ kunmap_atomic(dest);
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
@@ -1163,7 +1163,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
* this returns a bit number, NOT a sector!
*/
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
- const int find_zero_bit, const enum km_type km)
+ const int find_zero_bit)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long *p_addr;
@@ -1178,7 +1178,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
while (bm_fo < b->bm_bits) {
/* bit offset of the first bit in the page */
bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
- p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);
+ p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));
if (find_zero_bit)
i = find_next_zero_bit_le(p_addr,
@@ -1187,7 +1187,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
i = find_next_bit_le(p_addr,
PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
- __bm_unmap(p_addr, km);
+ __bm_unmap(p_addr);
if (i < PAGE_SIZE*8) {
bm_fo = bit_offset + i;
if (bm_fo >= b->bm_bits)
@@ -1215,7 +1215,7 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(mdev);
- i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
+ i = __bm_find_next(mdev, bm_fo, find_zero_bit);
spin_unlock_irq(&b->bm_lock);
return i;
@@ -1239,13 +1239,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
- return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
+ return __bm_find_next(mdev, bm_fo, 0);
}
unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
- return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
+ return __bm_find_next(mdev, bm_fo, 1);
}
/* returns number of bits actually changed.
@@ -1273,14 +1273,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
if (page_nr != last_page_nr) {
if (p_addr)
- __bm_unmap(p_addr, KM_IRQ1);
+ __bm_unmap(p_addr);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
changed_total += c;
c = 0;
- p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
+ p_addr = __bm_map_pidx(b, page_nr);
last_page_nr = page_nr;
}
if (val)
@@ -1289,7 +1289,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
}
if (p_addr)
- __bm_unmap(p_addr, KM_IRQ1);
+ __bm_unmap(p_addr);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
@@ -1342,13 +1342,13 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
{
int i;
int bits;
- unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
+ unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
for (i = first_word; i < last_word; i++) {
bits = hweight_long(paddr[i]);
paddr[i] = ~0UL;
b->bm_set += BITS_PER_LONG - bits;
}
- kunmap_atomic(paddr, KM_IRQ1);
+ kunmap_atomic(paddr);
}
/* Same thing as drbd_bm_set_bits,
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index af2a25049bc..e09f9cebbb2 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2526,10 +2526,10 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
page = e->pages;
page_chain_for_each(page) {
- void *d = kmap_atomic(page, KM_USER0);
+ void *d = kmap_atomic(page);
unsigned l = min_t(unsigned, len, PAGE_SIZE);
memcpy(tl, d, l);
- kunmap_atomic(d, KM_USER0);
+ kunmap_atomic(d);
tl = (unsigned short*)((char*)tl + l);
len -= l;
if (len == 0)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cd504353b27..bbca966f8f6 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -93,16 +93,16 @@ static int transfer_none(struct loop_device *lo, int cmd,
struct page *loop_page, unsigned loop_off,
int size, sector_t real_block)
{
- char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
- char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+ char *raw_buf = kmap_atomic(raw_page) + raw_off;
+ char *loop_buf = kmap_atomic(loop_page) + loop_off;
if (cmd == READ)
memcpy(loop_buf, raw_buf, size);
else
memcpy(raw_buf, loop_buf, size);
- kunmap_atomic(loop_buf, KM_USER1);
- kunmap_atomic(raw_buf, KM_USER0);
+ kunmap_atomic(loop_buf);
+ kunmap_atomic(raw_buf);
cond_resched();
return 0;
}
@@ -112,8 +112,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
struct page *loop_page, unsigned loop_off,
int size, sector_t real_block)
{
- char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
- char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+ char *raw_buf = kmap_atomic(raw_page) + raw_off;
+ char *loop_buf = kmap_atomic(loop_page) + loop_off;
char *in, *out, *key;
int i, keysize;
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
for (i = 0; i < size; i++)
*out++ = *in++ ^ key[(i & 511) % keysize];
- kunmap_atomic(loop_buf, KM_USER1);
- kunmap_atomic(raw_buf, KM_USER0);
+ kunmap_atomic(loop_buf);
+ kunmap_atomic(raw_buf);
cond_resched();
return 0;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d59edeabd93..ba66e4445f4 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -987,14 +987,14 @@ static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct pag
while (copy_size > 0) {
struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
- void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
+ void *vfrom = kmap_atomic(src_bvl->bv_page) +
src_bvl->bv_offset + offs;
void *vto = page_address(dst_page) + dst_offs;
int len = min_t(int, copy_size, src_bvl->bv_len - offs);
BUG_ON(len < 0);
memcpy(vto, vfrom, len);
- kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vfrom);
seg++;
offs = 0;
@@ -1019,10 +1019,10 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
offs = 0;
for (f = 0; f < pkt->frames; f++) {
if (bvec[f].bv_page != pkt->pages[p]) {
- void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset;
+ void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
void *vto = page_address(pkt->pages[p]) + offs;
memcpy(vto, vfrom, CD_FRAMESIZE);
- kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vfrom);
bvec[f].bv_page = pkt->pages[p];
bvec[f].bv_offset = offs;
} else {
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index fe765f49de5..76368f98402 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1731,9 +1731,9 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
while (size) {
copy = min3(srest, dst->length, size);
- daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
+ daddr = kmap_atomic(sg_page(dst));
memcpy(daddr + dst->offset + offset, saddr, copy);
- kunmap_atomic(daddr, KM_IRQ0);
+ kunmap_atomic(daddr);
nbytes -= copy;
size -= copy;
@@ -1793,17 +1793,17 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
continue;
}
- saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
+ saddr = kmap_atomic(sg_page(t));
err = ablkcipher_get(saddr, &t->length, t->offset,
dst, nbytes, &nbytes);
if (err < 0) {
- kunmap_atomic(saddr, KM_SOFTIRQ0);
+ kunmap_atomic(saddr);
break;
}
idx += err;
- kunmap_atomic(saddr, KM_SOFTIRQ0);
+ kunmap_atomic(saddr);
}
hifn_cipher_walk_exit(&rctx->walk);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index ca6c04d350e..da09cd74bc5 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -620,13 +620,13 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
if (PageHighMem(pg))
local_irq_save(flags);
- virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
+ virt_addr = kmap_atomic(pg);
/* Perform architecture specific atomic scrub operation */
atomic_scrub(virt_addr + offset, size);
/* Unmap and complete */
- kunmap_atomic(virt_addr, KM_BOUNCE_READ);
+ kunmap_atomic(virt_addr);
if (PageHighMem(pg))
local_irq_restore(flags);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 592865381c6..4b8653b932f 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -41,10 +41,10 @@ drm_clflush_page(struct page *page)
if (unlikely(page == NULL))
return;
- page_virtual = kmap_atomic(page, KM_USER0);
+ page_virtual = kmap_atomic(page);
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
clflush(page_virtual + i);
- kunmap_atomic(page_virtual, KM_USER0);
+ kunmap_atomic(page_virtual);
}
static void drm_cache_flush_clflush(struct page *pages[],
@@ -87,10 +87,10 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
if (unlikely(page == NULL))
continue;
- page_virtual = kmap_atomic(page, KM_USER0);
+ page_virtual = kmap_atomic(page);
flush_dcache_range((unsigned long)page_virtual,
(unsigned long)page_virtual + PAGE_SIZE);
- kunmap_atomic(page_virtual, KM_USER0);
+ kunmap_atomic(page_virtual);
}
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index c904d73b1de..e80ee82f6ca 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -125,14 +125,14 @@ static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
int i;
uint8_t *clf;
- clf = kmap_atomic(page, KM_USER0);
+ clf = kmap_atomic(page);
mb();
for (i = 0; i < clflush_count; ++i) {
psb_clflush(clf);
clf += clflush_add;
}
mb();
- kunmap_atomic(clf, KM_USER0);
+ kunmap_atomic(clf);
}
static void psb_pages_clflush(struct psb_mmu_driver *driver,
@@ -325,7 +325,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
spin_lock(lock);
- v = kmap_atomic(pt->p, KM_USER0);
+ v = kmap_atomic(pt->p);
clf = (uint8_t *) v;
ptes = (uint32_t *) v;
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@@ -341,7 +341,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
mb();
}
- kunmap_atomic(v, KM_USER0);
+ kunmap_atomic(v);
spin_unlock(lock);
pt->count = 0;
@@ -376,18 +376,18 @@ struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
continue;
}
- v = kmap_atomic(pd->p, KM_USER0);
+ v = kmap_atomic(pd->p);
pd->tables[index] = pt;
v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
pt->index = index;
- kunmap_atomic((void *) v, KM_USER0);
+ kunmap_atomic((void *) v);
if (pd->hw_context != -1) {
psb_mmu_clflush(pd->driver, (void *) &v[index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
}
- pt->v = kmap_atomic(pt->p, KM_USER0);
+ pt->v = kmap_atomic(pt->p);
return pt;
}
@@ -404,7 +404,7 @@ static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
spin_unlock(lock);
return NULL;
}
- pt->v = kmap_atomic(pt->p, KM_USER0);
+ pt->v = kmap_atomic(pt->p);
return pt;
}
@@ -413,9 +413,9 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
struct psb_mmu_pd *pd = pt->pd;
uint32_t *v;
- kunmap_atomic(pt->v, KM_USER0);
+ kunmap_atomic(pt->v);
if (pt->count == 0) {
- v = kmap_atomic(pd->p, KM_USER0);
+ v = kmap_atomic(pd->p);
v[pt->index] = pd->invalid_pde;
pd->tables[pt->index] = NULL;
@@ -424,7 +424,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
(void *) &v[pt->index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
- kunmap_atomic(pt->v, KM_USER0);
+ kunmap_atomic(pt->v);
spin_unlock(&pd->driver->lock);
psb_mmu_free_pt(pt);
return;
@@ -457,7 +457,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
down_read(&driver->sem);
spin_lock(&driver->lock);
- v = kmap_atomic(pd->p, KM_USER0);
+ v = kmap_atomic(pd->p);
v += start;
while (gtt_pages--) {
@@ -467,7 +467,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
/*ttm_tt_cache_flush(&pd->p, num_pages);*/
psb_pages_clflush(pd->driver, &pd->p, num_pages);
- kunmap_atomic(v, KM_USER0);
+ kunmap_atomic(v);
spin_unlock(&driver->lock);
if (pd->hw_context != -1)
@@ -830,9 +830,9 @@ int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
uint32_t *v;
spin_lock(lock);
- v = kmap_atomic(pd->p, KM_USER0);
+ v = kmap_atomic(pd->p);
tmp = v[psb_mmu_pd_index(virtual)];
- kunmap_atomic(v, KM_USER0);
+ kunmap_atomic(v);
spin_unlock(lock);
if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 2f75d203a2b..c10cf5e2443 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -309,11 +309,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
goto out_err;
preempt_disable();
- from_virtual = kmap_atomic(from_page, KM_USER0);
- to_virtual = kmap_atomic(to_page, KM_USER1);
+ from_virtual = kmap_atomic(from_page);