From e08afeb7e69f45e4ab9fbb8530fe433484b96606 Mon Sep 17 00:00:00 2001 From: Brian King Date: Tue, 23 Jun 2009 17:14:01 -0500 Subject: [SCSI] ibmvscsi: Fix module load hang Fixes a regression seen in the ibmvscsi driver when using the VSCSI server in SLES 9 and SLES 10. The VSCSI server in these releases has a bug in it in which it does not send responses to unknown MADs. Check the OS Type field in the adapter info response and do not send these unsupported commands when talking to an older server. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvscsi.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 869a11bdccb..9928704e235 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1095,9 +1095,14 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) MAX_INDIRECT_BUFS); hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; } + + if (hostdata->madapter_info.os_type == 3) { + enable_fast_fail(hostdata); + return; + } } - enable_fast_fail(hostdata); + send_srp_login(hostdata); } /** -- cgit v1.2.3-18-g5258 From 87a2d34b0372dcf6bc4caf4d97a7889f5e62a1af Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Tue, 23 Jun 2009 01:06:40 +0200 Subject: [SCSI] fnic: remove redundant BUG_ONs and fix checks on unsigned The shost sg tablesize is set to FNIC_MAX_SG_DESC_CNT and fnic uses scsi_dma_map, so both BUG_ONs can be removed. scsi_dma_map may return -ENOMEM, sg_count should be int to catch that. Signed-off-by: Roel Kluin Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic_scsi.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index eabf3650285..bfc996971b8 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -245,7 +245,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, struct vnic_wq_copy *wq, struct fnic_io_req *io_req, struct scsi_cmnd *sc, - u32 sg_count) + int sg_count) { struct scatterlist *sg; struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); @@ -260,9 +260,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, char msg[2]; if (sg_count) { - BUG_ON(sg_count < 0); - BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT); - /* For each SGE, create a device desc entry */ desc = io_req->sgl_list; for_each_sg(scsi_sglist(sc), sg, sg_count, i) { @@ -344,7 +341,7 @@ int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) struct fnic *fnic; struct vnic_wq_copy *wq; int ret; - u32 sg_count; + int sg_count; unsigned long flags; unsigned long ptr; -- cgit v1.2.3-18-g5258 From e3f47cc74bddea8121560026185ede4770170043 Mon Sep 17 00:00:00 2001 From: Abhijeet Joglekar Date: Wed, 24 Jun 2009 07:42:25 -0700 Subject: [SCSI] fnic: use DMA_BIT_MASK(nn) instead of deprecated DMA_nnBIT_MASK Robert Love reported warning while building fnic_main.c: drivers/scsi/fnic/fnic_main.c:478: warning: `DMA_nnBIT_MASK' is deprecated. 
Replaced use of DMA_nnBIT_MASK by DMA_BIT_MASK(nn) Signed-off-by: Abhijeet Joglekar Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index a84072865fc..2c266c01dc5 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -473,16 +473,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev, * limitation for the device. Try 40-bit first, and * fail to 32-bit. */ - err = pci_set_dma_mask(pdev, DMA_40BIT_MASK); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) { - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "No usable DMA configuration " "aborting\n"); goto err_out_release_regions; } - err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Unable to obtain 32-bit DMA " @@ -490,7 +490,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, goto err_out_release_regions; } } else { - err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Unable to obtain 40-bit DMA " -- cgit v1.2.3-18-g5258 From d3a263a8168f78874254ea9da9595cfb0f3e96d7 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 24 Jun 2009 19:55:22 +0000 Subject: [SCSI] zalon: fix oops on attach failure I recently discovered on my zalon that if the attachment fails because of a bus misconfiguration (I scrapped my HVD array, so the card is now unterminated) then the system oopses. The reason is that if ncr_attach() returns NULL (signalling failure) that NULL is passed by the goto failed straight into ncr_detach() which oopses. The fix is just to return -ENODEV in this case. Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/zalon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c index 97f3158fa7b..27e84e4b1fa 100644 --- a/drivers/scsi/zalon.c +++ b/drivers/scsi/zalon.c @@ -134,7 +134,7 @@ zalon_probe(struct parisc_device *dev) host = ncr_attach(&zalon7xx_template, unit, &device); if (!host) - goto fail; + return -ENODEV; if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", -- cgit v1.2.3-18-g5258 From 6fdc03709433ccc2005f0f593ae9d9dd04f7b485 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sat, 20 Jun 2009 13:23:59 +0200 Subject: firewire: core: do not DMA-map stack addresses The DMA mapping API cannot map on-stack addresses, as explained in Documentation/DMA-mapping.txt. Convert the two cases of on-stack packet payload buffers in firewire-core (payload of lock requests in the bus manager work and in iso resource management) to slab-allocated memory. There are a number on-stack buffers for quadlet write or quadlet read requests in firewire-core and firewire-sbp2. These are harmless; they are copied to/ from card driver internal DMA buffers since quadlet payloads are inlined with packet headers. 
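For illustration only, a minimal sketch (not part of this patch) of the rule being enforced: streaming DMA mappings must be backed by slab- or page-allocated memory, never by automatic (stack) variables. The 'dev' argument, the helper name and the two-quadlet buffer size are assumptions for the example.

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int map_lock_payload(struct device *dev)
{
        __be32 *data;
        dma_addr_t bus;

        /* WRONG would be "__be32 data[2];" on the stack -- it cannot be DMA-mapped */
        data = kmalloc(2 * sizeof(*data), GFP_KERNEL);  /* slab memory is fine */
        if (!data)
                return -ENOMEM;

        bus = dma_map_single(dev, data, 2 * sizeof(*data), DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, bus)) {
                kfree(data);
                return -ENOMEM;
        }

        /* hand 'bus' to the hardware here, then tear the mapping down */
        dma_unmap_single(dev, bus, 2 * sizeof(*data), DMA_BIDIRECTIONAL);
        kfree(data);
        return 0;
}

The patch applies the same idea by embedding the two-quadlet lock payloads in the long-lived, slab-allocated fw_card and iso_resource structures instead of on the stack.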
Signed-off-by: Stefan Richter --- drivers/firewire/core-card.c | 14 +++++++------- drivers/firewire/core-cdev.c | 4 +++- drivers/firewire/core-iso.c | 24 +++++++++++++----------- drivers/firewire/core.h | 3 ++- include/linux/firewire.h | 1 + 5 files changed, 26 insertions(+), 20 deletions(-) diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 543fccac81b..f74edae5cb4 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -196,8 +196,8 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation) { int channel, bandwidth = 0; - fw_iso_resource_manage(card, generation, 1ULL << 31, - &channel, &bandwidth, true); + fw_iso_resource_manage(card, generation, 1ULL << 31, &channel, + &bandwidth, true, card->bm_transaction_data); if (channel == 31) { card->broadcast_channel_allocated = true; device_for_each_child(card->device, (void *)(long)generation, @@ -230,7 +230,6 @@ static void fw_card_bm_work(struct work_struct *work) bool do_reset = false; bool root_device_is_running; bool root_device_is_cmc; - __be32 lock_data[2]; spin_lock_irqsave(&card->lock, flags); @@ -273,22 +272,23 @@ static void fw_card_bm_work(struct work_struct *work) goto pick_me; } - lock_data[0] = cpu_to_be32(0x3f); - lock_data[1] = cpu_to_be32(local_id); + card->bm_transaction_data[0] = cpu_to_be32(0x3f); + card->bm_transaction_data[1] = cpu_to_be32(local_id); spin_unlock_irqrestore(&card->lock, flags); rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_id, generation, SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, - lock_data, sizeof(lock_data)); + card->bm_transaction_data, + sizeof(card->bm_transaction_data)); if (rcode == RCODE_GENERATION) /* Another bus reset, BM work has been rescheduled. */ goto out; if (rcode == RCODE_COMPLETE && - lock_data[0] != cpu_to_be32(0x3f)) { + card->bm_transaction_data[0] != cpu_to_be32(0x3f)) { /* Somebody else is BM. Only act as IRM. */ if (local_id == irm_id) diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index d1d30c615b0..ced186d7e9a 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -125,6 +125,7 @@ struct iso_resource { int generation; u64 channels; s32 bandwidth; + __be32 transaction_data[2]; struct iso_resource_event *e_alloc, *e_dealloc; }; @@ -1049,7 +1050,8 @@ static void iso_resource_work(struct work_struct *work) r->channels, &channel, &bandwidth, todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC || - todo == ISO_RES_ALLOC_ONCE); + todo == ISO_RES_ALLOC_ONCE, + r->transaction_data); /* * Is this generation outdated already? As long as this resource sticks * in the idr, it will be scheduled again for a newer generation or at diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 166f19c6d38..110e731f557 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -177,9 +177,8 @@ EXPORT_SYMBOL(fw_iso_context_stop); */ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, - int bandwidth, bool allocate) + int bandwidth, bool allocate, __be32 data[2]) { - __be32 data[2]; int try, new, old = allocate ? 
BANDWIDTH_AVAILABLE_INITIAL : 0; /* @@ -215,9 +214,9 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, } static int manage_channel(struct fw_card *card, int irm_id, int generation, - u32 channels_mask, u64 offset, bool allocate) + u32 channels_mask, u64 offset, bool allocate, __be32 data[2]) { - __be32 data[2], c, all, old; + __be32 c, all, old; int i, retry = 5; old = all = allocate ? cpu_to_be32(~0) : 0; @@ -260,7 +259,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, } static void deallocate_channel(struct fw_card *card, int irm_id, - int generation, int channel) + int generation, int channel, __be32 buffer[2]) { u32 mask; u64 offset; @@ -269,7 +268,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id, offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; - manage_channel(card, irm_id, generation, mask, offset, false); + manage_channel(card, irm_id, generation, mask, offset, false, buffer); } /** @@ -298,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id, */ void fw_iso_resource_manage(struct fw_card *card, int generation, u64 channels_mask, int *channel, int *bandwidth, - bool allocate) + bool allocate, __be32 buffer[2]) { u32 channels_hi = channels_mask; /* channels 31...0 */ u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ @@ -310,10 +309,12 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, if (channels_hi) c = manage_channel(card, irm_id, generation, channels_hi, - CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate); + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, + allocate, buffer); if (channels_lo && c < 0) { c = manage_channel(card, irm_id, generation, channels_lo, - CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate); + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, + allocate, buffer); if (c >= 0) c += 32; } @@ -325,12 +326,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, if (*bandwidth == 0) return; - ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); + ret = manage_bandwidth(card, irm_id, generation, *bandwidth, + allocate, buffer); if (ret < 0) *bandwidth = 0; if (allocate && ret < 0 && c >= 0) { - deallocate_channel(card, irm_id, generation, c); + deallocate_channel(card, irm_id, generation, c, buffer); *channel = ret; } } diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index c3cfc647e5e..6052816be35 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -120,7 +120,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event); int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); void fw_iso_resource_manage(struct fw_card *card, int generation, - u64 channels_mask, int *channel, int *bandwidth, bool allocate); + u64 channels_mask, int *channel, int *bandwidth, + bool allocate, __be32 buffer[2]); /* -topology */ diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 9823946adbc..192d1e43c43 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -127,6 +127,7 @@ struct fw_card { struct delayed_work work; int bm_retries; int bm_generation; + __be32 bm_transaction_data[2]; bool broadcast_channel_allocated; u32 broadcast_channel; -- cgit v1.2.3-18-g5258 From 7ed9f7e5db58c6e8c2b4b738a75d5dcd8e17aad5 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Thu, 25 Jun 2009 12:31:37 -0700 Subject: fix RCU-callback-after-kmem_cache_destroy problem in sl[aou]b Jesper noted that kmem_cache_destroy() invokes synchronize_rcu() rather than rcu_barrier() in the SLAB_DESTROY_BY_RCU case, which could result in RCU callbacks accessing a kmem_cache after it had been destroyed. Cc: Acked-by: Matt Mackall Reported-by: Jesper Dangaard Brouer Signed-off-by: Paul E. McKenney Signed-off-by: Pekka Enberg --- mm/slab.c | 2 +- mm/slob.c | 2 ++ mm/slub.c | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/mm/slab.c b/mm/slab.c index e74a16e4ced..5241b6598ba 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2547,7 +2547,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep) } if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) - synchronize_rcu(); + rcu_barrier(); __kmem_cache_destroy(cachep); mutex_unlock(&cache_chain_mutex); diff --git a/mm/slob.c b/mm/slob.c index c78742defdc..9641da3d5e5 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -595,6 +595,8 @@ EXPORT_SYMBOL(kmem_cache_create); void kmem_cache_destroy(struct kmem_cache *c) { kmemleak_free(c); + if (c->flags & SLAB_DESTROY_BY_RCU) + rcu_barrier(); slob_free(c, sizeof(struct kmem_cache)); } EXPORT_SYMBOL(kmem_cache_destroy); diff --git a/mm/slub.c b/mm/slub.c index 819f056b39c..a9201d83178 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2595,6 +2595,8 @@ static inline int kmem_cache_close(struct kmem_cache *s) */ void kmem_cache_destroy(struct kmem_cache *s) { + if (s->flags & SLAB_DESTROY_BY_RCU) + rcu_barrier(); down_write(&slub_lock); s->refcount--; if (!s->refcount) { -- cgit v1.2.3-18-g5258 From 39562e783928e3ea9ee2cbce99a756ab48d3c06a Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Fri, 26 Jun 2009 16:30:43 +0200 Subject: [SCSI] FC transport: Locking fix for common-code FC pass-through patch Fix this: ------------[ cut here ]------------ Badness at block/blk-core.c:244 CPU: 0 Tainted: G W 2.6.31-rc1-00004-gd3a263a #3 Process zfcp_wq (pid: 901, task: 000000002fb7a038, ksp: 000000002f02bc78) Krnl PSW : 0704300180000000 00000000002141ba (blk_remove_plug+0xb2/0xb8) R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:0 CC:3 PM:0 EA:3 Krnl GPRS: 0000000000000001 0000000000000001 0000000022811440 0000000022811798 000000000027ff4e 0000000000000000 0000000000000000 000000002f00f000 070000000006a0f4 000000002af70000 000000002af2a800 00000000228d1c00 0000000022811440 000000000050c708 000000002f02bca8 000000002f02bc80 Krnl Code: 00000000002141b0: b9140022 lgfr %r2,%r2 00000000002141b4: 07fe bcr 15,%r14 00000000002141b6: a7f40001 brc 15,2141b8 >00000000002141ba: a7f4ffbe brc 15,214136 00000000002141be: 0707 bcr 0,%r7 00000000002141c0: ebaff0680024 stmg %r10,%r15,104(%r15) 00000000002141c6: c0d00017c2a9 larl %r13,50c718 00000000002141cc: a7f13fc0 tmll %r15,16320 Call Trace: ([<000000000050e7d8>] C.272.16122+0x88/0x110) [<00000000002141ec>] __blk_run_queue+0x2c/0x154 [<000000000028013a>] fc_remote_port_add+0x85e/0x95c [<000000000037596e>] zfcp_scsi_rport_work+0xe6/0x148 [<000000000006908c>] worker_thread+0x25c/0x318 [<000000000006f10c>] kthread+0x94/0x9c [<000000000001c2b2>] kernel_thread_starter+0x6/0xc [<000000000001c2ac>] kernel_thread_starter+0x0/0xc INFO: lockdep is turned off. Last Breaking-Event-Address: [<00000000002141b6>] blk_remove_plug+0xae/0xb8 The FC pass-through support triggers the WARN_ON(!irqs_disabled()) in blk_plug_device. 
Since blk_plug_device requires being called with disabled interrupts, use spin_lock_irqsave in fc_bsg_goose_queue to disable the interrupts before calling into the block layer. Signed-off-by: Christof Schmitt Acked-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_fc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 2eee9e6e4fe..292c02f810d 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3670,13 +3670,14 @@ static void fc_bsg_goose_queue(struct fc_rport *rport) { int flagset; + unsigned long flags; if (!rport->rqst_q) return; get_device(&rport->dev); - spin_lock(rport->rqst_q->queue_lock); + spin_lock_irqsave(rport->rqst_q->queue_lock, flags); flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); if (flagset) @@ -3684,7 +3685,7 @@ fc_bsg_goose_queue(struct fc_rport *rport) __blk_run_queue(rport->rqst_q); if (flagset) queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); - spin_unlock(rport->rqst_q->queue_lock); + spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); put_device(&rport->dev); } -- cgit v1.2.3-18-g5258 From 73f1d9391a6aa72efdcea2f302ee7bfcd313c631 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 24 Jun 2009 01:04:36 +0900 Subject: asm-generic/vmlinux.lds.h: Fix up RW_DATA_SECTION definition. RW_DATA_SECTION is defined to take 4 different alignment parameters, while NOSAVE_DATA currently uses a fixed PAGE_SIZE alignment as noted in the comments. There are presently no in-tree users of this at present, and I just stumbled across this while implementing the simplified script on a new architecture port, which subsequently resulted in a syntax error. Signed-off-by: Paul Mundt Signed-off-by: Sam Ravnborg --- include/asm-generic/vmlinux.lds.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 92b73b6140f..f92e730695c 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -704,7 +704,7 @@ * matches the requirment of PAGE_ALIGNED_DATA. * * use 0 as page_align if page_aligned data is not used */ -#define RW_DATA_SECTION(cacheline, nosave, pagealigned, inittask) \ +#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ . = ALIGN(PAGE_SIZE); \ .data : AT(ADDR(.data) - LOAD_OFFSET) { \ INIT_TASK(inittask) \ @@ -712,7 +712,7 @@ READ_MOSTLY_DATA(cacheline) \ DATA_DATA \ CONSTRUCTORS \ - NOSAVE_DATA(nosave) \ + NOSAVE_DATA \ PAGE_ALIGNED_DATA(pagealigned) \ } -- cgit v1.2.3-18-g5258 From d2af12aeadaedf657c9fb9c3df984d2c5ab25f4c Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Tue, 23 Jun 2009 19:59:35 -0400 Subject: Add new macros for page-aligned data and bss sections. This patch is preparation for replacing most uses of ".bss.page_aligned" and ".data.page_aligned" in the kernel with macros, so that the section name can later be changed without having to touch a lot of the kernel. The long-term goal here is to be able to change the kernel's magic section names to those that are compatible with -ffunction-sections -fdata-sections. This requires renaming all magic sections with names of the form ".data.foo". 
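A minimal usage sketch, assuming only the __page_aligned_bss definition visible in the context of the diff below; 'zero_page' is a made-up name for the example.

#include <linux/linkage.h>
#include <asm/page.h>

/* placed in .bss.page_aligned and aligned to a page boundary */
static unsigned long zero_page[PAGE_SIZE / sizeof(unsigned long)]
        __page_aligned_bss;

The new __PAGE_ALIGNED_DATA/__PAGE_ALIGNED_BSS macros serve the same purpose in assembly files, but as the added comment notes, the alignment directive must be supplied by the caller there.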
Signed-off-by: Tim Abbott Acked-by: David Howells Signed-off-by: Sam Ravnborg --- include/linux/linkage.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/linkage.h b/include/linux/linkage.h index fee9e59649c..691f59171c6 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -21,6 +21,15 @@ #define __page_aligned_data __section(.data.page_aligned) __aligned(PAGE_SIZE) #define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE) +/* + * For assembly routines. + * + * Note when using these that you must specify the appropriate + * alignment directives yourself + */ +#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw" +#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw" + /* * This is used by architectures to keep arguments on the stack * untouched by the compiler by keeping them live until the end. -- cgit v1.2.3-18-g5258 From 39a449d96ac3db9b6d498b6ffbf4c763746d5e8b Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Tue, 23 Jun 2009 18:53:15 -0400 Subject: asm-generic/vmlinux.lds.h: shuffle INIT_TASK* macro names in vmlinux.lds.h We recently added a INIT_TASK(align) in include/asm-generic/vmlinux.lds.h, but there is already a macro INIT_TASK in include/linux/init_task.h, which is quite confusing. We should switch the macro in the linker script to INIT_TASK_DATA. (Sorry that I missed this in reviewing the patch). Since the macros are new, there is only one user of the INIT_TASK in vmlinux.lds.h, arch/mn10300/kernel/vmlinux.lds.S. However, we are currently using INIT_TASK_DATA for laying down an entire .data.init_task section. So rename that to INIT_TASK_DATA_SECTION. I would be worried about changing the meaning of INIT_TASK_DATA, but the old INIT_TASK_DATA implementation had no users, and in fact if anyone had tried to use it, it would have failed to compile because it didn't pass the alignment to the old INIT_TASK. Signed-off-by: Tim Abbott Cc: David Howells Cc: Jesper Nilsson --- arch/mn10300/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S index bcebcefb4ad..c96ba3da95a 100644 --- a/arch/mn10300/kernel/vmlinux.lds.S +++ b/arch/mn10300/kernel/vmlinux.lds.S @@ -61,7 +61,7 @@ SECTIONS _edata = .; /* End of data section */ } - .data.init_task : { INIT_TASK(THREAD_SIZE); } + .data.init_task : { INIT_TASK_DATA(THREAD_SIZE); } /* might get freed after init */ . = ALIGN(PAGE_SIZE); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index f92e730695c..720af4c7220 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -191,7 +191,7 @@ . = ALIGN(align); \ *(.data.cacheline_aligned) -#define INIT_TASK(align) \ +#define INIT_TASK_DATA(align) \ . = ALIGN(align); \ *(.data.init_task) @@ -434,10 +434,10 @@ /* * Init task */ -#define INIT_TASK_DATA(align) \ +#define INIT_TASK_DATA_SECTION(align) \ . = ALIGN(align); \ .data.init_task : { \ - INIT_TASK \ + INIT_TASK_DATA(align) \ } #ifdef CONFIG_CONSTRUCTORS @@ -707,7 +707,7 @@ #define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ . 
= ALIGN(PAGE_SIZE); \ .data : AT(ADDR(.data) - LOAD_OFFSET) { \ - INIT_TASK(inittask) \ + INIT_TASK_DATA(inittask) \ CACHELINE_ALIGNED_DATA(cacheline) \ READ_MOSTLY_DATA(cacheline) \ DATA_DATA \ -- cgit v1.2.3-18-g5258 From 857eceebd2803c9a3459f784acf45e5266921e4d Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Tue, 23 Jun 2009 19:59:36 -0400 Subject: Add new __init_task_data macro to be used in arch init_task.c files. This patch is preparation for replacing most ".data.init_task" in the kernel with macros, so that the section name can later be changed without having to touch a lot of the kernel. The long-term goal here is to be able to change the kernel's magic section names to those that are compatible with -ffunction-sections -fdata-sections. This requires renaming all magic sections with names of the form ".data.foo". Signed-off-by: Tim Abbott Signed-off-by: Sam Ravnborg --- include/linux/init_task.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 5368fbdc780..7fc01b13be4 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -183,5 +183,8 @@ extern struct cred init_cred; LIST_HEAD_INIT(cpu_timers[2]), \ } +/* Attach to the init_task data structure for proper alignment */ +#define __init_task_data __attribute__((__section__(".data.init_task"))) + #endif -- cgit v1.2.3-18-g5258 From 1ab18486e4e8bf9554d8439207b97422d7466d77 Mon Sep 17 00:00:00 2001 From: maximilian attems Date: Fri, 26 Jun 2009 20:04:36 +0200 Subject: kbuild: deb-pkg ship changelog In the series for 2.6.31 it was noticed to ship the copyright, but the generated changelog got lost somehow. As bonus the generated linux-image deb packages are Lenny lintian clean. Cc: Frans Pop Cc: Andres Salomon Signed-off-by: maximilian attems Signed-off-by: Sam Ravnborg --- scripts/package/builddeb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/package/builddeb b/scripts/package/builddeb index 01c2d13dd02..b19f1f4962e 100644 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -16,6 +16,8 @@ create_package() { local pname="$1" pdir="$2" cp debian/copyright "$pdir/usr/share/doc/$pname/" + cp debian/changelog "$pdir/usr/share/doc/$pname/changelog.Debian" + gzip -9 "$pdir/usr/share/doc/$pname/changelog.Debian" # Fix ownership and permissions chown -R root:root "$pdir" -- cgit v1.2.3-18-g5258 From 7a6b1f1c0c492a6bb6f778dff0f9f5facb90d1a1 Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Mon, 22 Jun 2009 17:18:32 +0800 Subject: gitignore: ignore gcov output files Ignore *.gcno files which are generated by gcov. Signed-off-by: WANG Cong Signed-off-by: Sam Ravnborg --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index cecb3b040cc..b93fb7eff94 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,7 @@ *.gz *.lzma *.patch +*.gcno # # Top-level generic files -- cgit v1.2.3-18-g5258 From a8735821d198675dd326cc5847e79df79c735119 Mon Sep 17 00:00:00 2001 From: Floris Kraak Date: Mon, 15 Jun 2009 08:54:02 +0300 Subject: Kbuild: Disable the -Wformat-security gcc flag Some distributions have enabled the gcc flag -Wformat-security by default. This results in a number of warnings about format arguments to functions, sometimes in cases where fixing the warning is not likely to actually fix a bug. Instead of hand patching a dozens of places (possibly more) that produce warnings that get ignored anyway we just turn off the flag in the Makefile. 
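For reference, a tiny userspace example of the warning class being disabled here (not taken from the kernel source):

#include <stdio.h>

void log_msg(const char *msg)
{
        printf(msg);            /* -Wformat-security warns: non-literal format string, no format arguments */
        printf("%s", msg);      /* equivalent form that never warns */
}

Compiling this with "gcc -c -Wformat -Wformat-security" flags the first call. Most such sites in the kernel are harmless because the string is not attacker-controlled, which is the rationale for dropping the flag globally instead of patching each caller.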
Signed-off-by: Floris Kraak Signed-off-by: Pekka Enberg Signed-off-by: Sam Ravnborg --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d1216fea0c9..8fb9bfce212 100644 --- a/Makefile +++ b/Makefile @@ -344,7 +344,8 @@ KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common \ - -Werror-implicit-function-declaration + -Werror-implicit-function-declaration \ + -Wno-format-security KBUILD_AFLAGS := -D__ASSEMBLY__ # Read KERNELRELEASE from include/config/kernel.release (if it exists) -- cgit v1.2.3-18-g5258 From c512d2544c688ff1fab18a530860a9c7440a71b7 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 20 Jun 2009 18:25:25 +0530 Subject: gitignore: ignore scripts/ihex2fw scripts/ihex2fw is a generated binary and should be ignored Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Sam Ravnborg --- scripts/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/.gitignore b/scripts/.gitignore index b939fbd0119..52cab46ae35 100644 --- a/scripts/.gitignore +++ b/scripts/.gitignore @@ -7,3 +7,4 @@ pnmtologo bin2c unifdef binoffset +ihex2fw -- cgit v1.2.3-18-g5258 From 112942353992d95099fb5b71c679ff1046fccfcf Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Fri, 19 Jun 2009 03:40:26 -0400 Subject: kbuild: finally remove the obsolete variable $TOPDIR TOPDIR is obsolete, it can be finally removed now. Signed-off-by: WANG Cong Signed-off-by: Sam Ravnborg --- Makefile | 4 +--- drivers/scsi/cxgb3i/Kbuild | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 8fb9bfce212..b4c7ef5ab43 100644 --- a/Makefile +++ b/Makefile @@ -140,15 +140,13 @@ _all: modules endif srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR)) -TOPDIR := $(srctree) -# FIXME - TOPDIR is obsolete, use srctree/objtree objtree := $(CURDIR) src := $(srctree) obj := $(objtree) VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD)) -export srctree objtree VPATH TOPDIR +export srctree objtree VPATH # SUBARCH tells the usermode build what the underlying arch is. That is set diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild index 25a2032bfa2..70d060b7ff4 100644 --- a/drivers/scsi/cxgb3i/Kbuild +++ b/drivers/scsi/cxgb3i/Kbuild @@ -1,4 +1,4 @@ -EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3 +EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3 cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o -- cgit v1.2.3-18-g5258 From a222ad1a4b2e3ca177a538482c99c519c1ce94d1 Mon Sep 17 00:00:00 2001 From: Karen Xie Date: Fri, 26 Jun 2009 15:17:29 -0700 Subject: [SCSI] cxgb3i: fix connection error when vlan is enabled There is a bug when VLAN is configured on the cxgb3 interface, the iscsi conn. would be denied with message "cxgb3i: NOT going through cxgbi device." This patch adds code to get the real egress net_device when vlan is configured. 
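The core of the fix, condensed from the diff below into a stand-alone helper for illustration (the helper name is made up; the patch open-codes the check in cxgb3i_hba_find_by_netdev()):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* resolve a VLAN egress interface to the underlying physical device */
static struct net_device *egress_real_dev(struct net_device *ndev)
{
        if (ndev->priv_flags & IFF_802_1Q_VLAN)
                ndev = vlan_dev_real_dev(ndev);
        return ndev;
}

With this, the lookup matches the cxgb3 port itself rather than the VLAN device layered on top of it, so the connection is no longer rejected with "NOT going through cxgbi device."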
Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 74369a3f963..c399f485aa7 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -184,6 +185,9 @@ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) struct cxgb3i_adapter *snic; int i; + if (ndev->priv_flags & IFF_802_1Q_VLAN) + ndev = vlan_dev_real_dev(ndev); + read_lock(&cxgb3i_snic_rwlock); list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { for (i = 0; i < snic->hba_cnt; i++) { -- cgit v1.2.3-18-g5258 From ec5a36f94e7ca4b1f28ae4dd135cd415a704e772 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 29 Jun 2009 09:57:10 +0300 Subject: SLAB: Fix lockdep annotations Commit 8429db5... ("slab: setup cpu caches later on when interrupts are enabled") broke mm/slab.c lockdep annotations: [ 11.554715] ============================================= [ 11.555249] [ INFO: possible recursive locking detected ] [ 11.555560] 2.6.31-rc1 #896 [ 11.555861] --------------------------------------------- [ 11.556127] udevd/1899 is trying to acquire lock: [ 11.556436] (&nc->lock){-.-...}, at: [] kmem_cache_free+0xcd/0x25b [ 11.557101] [ 11.557102] but task is already holding lock: [ 11.557706] (&nc->lock){-.-...}, at: [] kfree+0x137/0x292 [ 11.558109] [ 11.558109] other info that might help us debug this: [ 11.558720] 2 locks held by udevd/1899: [ 11.558983] #0: (&nc->lock){-.-...}, at: [] kfree+0x137/0x292 [ 11.559734] #1: (&parent->list_lock){-.-...}, at: [] __drain_alien_cache+0x3b/0xbd [ 11.560442] [ 11.560443] stack backtrace: [ 11.561009] Pid: 1899, comm: udevd Not tainted 2.6.31-rc1 #896 [ 11.561276] Call Trace: [ 11.561632] [] __lock_acquire+0x15ec/0x168f [ 11.561901] [] ? __lock_acquire+0x1676/0x168f [ 11.562171] [] ? trace_hardirqs_on_caller+0x113/0x13e [ 11.562490] [] ? trace_hardirqs_on_thunk+0x3a/0x3f [ 11.562807] [] lock_acquire+0xc1/0xe5 [ 11.563073] [] ? kmem_cache_free+0xcd/0x25b [ 11.563385] [] _spin_lock+0x31/0x66 [ 11.563696] [] ? kmem_cache_free+0xcd/0x25b [ 11.563964] [] kmem_cache_free+0xcd/0x25b [ 11.564235] [] ? __free_pages+0x1b/0x24 [ 11.564551] [] slab_destroy+0x57/0x5c [ 11.564860] [] free_block+0xd8/0x123 [ 11.565126] [] __drain_alien_cache+0xa2/0xbd [ 11.565441] [] kfree+0x14c/0x292 [ 11.565752] [] skb_release_data+0xc6/0xcb [ 11.566020] [] __kfree_skb+0x19/0x86 [ 11.566286] [] consume_skb+0x2b/0x2d [ 11.566631] [] skb_free_datagram+0x14/0x3a [ 11.566901] [] netlink_recvmsg+0x164/0x258 [ 11.567170] [] sock_recvmsg+0xe5/0xfe [ 11.567486] [] ? might_fault+0xaf/0xb1 [ 11.567802] [] ? autoremove_wake_function+0x0/0x38 [ 11.568073] [] ? core_sys_select+0x3d/0x2b4 [ 11.568378] [] ? __lock_acquire+0x1676/0x168f [ 11.568693] [] ? sockfd_lookup_light+0x1b/0x54 [ 11.568961] [] sys_recvfrom+0xa3/0xf8 [ 11.569228] [] ? trace_hardirqs_on+0xd/0xf [ 11.569546] [] system_call_fastpath+0x16/0x1b# Fix that up. 
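For background, a generic sketch (not from this patch) of the pattern lockdep is reporting above: two locks of the same lock class taken nested look like recursion unless the nesting is annotated. Slab's equivalent annotation is the re-keying done by init_lock_keys(), which this patch simply moves to kmem_cache_init_late() so it runs after the cpu caches are actually set up.

#include <linux/spinlock.h>

/* hypothetical example: moving objects between two array caches whose
 * locks share a lock class; the inner lock is marked as deliberate nesting */
static void transfer_objects(spinlock_t *dst, spinlock_t *src)
{
        spin_lock(dst);
        spin_lock_nested(src, SINGLE_DEPTH_NESTING);
        /* move objects from src's list to dst's list */
        spin_unlock(src);
        spin_unlock(dst);
}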
Closes-bug: http://bugzilla.kernel.org/show_bug.cgi?id=13654 Tested-by: Venkatesh Pallipadi Signed-off-by: Pekka Enberg --- mm/slab.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index 5241b6598ba..7b5d4deacfc 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1544,9 +1544,6 @@ void __init kmem_cache_init(void) } g_cpucache_up = EARLY; - - /* Annotate slab for lockdep -- annotate the malloc caches */ - init_lock_keys(); } void __init kmem_cache_init_late(void) @@ -1563,6 +1560,9 @@ void __init kmem_cache_init_late(void) /* Done! */ g_cpucache_up = FULL; + /* Annotate slab for lockdep -- annotate the malloc caches */ + init_lock_keys(); + /* * Register a cpu startup notifier callback that initializes * cpu_cache_get for all new cpus -- cgit v1.2.3-18-g5258 From bf92df30df909710c498d05620e2df1be1ef779b Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 29 Jun 2009 11:31:45 +0800 Subject: intel-iommu: Only avoid flushing device IOTLB for domain ID 0 in caching mode In caching mode, domain ID 0 is reserved for non-present to present mapping flush. Device IOTLB doesn't need to be flushed in this case. Previously we were avoiding the flush for domain zero, even if the IOMMU wasn't in caching mode and domain zero wasn't special. Signed-off-by: Yu Zhao Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 420afa88728..3cad7006ed8 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1054,7 +1054,12 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, else iommu->flush.flush_iotlb(iommu, did, addr, mask, DMA_TLB_PSI_FLUSH); - if (did) + + /* + * In caching mode, domain ID 0 is reserved for non-present to present + * mapping flush. Device IOTLB doesn't need to be flushed in this case. + */ + if (!cap_caching_mode(iommu->cap) || did) iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); } -- cgit v1.2.3-18-g5258 From b213203e475212a69ad6fedfb73464087e317148 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 26 Jun 2009 18:50:28 +0100 Subject: intel-iommu: Create new iommu_domain_identity_map() function We'll want to do this to a _domain_ (the si_domain) rather than a PCI device. 
Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 62 +++++++++++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 3cad7006ed8..3a4f347e2f8 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1849,25 +1849,12 @@ error: static int iommu_identity_mapping; -static int iommu_prepare_identity_map(struct pci_dev *pdev, - unsigned long long start, - unsigned long long end) +static int iommu_domain_identity_map(struct dmar_domain *domain, + unsigned long long start, + unsigned long long end) { - struct dmar_domain *domain; unsigned long size; unsigned long long base; - int ret; - - printk(KERN_INFO - "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", - pci_name(pdev), start, end); - if (iommu_identity_mapping) - domain = si_domain; - else - /* page table init */ - domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); - if (!domain) - return -ENOMEM; /* The address might not be aligned */ base = start & PAGE_MASK; @@ -1876,31 +1863,54 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, if (!reserve_iova(&domain->iovad, IOVA_PFN(base), IOVA_PFN(base + size) - 1)) { printk(KERN_ERR "IOMMU: reserve iova failed\n"); - ret = -ENOMEM; - goto error; + return -ENOMEM; } - pr_debug("Mapping reserved region %lx@%llx for %s\n", - size, base, pci_name(pdev)); + pr_debug("Mapping reserved region %lx@%llx for domain %d\n", + size, base, domain->id); /* * RMRR range might have overlap with physical memory range, * clear it first */ dma_pte_clear_range(domain, base, base + size); - ret = domain_page_mapping(domain, base, base, size, - DMA_PTE_READ|DMA_PTE_WRITE); + return domain_page_mapping(domain, base, base, size, + DMA_PTE_READ|DMA_PTE_WRITE); +} + +static int iommu_prepare_identity_map(struct pci_dev *pdev, + unsigned long long start, + unsigned long long end) +{ + struct dmar_domain *domain; + int ret; + + printk(KERN_INFO + "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", + pci_name(pdev), start, end); + + if (iommu_identity_mapping) + domain = si_domain; + else + /* page table init */ + domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); + if (!domain) + return -ENOMEM; + + ret = iommu_domain_identity_map(domain, start, end); if (ret) goto error; /* context entry init */ ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); - if (!ret) - return 0; -error: + if (ret) + goto error; + + return 0; + + error: domain_exit(domain); return ret; - } static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, -- cgit v1.2.3-18-g5258 From c7ab48d2acaf959e4d59c3f55d12fdb7ca9afd7c Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 26 Jun 2009 19:10:36 +0100 Subject: intel-iommu: Clean up identity mapping code, remove CONFIG_DMAR_GFX_WA There's no need for the GFX workaround now we have 'iommu=pt' for the cases where people really care about performance. There's no need to have a special case for just one type of device. This also speeds up the iommu=pt path and reduces memory usage by setting up the si_domain _once_ and then using it for all devices, rather than giving each device its own private page tables. 
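For reference, the pass-through setup the commit message refers to is selected on the kernel command line, along the lines of the following (consult Documentation/kernel-parameters.txt in your tree for the authoritative spelling and defaults):

        intel_iommu=on iommu=pt

With that, DMA remapping stays enabled but devices get a 1:1 (pass-through/identity) mapping instead of individual remapping page tables, which is why the graphics-specific workaround is no longer needed.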
Signed-off-by: David Woodhouse --- arch/x86/Kconfig | 15 +------ drivers/pci/intel-iommu.c | 107 ++++++++++++++-------------------------------- 2 files changed, 34 insertions(+), 88 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d1430ef6b4f..c07f7220590 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1913,25 +1913,14 @@ config DMAR_DEFAULT_ON recommended you say N here while the DMAR code remains experimental. -config DMAR_GFX_WA - def_bool y - prompt "Support for Graphics workaround" - depends on DMAR - ---help--- - Current Graphics drivers tend to use physical address - for DMA and avoid using DMA APIs. Setting this config - option permits the IOMMU driver to set a unity map for - all the OS-visible memory. Hence the driver can continue - to use physical addresses for DMA. - config DMAR_FLOPPY_WA def_bool y depends on DMAR ---help--- - Floppy disk drivers are know to bypass DMA API calls + Floppy disk drivers are known to bypass DMA API calls thereby failing to work when IOMMU is enabled. This workaround will setup a 1:1 mapping for the first - 16M to make floppy (an ISA device) work. + 16MiB to make floppy (an ISA device) work. config INTR_REMAP bool "Support for Interrupt Remapping (EXPERIMENTAL)" diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 3a4f347e2f8..fc121967cb5 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1889,11 +1889,7 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", pci_name(pdev), start, end); - if (iommu_identity_mapping) - domain = si_domain; - else - /* page table init */ - domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); + domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); if (!domain) return -ENOMEM; @@ -1922,64 +1918,6 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, rmrr->end_address + 1); } -struct iommu_prepare_data { - struct pci_dev *pdev; - int ret; -}; - -static int __init iommu_prepare_work_fn(unsigned long start_pfn, - unsigned long end_pfn, void *datax) -{ - struct iommu_prepare_data *data; - - data = (struct iommu_prepare_data *)datax; - - data->ret = iommu_prepare_identity_map(data->pdev, - start_pfn<ret; - -} - -static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev) -{ - int nid; - struct iommu_prepare_data data; - - data.pdev = pdev; - data.ret = 0; - - for_each_online_node(nid) { - work_with_active_regions(nid, iommu_prepare_work_fn, &data); - if (data.ret) - return data.ret; - } - return data.ret; -} - -#ifdef CONFIG_DMAR_GFX_WA -static void __init iommu_prepare_gfx_mapping(void) -{ - struct pci_dev *pdev = NULL; - int ret; - - for_each_pci_dev(pdev) { - if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO || - !IS_GFX_DEVICE(pdev)) - continue; - printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n", - pci_name(pdev)); - ret = iommu_prepare_with_active_regions(pdev); - if (ret) - printk(KERN_ERR "IOMMU: mapping reserved region failed\n"); - } -} -#else /* !CONFIG_DMAR_GFX_WA */ -static inline void iommu_prepare_gfx_mapping(void) -{ - return; -} -#endif - #ifdef CONFIG_DMAR_FLOPPY_WA static inline void iommu_prepare_isa(void) { @@ -1990,12 +1928,12 @@ static inline void iommu_prepare_isa(void) if (!pdev) return; - printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n"); + printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); if (ret) - 
printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, " - "floppy might not work\n"); + printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " + "floppy might not work\n"); } #else @@ -2023,16 +1961,30 @@ static int __init init_context_pass_through(void) } static int md_domain_init(struct dmar_domain *domain, int guest_width); + +static int __init si_domain_work_fn(unsigned long start_pfn, + unsigned long end_pfn, void *datax) +{ + int *ret = datax; + + *ret = iommu_domain_identity_map(si_domain, + (uint64_t)start_pfn << PAGE_SHIFT, + (uint64_t)end_pfn << PAGE_SHIFT); + return *ret; + +} + static int si_domain_init(void) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; - int ret = 0; + int nid, ret = 0; si_domain = alloc_domain(); if (!si_domain) return -EFAULT; + pr_debug("Identity mapping domain is domain %d\n", si_domain->id); for_each_active_iommu(iommu, drhd) { ret = iommu_attach_domain(si_domain, iommu); @@ -2049,6 +2001,12 @@ static int si_domain_init(void) si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; + for_each_online_node(nid) { + work_with_active_regions(nid, si_domain_work_fn, &ret); + if (ret) + return ret; + } + return 0; } @@ -2102,13 +2060,14 @@ static int iommu_prepare_static_identity_mapping(void) if (ret) return -EFAULT; - printk(KERN_INFO "IOMMU: Setting identity map:\n"); for_each_pci_dev(pdev) { - ret = iommu_prepare_with_active_regions(pdev); - if (ret) { - printk(KERN_INFO "1:1 mapping to one domain failed.\n"); - return -EFAULT; - } + printk(KERN_INFO "IOMMU: identity mapping for device %s\n", + pci_name(pdev)); + + ret = domain_context_mapping(si_domain, pdev, + CONTEXT_TT_MULTI_LEVEL); + if (ret) + return ret; ret = domain_add_dev_info(si_domain, pdev); if (ret) return ret; @@ -2299,8 +2258,6 @@ int __init init_dmars(void) } } - iommu_prepare_gfx_mapping(); - iommu_prepare_isa(); } -- cgit v1.2.3-18-g5258 From dd4e831960e4f0214480fa96a53ca9bb7dd04927 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 27 Jun 2009 16:21:20 +0100 Subject: intel-iommu: Change dma_set_pte_addr() to dma_set_pte_pfn() Add some helpers for converting between VT-d and normal system pfns, since system pages can be larger than VT-d pages. Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index fc121967cb5..852f40a913d 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -65,6 +65,26 @@ #define PHYSICAL_PAGE_MASK PAGE_MASK #endif +/* VT-d pages must always be _smaller_ than MM pages. Otherwise things + are never going to work. 
*/ +static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn) +{ + return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT); +} + +static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn) +{ + return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT); +} +static inline unsigned long page_to_dma_pfn(struct page *pg) +{ + return mm_to_dma_pfn(page_to_pfn(pg)); +} +static inline unsigned long virt_to_dma_pfn(void *p) +{ + return page_to_dma_pfn(virt_to_page(p)); +} + /* global iommu list, set NULL for ignored DMAR units */ static struct intel_iommu **g_iommus; @@ -207,9 +227,9 @@ static inline u64 dma_pte_addr(struct dma_pte *pte) return (pte->val & VTD_PAGE_MASK); } -static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr) +static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn) { - pte->val |= (addr & VTD_PAGE_MASK); + pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT; } static inline bool dma_pte_present(struct dma_pte *pte) @@ -702,7 +722,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) return NULL; } domain_flush_cache(domain, tmp_page, PAGE_SIZE); - dma_set_pte_addr(pte, virt_to_phys(tmp_page)); + dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page)); /* * high level table always sets r/w, last level page * table control read/write @@ -1648,7 +1668,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, * touches the iova range */ BUG_ON(dma_pte_addr(pte)); - dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); + dma_set_pte_pfn(pte, start_pfn); dma_set_pte_prot(pte, prot); if (prot & DMA_PTE_SNP) dma_set_pte_snp(pte); -- cgit v1.2.3-18-g5258 From 77dfa56c94d2855a25ff552b74980a5538e129f8 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 27 Jun 2009 16:40:08 +0100 Subject: intel-iommu: Change address_level_offset() to pfn_level_offset() We're shifting the inputs for now, but that'll change... 
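A worked sketch of the pfn-based index calculation, runnable in userspace. The interim version in the hunk below still subtracts the 12 bits of VTD_PAGE_SHIFT; a later patch in this series drops that, which is the form sketched here. The 9-bit stride per level (LEVEL_STRIDE = 9, mask 0x1ff) is an assumption matching the VT-d page-table format rather than something shown in this hunk.

#include <stdio.h>

static unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * 9;         /* bits below this level's index field */
}

static int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & 0x1ff;
}

int main(void)
{
        unsigned long pfn = 0x12345;    /* arbitrary example DMA pfn */
        int level;

        for (level = 3; level >= 1; level--)
                printf("level %d -> index %d\n", level, pfn_level_offset(pfn, level));
        return 0;
}

The point of the change is that callers pass a pfn (address >> VTD_PAGE_SHIFT) directly, so the page-shift adjustment can eventually disappear from the helpers.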
Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 852f40a913d..529c1c13048 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -671,9 +671,9 @@ static inline unsigned int level_to_offset_bits(int level) return (12 + (level - 1) * LEVEL_STRIDE); } -static inline int address_level_offset(u64 addr, int level) +static inline int pfn_level_offset(unsigned long pfn, int level) { - return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK); + return (pfn >> (level_to_offset_bits(level) - 12)) & LEVEL_MASK; } static inline u64 level_mask(int level) @@ -708,7 +708,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) while (level > 0) { void *tmp_page; - offset = address_level_offset(addr, level); + offset = pfn_level_offset(addr >> VTD_PAGE_SHIFT, level); pte = &parent[offset]; if (level == 1) break; @@ -749,7 +749,7 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, parent = domain->pgd; while (level <= total) { - offset = address_level_offset(addr, total); + offset = pfn_level_offset(addr >> VTD_PAGE_SHIFT, total); pte = &parent[offset]; if (level == total) return pte; -- cgit v1.2.3-18-g5258 From 90dcfb5eb2fd427b16135a14f176a6902750b6b4 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 27 Jun 2009 17:14:59 +0100 Subject: intel-iommu: Change dma_addr_level_pte() to dma_pfn_level_pte() Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 529c1c13048..edd39d348a9 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -740,8 +740,9 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) } /* return address's pte at specific level */ -static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, - int level) +static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, + unsigned long pfn, + int level) { struct dma_pte *parent, *pte = NULL; int total = agaw_to_level(domain->agaw); @@ -749,7 +750,7 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, parent = domain->pgd; while (level <= total) { - offset = pfn_level_offset(addr >> VTD_PAGE_SHIFT, total); + offset = pfn_level_offset(pfn, total); pte = &parent[offset]; if (level == total) return pte; @@ -768,7 +769,7 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr) struct dma_pte *pte = NULL; /* get last level pte */ - pte = dma_addr_level_pte(domain, addr, 1); + pte = dma_pfn_level_pte(domain, addr >> VTD_PAGE_SHIFT, 1); if (pte) { dma_clear_pte(pte); @@ -817,7 +818,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, return; while (tmp < end) { - pte = dma_addr_level_pte(domain, tmp, level); + pte = dma_pfn_level_pte(domain, tmp >> VTD_PAGE_SHIFT, + level); if (pte) { free_pgtable_page( phys_to_virt(dma_pte_addr(pte))); -- cgit v1.2.3-18-g5258 From a75f7cf94f01717c5103138319b96752ee2a2be9 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 27 Jun 2009 17:44:39 +0100 Subject: intel-iommu: Make dma_pte_clear_one() take pfn not address Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 
edd39d348a9..40eae2097ac 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -764,12 +764,12 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, } /* clear one page's page table */ -static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr) +static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn) { struct dma_pte *pte = NULL; /* get last level pte */ - pte = dma_pfn_level_pte(domain, addr >> VTD_PAGE_SHIFT, 1); + pte = dma_pfn_level_pte(domain, pfn, 1); if (pte) { dma_clear_pte(pte); @@ -792,7 +792,7 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) /* we don't need lock here, nobody else touches the iova range */ while (npages--) { - dma_pte_clear_one(domain, start); + dma_pte_clear_one(domain, start >> VTD_PAGE_SHIFT); start += VTD_PAGE_SIZE; } } -- cgit v1.2.3-18-g5258 From 66eae8469e4e4ba6f4ca7ef82103c78f6d645583 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 27 Jun 2009 19:00:32 +0100 Subject: intel-iommu: Don't just mask out too-big physical addresses; BUG() instead Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 40eae2097ac..ad367f53a2b 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -700,8 +700,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) unsigned long flags; BUG_ON(!domain->pgd); - - addr &= (((u64)1) << addr_width) - 1; + BUG_ON(addr >> addr_width); parent = domain->pgd; spin_lock_irqsave(&domain->mapping_lock, flags); @@ -783,8 +782,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) int addr_width = agaw_to_width(domain->agaw); int npages; - start &= (((u64)1) << addr_width) - 1; - end &= (((u64)1) << addr_width) - 1; + BUG_ON(start >> addr_width); + BUG_ON((end-1) >> addr_width); + /* in case it's partial page */ start &= PAGE_MASK; end = PAGE_ALIGN(end); @@ -807,8 +807,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, int level; u64 tmp; - start &= (((u64)1) << addr_width) - 1; - end &= (((u64)1) << addr_width) - 1; + BUG_ON(start >> addr_width); + BUG_ON(end >> addr_width); /* we don't need lock here, nobody else touches the iova range */ level = 2; @@ -1654,7 +1654,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, int index; int addr_width = agaw_to_width(domain->agaw); - hpa &= (((u64)1) << addr_width) - 1; + BUG_ON(hpa >> addr_width); if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) return -EINVAL; -- cgit v1.2.3-18-g5258 From 04b18e65dd5a3e544f07f4bcfa8fb52704a1833b Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 27 Jun 2009 19:15:01 +0100 Subject: intel-iommu: Make dma_pte_clear_range() use pfns Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index ad367f53a2b..d4217f73715 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -779,21 +779,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn) /* clear last level pte, a tlb flush should be followed */ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) { - int addr_width = agaw_to_width(domain->agaw); - int npages; - - BUG_ON(start >> addr_width); - BUG_ON((end-1) >> addr_width); + 
unsigned long start_pfn = IOVA_PFN(start); + unsigned long end_pfn = IOVA_PFN(end-1); + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - /* in case it's partial page */ - start &= PAGE_MASK; - end = PAGE_ALIGN(end); - npages = (end - start) / VTD_PAGE_SIZE; + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); + BUG_ON(addr_width < BITS_PER_LONG && end_pfn >> addr_width); - /* we don't need lock here, nobody else touches the iova range */ - while (npages--) { - dma_pte_clear_one(domain, start >> VTD_PAGE_SHIFT); - start += VTD_PAGE_SIZE; + /* we don't need lock here; nobody else touches the iova range */ + while (start_pfn <= end_pfn) { + dma_pte_clear_one(domain, start_pfn); + start_pfn++; } } -- cgit v1.2.3-18-g5258 From 595badf5d65d50300319e6178e6df005ea501f70 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 27 Jun 2009 22:09:11 +0100 Subject: intel-iommu: Make dma_pte_clear_range() take pfns as argument Noting that this is now an _inclusive_ range. Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index d4217f73715..ff8b7ce4a01 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -56,6 +56,7 @@ #define MAX_AGAW_WIDTH 64 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) +#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1) #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) @@ -777,17 +778,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn) } /* clear last level pte, a tlb flush should be followed */ -static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) +static void dma_pte_clear_range(struct dmar_domain *domain, + unsigned long start_pfn, + unsigned long last_pfn) { - unsigned long start_pfn = IOVA_PFN(start); - unsigned long end_pfn = IOVA_PFN(end-1); int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); - BUG_ON(addr_width < BITS_PER_LONG && end_pfn >> addr_width); + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); /* we don't need lock here; nobody else touches the iova range */ - while (start_pfn <= end_pfn) { + while (start_pfn <= last_pfn) { dma_pte_clear_one(domain, start_pfn); start_pfn++; } @@ -1424,7 +1425,7 @@ static void domain_exit(struct dmar_domain *domain) end = end & (~PAGE_MASK); /* clear ptes */ - dma_pte_clear_range(domain, 0, end); + dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); /* free page tables */ dma_pte_free_pagetable(domain, 0, end); @@ -1890,7 +1891,8 @@ static int iommu_domain_identity_map(struct dmar_domain *domain, * RMRR range might have overlap with physical memory range, * clear it first */ - dma_pte_clear_range(domain, base, base + size); + dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT, + (base + size - 1) >> VTD_PAGE_SHIFT); return domain_page_mapping(domain, base, base, size, DMA_PTE_READ|DMA_PTE_WRITE); @@ -2618,7 +2620,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, pci_name(pdev), size, (unsigned long long)start_addr); /* clear the whole page */ - dma_pte_clear_range(domain, start_addr, start_addr + size); + dma_pte_clear_range(domain, start_addr >> VTD_PAGE_SHIFT, + (start_addr + size - 1) >> VTD_PAGE_SHIFT); /* free page tables */ dma_pte_free_pagetable(domain, start_addr, 
start_addr + size); if (intel_iommu_strict) { @@ -2710,7 +2713,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, start_addr = iova->pfn_lo << PAGE_SHIFT; /* clear the whole page */ - dma_pte_clear_range(domain, start_addr, start_addr + size); + dma_pte_clear_range(domain, start_addr >> VTD_PAGE_SHIFT, + (start_addr + size - 1) >> VTD_PAGE_SHIFT); /* free page tables */ dma_pte_free_pagetable(domain, start_addr, start_addr + size); @@ -2792,8 +2796,9 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne size, prot); if (ret) { /* clear the page */ - dma_pte_clear_range(domain, start_addr, - start_addr + offset); + dma_pte_clear_range(domain, + start_addr >> VTD_PAGE_SHIFT, + (start_addr + offset - 1) >> VTD_PAGE_SHIFT); /* free page tables */ dma_pte_free_pagetable(domain, start_addr, start_addr + offset); @@ -3382,7 +3387,7 @@ static void vm_domain_exit(struct dmar_domain *domain) end = end & (~VTD_PAGE_MASK); /* clear ptes */ - dma_pte_clear_range(domain, 0, end); + dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); /* free page tables */ dma_pte_free_pagetable(domain, 0, end); @@ -3526,7 +3531,8 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain, /* The address might not be aligned */ base = iova & VTD_PAGE_MASK; size = VTD_PAGE_ALIGN(size); - dma_pte_clear_range(dmar_domain, base, base + size); + dma_pte_clear_range(dmar_domain, base >> VTD_PAGE_SHIFT, + (base + size - 1) >> VTD_PAGE_SHIFT); if (dmar_domain->max_addr == base + size) dmar_domain->max_addr = base; -- cgit v1.2.3-18-g5258 From 6660c63a79a639b86e3a709e25a8c4fc3ab24770 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 27 Jun 2009 22:41:00 +0100 Subject: intel-iommu: Make dma_pte_free_pagetable() use pfns Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index ff8b7ce4a01..1526864a9d6 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -669,27 +669,27 @@ static inline int width_to_agaw(int width) static inline unsigned int level_to_offset_bits(int level) { - return (12 + (level - 1) * LEVEL_STRIDE); + return (level - 1) * LEVEL_STRIDE; } static inline int pfn_level_offset(unsigned long pfn, int level) { - return (pfn >> (level_to_offset_bits(level) - 12)) & LEVEL_MASK; + return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; } -static inline u64 level_mask(int level) +static inline unsigned long level_mask(int level) { - return ((u64)-1 << l