From 6e0de817594c61f3b392a9245deeb09609ec707d Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Fri, 25 Apr 2014 10:53:44 +0200
Subject: s390/bpf,jit: initialize A register if 1st insn is BPF_S_LDX_B_MSH

The A register needs to be initialized to zero in the prolog if the
first instruction of the BPF program is BPF_S_LDX_B_MSH to prevent
leaking the content of %r5 to user space.

Cc:
Signed-off-by: Martin Schwidefsky
---
 arch/s390/net/bpf_jit_comp.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/s390')

diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9c36dc398f9..452d3ebd9d0 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
 	case BPF_S_LD_W_IND:
 	case BPF_S_LD_H_IND:
 	case BPF_S_LD_B_IND:
-	case BPF_S_LDX_B_MSH:
 	case BPF_S_LD_IMM:
 	case BPF_S_LD_MEM:
 	case BPF_S_MISC_TXA:
--
cgit v1.2.3-18-g5258

From 0c8c77d35582c3f7989f1316368da5ae7f14ad4b Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Wed, 23 Apr 2014 20:58:45 +0200
Subject: s390/ccwgroup: Fix memory corruption

commit 0b60f9ead5d4816e7e3d6e28f4a0d22d4a1b2513
(s390: use device_remove_file_self() instead of device_schedule_callback())
caused random memory corruption on my s390 box. Turns out that the last
element of the ccwgroup structure is of dynamic size, so we must move
the newly introduced work structure _before_ the zero length array.

Signed-off-by: Christian Borntraeger
CC: Tejun Heo
CC: Martin Schwidefsky
CC: Heiko Carstens
CC: Sebastian Ott
CC: Peter Oberparleiter
Acked-by: Tejun Heo
Signed-off-by: Greg Kroah-Hartman
---
 arch/s390/include/asm/ccwgroup.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 6e670f88d12..ebc2913f9ee 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -22,8 +22,8 @@ struct ccwgroup_device {
 /* public: */
 	unsigned int count;
 	struct device dev;
-	struct ccw_device *cdev[0];
 	struct work_struct ungroup_work;
+	struct ccw_device *cdev[0];
 };

 /**
--
cgit v1.2.3-18-g5258
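A note on the ccwgroup fix above: it hinges on a general C layout rule. A zero-length (or flexible) array member only works as the very last member of a structure, because the element storage carved out by the oversized allocation starts where the structure ends and silently overlaps any member declared after the array. The sketch below is not kernel code; the structure and field names are invented to show the same mistake, and its fix, in plain user-space C.

/*
 * Stand-alone illustration (not kernel code): why a dynamically sized
 * zero-length array must be the last member of a struct. All names
 * below are invented for this demo.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct toy_work { long pending; };
struct toy_cdev { int id; };

/* Broken layout, like ccwgroup_device before the fix: the element
 * storage for cdev[] starts at the same offset as ungroup_work. */
struct group_broken {
	unsigned int count;
	struct toy_cdev *cdev[0];	/* zero-length array (GNU extension) */
	struct toy_work ungroup_work;	/* overlaps cdev[0], cdev[1], ... */
};

/* Fixed layout: the zero-length array comes last, so the extra space
 * handed to malloc() really belongs to cdev[]. */
struct group_fixed {
	unsigned int count;
	struct toy_work ungroup_work;
	struct toy_cdev *cdev[0];
};

int main(void)
{
	struct toy_cdev dev = { .id = 42 };
	struct group_broken *b;

	printf("broken: cdev at %zu, ungroup_work at %zu  <-- same offset\n",
	       offsetof(struct group_broken, cdev),
	       offsetof(struct group_broken, ungroup_work));
	printf("fixed:  ungroup_work at %zu, cdev at %zu\n",
	       offsetof(struct group_fixed, ungroup_work),
	       offsetof(struct group_fixed, cdev));

	/* Allocate room for one slave device pointer and fill it in. */
	b = malloc(sizeof(*b) + sizeof(b->cdev[0]));
	b->count = 1;
	b->ungroup_work.pending = 1;
	b->cdev[0] = &dev;		/* silently overwrites ungroup_work */
	printf("broken: ungroup_work.pending is now %ld\n",
	       b->ungroup_work.pending);

	free(b);
	return 0;
}

On a typical 64-bit build the broken layout reports cdev and ungroup_work at the same offset, and the single pointer store visibly clobbers ungroup_work.pending, which is exactly the kind of random corruption the commit message describes.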
From 1cf35d47712dd5dc4d62c6ce984f04ac6eab0408 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 25 Apr 2014 16:05:40 -0700
Subject: mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts

The mmu-gather operation 'tlb_flush_mmu()' has done two things: the
actual tlb flush operation, and the batched freeing of the pages that
the TLB entries pointed at.

This splits the operation into separate phases, so that the forced
batched flushing done by zap_pte_range() can now do the actual TLB
flush while still holding the page table lock, but delay the batched
freeing of all the pages to after the lock has been dropped.

This in turn allows us to avoid a race condition between
set_page_dirty() (as called by zap_pte_range() when it finds a dirty
shared memory pte) and page_mkclean(): because we now flush all the
dirty page data from the TLB's while holding the pte lock,
page_mkclean() will be held up walking the (recently cleaned) page
tables until after the TLB entries have been flushed from all CPU's.

Reported-by: Benjamin Herrenschmidt
Tested-by: Dave Hansen
Acked-by: Hugh Dickins
Cc: Peter Zijlstra
Cc: Russell King - ARM Linux
Cc: Tony Luck
Signed-off-by: Linus Torvalds
---
 arch/s390/include/asm/tlb.h | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c544b6f05d9..a25f09fbaf3 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->batch = NULL;
 }

-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	__tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	tlb_table_flush(tlb);
 }
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+

 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
--
cgit v1.2.3-18-g5258

From 3901c1124ec5099254a9396085f7798153a7293f Mon Sep 17 00:00:00 2001
From: Harald Freudenberger
Date: Wed, 7 May 2014 16:51:29 +0200
Subject: crypto: s390 - fix aes,des ctr mode concurrency finding.

An additional testcase found an issue with the last series of patches
applied: the fallback solution may not save the iv value after
operation. This very small fix just makes sure the iv is copied back
to the walk/desc struct.

Cc: # 3.14+
Signed-off-by: Harald Freudenberger
Signed-off-by: Herbert Xu
---
 arch/s390/crypto/aes_s390.c | 3 +++
 arch/s390/crypto/des_s390.c | 3 +++
 2 files changed, 6 insertions(+)

(limited to 'arch/s390')

diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index cf3c0089bef..23223cd63e5 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -820,6 +820,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 		else
 			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
 		spin_unlock(&ctrblk_lock);
+	} else {
+		if (!nbytes)
+			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
 	}
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 0a5aac8a941..7acb77f7ef1 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
 		else
 			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
 		spin_unlock(&ctrblk_lock);
+	} else {
+		if (!nbytes)
+			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
 	}
 	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
 	if (nbytes) {
--
cgit v1.2.3-18-g5258
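A note on the CTR fix above: counter mode is stateful across calls. walk->iv carries the running counter, and whichever buffer the counter lived in during the operation (the shared ctrblk or the fallback buffer), its final value has to be copied back into walk->iv, which is what the added else branch does. The sketch below is not the s390 code; it uses a dummy block transform and invented names purely to show why handing the updated counter back lets a split request produce the same stream as a single one.

/*
 * Stand-alone sketch (not the kernel code): CTR mode keeps a running
 * counter that must be handed back to the caller after every call,
 * otherwise a follow-up call reuses old counter values. The "cipher"
 * below is a dummy XOR and all names are invented for this demo.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLK 16

/* Placeholder for the real block cipher call; no security whatsoever. */
static void toy_encrypt_block(const uint8_t ctr[BLK], uint8_t out[BLK])
{
	for (int i = 0; i < BLK; i++)
		out[i] = ctr[i] ^ 0x5a;
}

/* Big-endian increment of the counter block. */
static void ctr_inc(uint8_t ctr[BLK])
{
	for (int i = BLK - 1; i >= 0; i--)
		if (++ctr[i])
			break;
}

/*
 * The point mirrored from the patch: iv is the caller's buffer and is
 * left holding the counter to be used next, so a later call continues
 * the stream (the kernel does this with memcpy(walk->iv, ctrptr, ...)).
 */
static void toy_ctr_crypt(uint8_t *buf, size_t len, uint8_t iv[BLK])
{
	uint8_t ks[BLK];

	for (size_t off = 0; off < len; off += BLK) {
		size_t n = len - off < BLK ? len - off : BLK;

		toy_encrypt_block(iv, ks);
		for (size_t i = 0; i < n; i++)
			buf[off + i] ^= ks[i];
		ctr_inc(iv);
	}
}

int main(void)
{
	uint8_t iv1[BLK] = { 0 }, iv2[BLK] = { 0 };
	uint8_t a[40], b[40];

	for (size_t i = 0; i < sizeof(a); i++)
		a[i] = (uint8_t)i;
	memcpy(b, a, sizeof(a));

	toy_ctr_crypt(a, sizeof(a), iv1);	/* one call for 40 bytes */

	toy_ctr_crypt(b, 32, iv2);		/* same data in two calls; */
	toy_ctr_crypt(b + 32, 8, iv2);		/* works because iv2 was updated */

	printf("split stream matches one-shot stream: %s\n",
	       memcmp(a, b, sizeof(a)) ? "no" : "yes");
	return 0;
}

If toy_ctr_crypt left iv untouched (the analogue of the missing copy-back), the second call would restart from the original counter, reuse keystream, and the comparison would fail.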