diff options
Diffstat (limited to 'arch/sparc/net/bpf_jit_comp.c')
-rw-r--r-- | arch/sparc/net/bpf_jit_comp.c | 182
1 files changed, 93 insertions, 89 deletions
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 218b6b23c37..892a102671a 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_)  #define BNE		(F2(0, 2) | CONDNE)  #ifdef CONFIG_SPARC64 -#define BNE_PTR		(F2(0, 1) | CONDNE | (2 << 20)) +#define BE_PTR		(F2(0, 1) | CONDE | (2 << 20))  #else -#define BNE_PTR		BNE +#define BE_PTR		BE  #endif  #define SETHI(K, REG)	\ @@ -415,20 +415,11 @@ void bpf_jit_compile(struct sk_filter *fp)  		emit_reg_move(O7, r_saved_O7);  		switch (filter[0].code) { -		case BPF_S_RET_K: -		case BPF_S_LD_W_LEN: -		case BPF_S_ANC_PROTOCOL: -		case BPF_S_ANC_PKTTYPE: -		case BPF_S_ANC_IFINDEX: -		case BPF_S_ANC_MARK: -		case BPF_S_ANC_RXHASH: -		case BPF_S_ANC_VLAN_TAG: -		case BPF_S_ANC_VLAN_TAG_PRESENT: -		case BPF_S_ANC_CPU: -		case BPF_S_ANC_QUEUE: -		case BPF_S_LD_W_ABS: -		case BPF_S_LD_H_ABS: -		case BPF_S_LD_B_ABS: +		case BPF_RET | BPF_K: +		case BPF_LD | BPF_W | BPF_LEN: +		case BPF_LD | BPF_W | BPF_ABS: +		case BPF_LD | BPF_H | BPF_ABS: +		case BPF_LD | BPF_B | BPF_ABS:  			/* The first instruction sets the A register (or is  			 * a "RET 'constant'")  			 */ @@ -445,63 +436,75 @@ void bpf_jit_compile(struct sk_filter *fp)  			unsigned int t_offset;  			unsigned int f_offset;  			u32 t_op, f_op; +			u16 code = bpf_anc_helper(&filter[i]);  			int ilen; -			switch (filter[i].code) { -			case BPF_S_ALU_ADD_X:	/* A += X; */ +			switch (code) { +			case BPF_ALU | BPF_ADD | BPF_X:	/* A += X; */  				emit_alu_X(ADD);  				break; -			case BPF_S_ALU_ADD_K:	/* A += K; */ +			case BPF_ALU | BPF_ADD | BPF_K:	/* A += K; */  				emit_alu_K(ADD, K);  				break; -			case BPF_S_ALU_SUB_X:	/* A -= X; */ +			case BPF_ALU | BPF_SUB | BPF_X:	/* A -= X; */  				emit_alu_X(SUB);  				break; -			case BPF_S_ALU_SUB_K:	/* A -= K */ +			case BPF_ALU | BPF_SUB | BPF_K:	/* A -= K */  				emit_alu_K(SUB, K);  				break; -			case 
BPF_S_ALU_AND_X:	/* A &= X */ +			case BPF_ALU | BPF_AND | BPF_X:	/* A &= X */  				emit_alu_X(AND);  				break; -			case BPF_S_ALU_AND_K:	/* A &= K */ +			case BPF_ALU | BPF_AND | BPF_K:	/* A &= K */  				emit_alu_K(AND, K);  				break; -			case BPF_S_ALU_OR_X:	/* A |= X */ +			case BPF_ALU | BPF_OR | BPF_X:	/* A |= X */  				emit_alu_X(OR);  				break; -			case BPF_S_ALU_OR_K:	/* A |= K */ +			case BPF_ALU | BPF_OR | BPF_K:	/* A |= K */  				emit_alu_K(OR, K);  				break; -			case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ -			case BPF_S_ALU_XOR_X: +			case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */ +			case BPF_ALU | BPF_XOR | BPF_X:  				emit_alu_X(XOR);  				break; -			case BPF_S_ALU_XOR_K:	/* A ^= K */ +			case BPF_ALU | BPF_XOR | BPF_K:	/* A ^= K */  				emit_alu_K(XOR, K);  				break; -			case BPF_S_ALU_LSH_X:	/* A <<= X */ +			case BPF_ALU | BPF_LSH | BPF_X:	/* A <<= X */  				emit_alu_X(SLL);  				break; -			case BPF_S_ALU_LSH_K:	/* A <<= K */ +			case BPF_ALU | BPF_LSH | BPF_K:	/* A <<= K */  				emit_alu_K(SLL, K);  				break; -			case BPF_S_ALU_RSH_X:	/* A >>= X */ +			case BPF_ALU | BPF_RSH | BPF_X:	/* A >>= X */  				emit_alu_X(SRL);  				break; -			case BPF_S_ALU_RSH_K:	/* A >>= K */ +			case BPF_ALU | BPF_RSH | BPF_K:	/* A >>= K */  				emit_alu_K(SRL, K);  				break; -			case BPF_S_ALU_MUL_X:	/* A *= X; */ +			case BPF_ALU | BPF_MUL | BPF_X:	/* A *= X; */  				emit_alu_X(MUL);  				break; -			case BPF_S_ALU_MUL_K:	/* A *= K */ +			case BPF_ALU | BPF_MUL | BPF_K:	/* A *= K */  				emit_alu_K(MUL, K);  				break; -			case BPF_S_ALU_DIV_K:	/* A /= K */ -				emit_alu_K(MUL, K); -				emit_read_y(r_A); +			case BPF_ALU | BPF_DIV | BPF_K:	/* A /= K with K != 0*/ +				if (K == 1) +					break; +				emit_write_y(G0); +#ifdef CONFIG_SPARC32 +				/* The Sparc v8 architecture requires +				 * three instructions between a %y +				 * register write and the first use. 
+				 */ +				emit_nop(); +				emit_nop(); +				emit_nop(); +#endif +				emit_alu_K(DIV, K);  				break; -			case BPF_S_ALU_DIV_X:	/* A /= X; */ +			case BPF_ALU | BPF_DIV | BPF_X:	/* A /= X; */  				emit_cmpi(r_X, 0);  				if (pc_ret0 > 0) {  					t_offset = addrs[pc_ret0 - 1]; @@ -533,10 +536,10 @@ void bpf_jit_compile(struct sk_filter *fp)  #endif  				emit_alu_X(DIV);  				break; -			case BPF_S_ALU_NEG: +			case BPF_ALU | BPF_NEG:  				emit_neg();  				break; -			case BPF_S_RET_K: +			case BPF_RET | BPF_K:  				if (!K) {  					if (pc_ret0 == -1)  						pc_ret0 = i; @@ -545,7 +548,7 @@ void bpf_jit_compile(struct sk_filter *fp)  					emit_loadimm(K, r_A);  				}  				/* Fallthrough */ -			case BPF_S_RET_A: +			case BPF_RET | BPF_A:  				if (seen_or_pass0) {  					if (i != flen - 1) {  						emit_jump(cleanup_addr); @@ -562,18 +565,18 @@ void bpf_jit_compile(struct sk_filter *fp)  				emit_jmpl(r_saved_O7, 8, G0);  				emit_reg_move(r_A, O0); /* delay slot */  				break; -			case BPF_S_MISC_TAX: +			case BPF_MISC | BPF_TAX:  				seen |= SEEN_XREG;  				emit_reg_move(r_A, r_X);  				break; -			case BPF_S_MISC_TXA: +			case BPF_MISC | BPF_TXA:  				seen |= SEEN_XREG;  				emit_reg_move(r_X, r_A);  				break; -			case BPF_S_ANC_CPU: +			case BPF_ANC | SKF_AD_CPU:  				emit_load_cpu(r_A);  				break; -			case BPF_S_ANC_PROTOCOL: +			case BPF_ANC | SKF_AD_PROTOCOL:  				emit_skb_load16(protocol, r_A);  				break;  #if 0 @@ -581,38 +584,38 @@ void bpf_jit_compile(struct sk_filter *fp)  				 * a bit field even though we very much  				 * know what we are doing here.  				 
*/ -			case BPF_S_ANC_PKTTYPE: +			case BPF_ANC | SKF_AD_PKTTYPE:  				__emit_skb_load8(pkt_type, r_A);  				emit_alu_K(SRL, 5);  				break;  #endif -			case BPF_S_ANC_IFINDEX: +			case BPF_ANC | SKF_AD_IFINDEX:  				emit_skb_loadptr(dev, r_A);  				emit_cmpi(r_A, 0); -				emit_branch(BNE_PTR, cleanup_addr + 4); +				emit_branch(BE_PTR, cleanup_addr + 4);  				emit_nop();  				emit_load32(r_A, struct net_device, ifindex, r_A);  				break; -			case BPF_S_ANC_MARK: +			case BPF_ANC | SKF_AD_MARK:  				emit_skb_load32(mark, r_A);  				break; -			case BPF_S_ANC_QUEUE: +			case BPF_ANC | SKF_AD_QUEUE:  				emit_skb_load16(queue_mapping, r_A);  				break; -			case BPF_S_ANC_HATYPE: +			case BPF_ANC | SKF_AD_HATYPE:  				emit_skb_loadptr(dev, r_A);  				emit_cmpi(r_A, 0); -				emit_branch(BNE_PTR, cleanup_addr + 4); +				emit_branch(BE_PTR, cleanup_addr + 4);  				emit_nop();  				emit_load16(r_A, struct net_device, type, r_A);  				break; -			case BPF_S_ANC_RXHASH: -				emit_skb_load32(rxhash, r_A); +			case BPF_ANC | SKF_AD_RXHASH: +				emit_skb_load32(hash, r_A);  				break; -			case BPF_S_ANC_VLAN_TAG: -			case BPF_S_ANC_VLAN_TAG_PRESENT: +			case BPF_ANC | SKF_AD_VLAN_TAG: +			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:  				emit_skb_load16(vlan_tci, r_A); -				if (filter[i].code == BPF_S_ANC_VLAN_TAG) { +				if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {  					emit_andi(r_A, VLAN_VID_MASK, r_A);  				} else {  					emit_loadimm(VLAN_TAG_PRESENT, r_TMP); @@ -620,44 +623,44 @@ void bpf_jit_compile(struct sk_filter *fp)  				}  				break; -			case BPF_S_LD_IMM: +			case BPF_LD | BPF_IMM:  				emit_loadimm(K, r_A);  				break; -			case BPF_S_LDX_IMM: +			case BPF_LDX | BPF_IMM:  				emit_loadimm(K, r_X);  				break; -			case BPF_S_LD_MEM: +			case BPF_LD | BPF_MEM:  				emit_ldmem(K * 4, r_A);  				break; -			case BPF_S_LDX_MEM: +			case BPF_LDX | BPF_MEM:  				emit_ldmem(K * 4, r_X);  				break; -			case BPF_S_ST: +			case BPF_ST:  				emit_stmem(K * 4, r_A);  				break; -			
case BPF_S_STX: +			case BPF_STX:  				emit_stmem(K * 4, r_X);  				break;  #define CHOOSE_LOAD_FUNC(K, func) \  	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) -			case BPF_S_LD_W_ABS: +			case BPF_LD | BPF_W | BPF_ABS:  				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);  common_load:			seen |= SEEN_DATAREF;  				emit_loadimm(K, r_OFF);  				emit_call(func);  				break; -			case BPF_S_LD_H_ABS: +			case BPF_LD | BPF_H | BPF_ABS:  				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);  				goto common_load; -			case BPF_S_LD_B_ABS: +			case BPF_LD | BPF_B | BPF_ABS:  				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);  				goto common_load; -			case BPF_S_LDX_B_MSH: +			case BPF_LDX | BPF_B | BPF_MSH:  				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);  				goto common_load; -			case BPF_S_LD_W_IND: +			case BPF_LD | BPF_W | BPF_IND:  				func = bpf_jit_load_word;  common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;  				if (K) { @@ -672,13 +675,13 @@ common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;  				}  				emit_call(func);  				break; -			case BPF_S_LD_H_IND: +			case BPF_LD | BPF_H | BPF_IND:  				func = bpf_jit_load_half;  				goto common_load_ind; -			case BPF_S_LD_B_IND: +			case BPF_LD | BPF_B | BPF_IND:  				func = bpf_jit_load_byte;  				goto common_load_ind; -			case BPF_S_JMP_JA: +			case BPF_JMP | BPF_JA:  				emit_jump(addrs[i + K]);  				emit_nop();  				break; @@ -689,14 +692,14 @@ common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;  		f_op = FOP;		\  		goto cond_branch -			COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU); -			COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU); -			COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE); -			COND_SEL(BPF_S_JMP_JSET_K, BNE, BE); -			COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU); -			COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU); -			COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE); -			COND_SEL(BPF_S_JMP_JSET_X, BNE, BE); +			COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU); +			COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU); +		
	COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE); +			COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE); +			COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU); +			COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU); +			COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE); +			COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);  cond_branch:			f_offset = addrs[i + filter[i].jf];  				t_offset = addrs[i + filter[i].jt]; @@ -708,20 +711,20 @@ cond_branch:			f_offset = addrs[i + filter[i].jf];  					break;  				} -				switch (filter[i].code) { -				case BPF_S_JMP_JGT_X: -				case BPF_S_JMP_JGE_X: -				case BPF_S_JMP_JEQ_X: +				switch (code) { +				case BPF_JMP | BPF_JGT | BPF_X: +				case BPF_JMP | BPF_JGE | BPF_X: +				case BPF_JMP | BPF_JEQ | BPF_X:  					seen |= SEEN_XREG;  					emit_cmp(r_A, r_X);  					break; -				case BPF_S_JMP_JSET_X: +				case BPF_JMP | BPF_JSET | BPF_X:  					seen |= SEEN_XREG;  					emit_btst(r_A, r_X);  					break; -				case BPF_S_JMP_JEQ_K: -				case BPF_S_JMP_JGT_K: -				case BPF_S_JMP_JGE_K: +				case BPF_JMP | BPF_JEQ | BPF_K: +				case BPF_JMP | BPF_JGT | BPF_K: +				case BPF_JMP | BPF_JGE | BPF_K:  					if (is_simm13(K)) {  						emit_cmpi(r_A, K);  					} else { @@ -729,7 +732,7 @@ cond_branch:			f_offset = addrs[i + filter[i].jf];  						emit_cmp(r_A, r_TMP);  					}  					break; -				case BPF_S_JMP_JSET_K: +				case BPF_JMP | BPF_JSET | BPF_K:  					if (is_simm13(K)) {  						emit_btsti(r_A, K);  					} else { @@ -798,6 +801,7 @@ cond_branch:			f_offset = addrs[i + filter[i].jf];  	if (image) {  		bpf_flush_icache(image, image + proglen);  		fp->bpf_func = (void *)image; +		fp->jited = 1;  	}  out:  	kfree(addrs); @@ -806,7 +810,7 @@ out:  void bpf_jit_free(struct sk_filter *fp)  { -	if (fp->bpf_func != sk_run_filter) +	if (fp->jited)  		module_free(NULL, fp->bpf_func);  	kfree(fp);  }  | 
