Diffstat (limited to 'net/ipv6/inet6_connection_sock.c')
 -rw-r--r--  net/ipv6/inet6_connection_sock.c | 209
 1 files changed, 105 insertions, 104 deletions
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d1815..a245e5ddffb 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -28,59 +28,66 @@
 #include <net/inet6_connection_sock.h>
 
 int inet6_csk_bind_conflict(const struct sock *sk,
-			    const struct inet_bind_bucket *tb)
+			    const struct inet_bind_bucket *tb, bool relax)
 {
 	const struct sock *sk2;
-	const struct hlist_node *node;
+	int reuse = sk->sk_reuse;
+	int reuseport = sk->sk_reuseport;
+	kuid_t uid = sock_i_uid((struct sock *)sk);
 
 	/* We must walk the whole port owner list in this case. -DaveM */
 	/*
 	 * See comment in inet_csk_bind_conflict about sock lookup
 	 * vs net namespaces issues.
 	 */
-	sk_for_each_bound(sk2, node, &tb->owners) {
+	sk_for_each_bound(sk2, &tb->owners) {
 		if (sk != sk2 &&
 		    (!sk->sk_bound_dev_if ||
 		     !sk2->sk_bound_dev_if ||
-		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
-		    (!sk->sk_reuse || !sk2->sk_reuse ||
-		     sk2->sk_state == TCP_LISTEN) &&
-		     ipv6_rcv_saddr_equal(sk, sk2))
-			break;
+		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
+			if ((!reuse || !sk2->sk_reuse ||
+			     sk2->sk_state == TCP_LISTEN) &&
+			    (!reuseport || !sk2->sk_reuseport ||
+			     (sk2->sk_state != TCP_TIME_WAIT &&
+			      !uid_eq(uid,
+				      sock_i_uid((struct sock *)sk2))))) {
+				if (ipv6_rcv_saddr_equal(sk, sk2))
+					break;
+			}
+			if (!relax && reuse && sk2->sk_reuse &&
+			    sk2->sk_state != TCP_LISTEN &&
+			    ipv6_rcv_saddr_equal(sk, sk2))
+				break;
+		}
 	}
 
-	return node != NULL;
+	return sk2 != NULL;
 }
 
 EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
 
 struct dst_entry *inet6_csk_route_req(struct sock *sk,
+				      struct flowi6 *fl6,
 				      const struct request_sock *req)
 {
-	struct inet6_request_sock *treq = inet6_rsk(req);
+	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *final_p, final;
 	struct dst_entry *dst;
-	struct flowi fl;
-
-	memset(&fl, 0, sizeof(fl));
-	fl.proto = IPPROTO_TCP;
-	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-	final_p = fl6_update_dst(&fl, np->opt, &final);
-	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
-	fl.oif = sk->sk_bound_dev_if;
-	fl.mark = sk->sk_mark;
-	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-	fl.fl_ip_sport = inet_rsk(req)->loc_port;
-	security_req_classify_flow(req, &fl);
-
-	if (ip6_dst_lookup(sk, &dst, &fl))
-		return NULL;
-
-	if (final_p)
-		ipv6_addr_copy(&fl.fl6_dst, final_p);
 
-	if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = IPPROTO_TCP;
+	fl6->daddr = ireq->ir_v6_rmt_addr;
+	final_p = fl6_update_dst(fl6, np->opt, &final);
+	fl6->saddr = ireq->ir_v6_loc_addr;
+	fl6->flowi6_oif = ireq->ir_iif;
+	fl6->flowi6_mark = ireq->ir_mark;
+	fl6->fl6_dport = ireq->ir_rmt_port;
+	fl6->fl6_sport = htons(ireq->ir_num);
+	security_req_classify_flow(req, flowi6_to_flowi(fl6));
+
+	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+	if (IS_ERR(dst))
 		return NULL;
 
 	return dst;
@@ -90,7 +97,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
  * request_sock (formerly open request) hash tables.
  */
 static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
-			   const u32 rnd, const u16 synq_hsize)
+			   const u32 rnd, const u32 synq_hsize)
 {
 	u32 c;
 
@@ -122,13 +129,13 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
 						     lopt->nr_table_entries)];
 	     (req = *prev) != NULL;
 	     prev = &req->dl_next) {
-		const struct inet6_request_sock *treq = inet6_rsk(req);
+		const struct inet_request_sock *ireq = inet_rsk(req);
 
-		if (inet_rsk(req)->rmt_port == rport &&
+		if (ireq->ir_rmt_port == rport &&
 		    req->rsk_ops->family == AF_INET6 &&
-		    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
-		    ipv6_addr_equal(&treq->loc_addr, laddr) &&
-		    (!treq->iif || treq->iif == iif)) {
+		    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
+		    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
+		    (!ireq->ir_iif || ireq->ir_iif == iif)) {
 			WARN_ON(req->sk != NULL);
 			*prevp = prev;
 			return req;
@@ -146,8 +153,8 @@ void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
-				      inet_rsk(req)->rmt_port,
+	const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
+				      inet_rsk(req)->ir_rmt_port,
 				      lopt->hash_rnd, lopt->nr_table_entries);
 
 	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -158,108 +165,102 @@ EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);
 
 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
 {
-	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
 
 	sin6->sin6_family = AF_INET6;
-	ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
+	sin6->sin6_addr = sk->sk_v6_daddr;
 	sin6->sin6_port	= inet_sk(sk)->inet_dport;
 	/* We do not store received flowlabel for TCP */
 	sin6->sin6_flowinfo = 0;
-	sin6->sin6_scope_id = 0;
-	if (sk->sk_bound_dev_if &&
-	    ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-		sin6->sin6_scope_id = sk->sk_bound_dev_if;
+	sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+						  sk->sk_bound_dev_if);
 }
 
 EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
 
 static inline
 void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
-			   struct in6_addr *daddr, struct in6_addr *saddr)
+			   const struct in6_addr *daddr,
+			   const struct in6_addr *saddr)
 {
 	__ip6_dst_store(sk, dst, daddr, saddr);
-
-#ifdef CONFIG_XFRM
-	{
-		struct rt6_info *rt = (struct rt6_info  *)dst;
-		rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
-	}
-#endif
 }
 
 static inline
 struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
 {
-	struct dst_entry *dst;
-
-	dst = __sk_dst_check(sk, cookie);
-
-#ifdef CONFIG_XFRM
-	if (dst) {
-		struct rt6_info *rt = (struct rt6_info *)dst;
-		if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
-			__sk_dst_reset(sk);
-			dst = NULL;
-		}
-	}
-#endif
-
-	return dst;
+	return __sk_dst_check(sk, cookie);
 }
 
-int inet6_csk_xmit(struct sk_buff *skb)
+static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
+						struct flowi6 *fl6)
 {
-	struct sock *sk = skb->sk;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct flowi fl;
-	struct dst_entry *dst;
 	struct in6_addr *final_p, final;
+	struct dst_entry *dst;
 
-	memset(&fl, 0, sizeof(fl));
-	fl.proto = sk->sk_protocol;
-	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-	ipv6_addr_copy(&fl.fl6_src, &np->saddr);
-	fl.fl6_flowlabel = np->flow_label;
-	IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
-	fl.oif = sk->sk_bound_dev_if;
-	fl.mark = sk->sk_mark;
-	fl.fl_ip_sport = inet->inet_sport;
-	fl.fl_ip_dport = inet->inet_dport;
-	security_sk_classify_flow(sk, &fl);
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = sk->sk_protocol;
+	fl6->daddr = sk->sk_v6_daddr;
+	fl6->saddr = np->saddr;
+	fl6->flowlabel = np->flow_label;
+	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+	fl6->flowi6_oif = sk->sk_bound_dev_if;
+	fl6->flowi6_mark = sk->sk_mark;
+	fl6->fl6_sport = inet->inet_sport;
+	fl6->fl6_dport = inet->inet_dport;
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
-	final_p = fl6_update_dst(&fl, np->opt, &final);
+	final_p = fl6_update_dst(fl6, np->opt, &final);
 
 	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
+	if (!dst) {
+		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
 
-	if (dst == NULL) {
-		int err = ip6_dst_lookup(sk, &dst, &fl);
-
-		if (err) {
-			sk->sk_err_soft = -err;
-			kfree_skb(skb);
-			return err;
-		}
-
-		if (final_p)
-			ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-		if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) {
-			sk->sk_route_caps = 0;
-			kfree_skb(skb);
-			return err;
-		}
+		if (!IS_ERR(dst))
+			__inet6_csk_dst_store(sk, dst, NULL, NULL);
+	}
+	return dst;
+}
 
-		__inet6_csk_dst_store(sk, dst, NULL, NULL);
+int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
+{
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct flowi6 fl6;
+	struct dst_entry *dst;
+	int res;
+
+	dst = inet6_csk_route_socket(sk, &fl6);
+	if (IS_ERR(dst)) {
+		sk->sk_err_soft = -PTR_ERR(dst);
+		sk->sk_route_caps = 0;
+		kfree_skb(skb);
+		return PTR_ERR(dst);
 	}
 
-	skb_dst_set(skb, dst_clone(dst));
+	rcu_read_lock();
+	skb_dst_set_noref(skb, dst);
 
 	/* Restore final destination back after routing done */
-	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
+	fl6.daddr = sk->sk_v6_daddr;
 
-	return ip6_xmit(sk, skb, &fl, np->opt);
+	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+	rcu_read_unlock();
+	return res;
 }
-
 EXPORT_SYMBOL_GPL(inet6_csk_xmit);
+
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
+{
+	struct flowi6 fl6;
+	struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);
+
+	if (IS_ERR(dst))
+		return NULL;
+	dst->ops->update_pmtu(dst, sk, NULL, mtu);
+
+	dst = inet6_csk_route_socket(sk, &fl6);
+	return IS_ERR(dst) ? NULL : dst;
+}
+EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
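
Not part of the patch above: a minimal userspace sketch of the behaviour that the reworked inet6_csk_bind_conflict() reuseport branch permits. It assumes a Linux kernel with SO_REUSEPORT support (3.9 or later); the port number and the listen_v6() helper are illustrative only, not taken from the kernel tree.

/* Illustrative only. Two IPv6 TCP listeners owned by the same UID can share
 * one port when both set SO_REUSEPORT before bind(). */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int listen_v6(unsigned short port)
{
	struct sockaddr_in6 addr;
	int one = 1;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* Without this, the second bind() below hits the conflict path
	 * in the kernel and fails with EADDRINUSE. */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0) {
		close(fd);
		return -1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sin6_family = AF_INET6;
	addr.sin6_addr = in6addr_any;
	addr.sin6_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 16) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int a = listen_v6(8080);	/* hypothetical port */
	int b = listen_v6(8080);	/* succeeds only with SO_REUSEPORT */

	printf("first listener fd=%d, second listener fd=%d\n", a, b);
	return (a < 0 || b < 0) ? 1 : 0;
}

If either socket omits SO_REUSEPORT, or the two sockets belong to different users, the second bind() fails with EADDRINUSE, which corresponds to the sk2 != NULL conflict result returned by the kernel function above.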
