Diffstat (limited to 'include/net/sock.h')
| -rw-r--r-- | include/net/sock.h | 151 |
1 file changed, 80 insertions, 71 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 79532540201..15635074570 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -156,7 +156,7 @@ typedef __u64 __bitwise __addrpair;
  */
 struct sock_common {
 	/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
-	 * address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH()
+	 * address on 64bit arches : cf INET_MATCH()
 	 */
 	union {
 		__addrpair	skc_addrpair;
@@ -191,6 +191,12 @@ struct sock_common {
 #ifdef CONFIG_NET_NS
 	struct net	*skc_net;
 #endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+	struct in6_addr		skc_v6_daddr;
+	struct in6_addr		skc_v6_rcv_saddr;
+#endif
+
 	/*
 	 * fields between dontcopy_begin/dontcopy_end
 	 * are not copied in sock_copy()
@@ -237,7 +243,8 @@ struct cg_proto;
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
- *	@sk_no_check: %SO_NO_CHECK setting, whether or not checkup packets
+ *	@sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
+ *	@sk_no_check_rx: allow zero checksum in RX packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
@@ -301,6 +308,8 @@ struct sock {
 #define sk_dontcopy_end		__sk_common.skc_dontcopy_end
 #define sk_hash			__sk_common.skc_hash
 #define sk_portpair		__sk_common.skc_portpair
+#define sk_num			__sk_common.skc_num
+#define sk_dport		__sk_common.skc_dport
 #define sk_addrpair		__sk_common.skc_addrpair
 #define sk_daddr		__sk_common.skc_daddr
 #define sk_rcv_saddr		__sk_common.skc_rcv_saddr
@@ -312,6 +321,9 @@ struct sock {
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
+#define sk_v6_daddr		__sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
+
 	socket_lock_t		sk_lock;
 	struct sk_buff_head	sk_receive_queue;
 	/*
@@ -360,7 +372,8 @@ struct sock {
 	struct sk_buff_head	sk_write_queue;
 	kmemcheck_bitfield_begin(flags);
 	unsigned int		sk_shutdown  : 2,
-				sk_no_check  : 2,
+				sk_no_check_tx : 1,
+				sk_no_check_rx : 1,
 				sk_userlocks : 4,
 				sk_protocol  : 8,
 				sk_type      : 16;
@@ -384,7 +397,7 @@ struct sock {
 	unsigned short		sk_ack_backlog;
 	unsigned short		sk_max_ack_backlog;
 	__u32			sk_priority;
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 	__u32			sk_cgrp_prioidx;
 #endif
 	struct pid		*sk_peer_pid;
@@ -407,7 +420,7 @@ struct sock {
 	u32			sk_classid;
 	struct cg_proto		*sk_cgrp;
 	void			(*sk_state_change)(struct sock *sk);
-	void			(*sk_data_ready)(struct sock *sk, int bytes);
+	void			(*sk_data_ready)(struct sock *sk);
 	void			(*sk_write_space)(struct sock *sk);
 	void			(*sk_error_report)(struct sock *sk);
 	int			(*sk_backlog_rcv)(struct sock *sk,
@@ -809,37 +822,51 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	return sk->sk_backlog_rcv(sk, skb);
 }
 
-static inline void sock_rps_record_flow(const struct sock *sk)
+static inline void sock_rps_record_flow_hash(__u32 hash)
 {
 #ifdef CONFIG_RPS
 	struct rps_sock_flow_table *sock_flow_table;
 
 	rcu_read_lock();
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
-	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rps_record_sock_flow(sock_flow_table, hash);
 	rcu_read_unlock();
 #endif
 }
 
-static inline void sock_rps_reset_flow(const struct sock *sk)
+static inline void sock_rps_reset_flow_hash(__u32 hash)
 {
 #ifdef CONFIG_RPS
 	struct rps_sock_flow_table *sock_flow_table;
 
 	rcu_read_lock();
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
-	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rps_reset_sock_flow(sock_flow_table, hash);
 	rcu_read_unlock();
 #endif
 }
 
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+	sock_rps_record_flow_hash(sk->sk_rxhash);
+#endif
+}
+
+static inline void sock_rps_reset_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+	sock_rps_reset_flow_hash(sk->sk_rxhash);
+#endif
+}
+
 static inline void sock_rps_save_rxhash(struct sock *sk,
 					const struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-	if (unlikely(sk->sk_rxhash != skb->rxhash)) {
+	if (unlikely(sk->sk_rxhash != skb->hash)) {
 		sock_rps_reset_flow(sk);
-		sk->sk_rxhash = skb->rxhash;
+		sk->sk_rxhash = skb->hash;
 	}
 #endif
 }
@@ -1024,11 +1051,10 @@ enum cg_proto_flags {
 };
 
 struct cg_proto {
-	void			(*enter_memory_pressure)(struct sock *sk);
-	struct res_counter	*memory_allocated;	/* Current allocated memory. */
-	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
-	int			*memory_pressure;
-	long			*sysctl_mem;
+	struct res_counter	memory_allocated;	/* Current allocated memory. */
+	struct percpu_counter	sockets_allocated;	/* Current number of sockets. */
+	int			memory_pressure;
+	long			sysctl_mem[3];
 	unsigned long		flags;
 	/*
 	 * memcg field is used to find which memcg we belong directly
@@ -1124,7 +1150,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
 		return false;
 
 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return !!*sk->sk_cgrp->memory_pressure;
+		return !!sk->sk_cgrp->memory_pressure;
 
 	return !!*sk->sk_prot->memory_pressure;
 }
@@ -1144,8 +1170,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
 		struct proto *prot = sk->sk_prot;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			if (*cg_proto->memory_pressure)
-				*cg_proto->memory_pressure = 0;
+			cg_proto->memory_pressure = 0;
 	}
 
 }
@@ -1160,7 +1185,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
 		struct proto *prot = sk->sk_prot;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			cg_proto->enter_memory_pressure(sk);
+			cg_proto->memory_pressure = 1;
 	}
 
 	sk->sk_prot->enter_memory_pressure(sk);
@@ -1181,7 +1206,7 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
 	struct res_counter *fail;
 	int ret;
 
-	ret = res_counter_charge_nofail(prot->memory_allocated,
+	ret = res_counter_charge_nofail(&prot->memory_allocated,
 					amt << PAGE_SHIFT, &fail);
 	if (ret < 0)
 		*parent_status = OVER_LIMIT;
@@ -1190,13 +1215,13 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
 static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
 					      unsigned long amt)
 {
-	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+	res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
 }
 
 static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
 {
 	u64 ret;
-	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+	ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
 	return ret >> PAGE_SHIFT;
 }
 
@@ -1244,7 +1269,7 @@ static inline void sk_sockets_allocated_dec(struct sock *sk)
 		struct cg_proto *cg_proto = sk->sk_cgrp;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			percpu_counter_dec(cg_proto->sockets_allocated);
+			percpu_counter_dec(&cg_proto->sockets_allocated);
 	}
 
 	percpu_counter_dec(prot->sockets_allocated);
@@ -1258,7 +1283,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
 		struct cg_proto *cg_proto = sk->sk_cgrp;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			percpu_counter_inc(cg_proto->sockets_allocated);
+			percpu_counter_inc(&cg_proto->sockets_allocated);
 	}
 
 	percpu_counter_inc(prot->sockets_allocated);
@@ -1270,7 +1295,7 @@ sk_sockets_allocated_read_positive(struct sock *sk)
 	struct proto *prot = sk->sk_prot;
 
 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
+		return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);
 
 	return percpu_counter_read_positive(prot->sockets_allocated);
 }
@@ -1465,6 +1490,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
  */
 #define sock_owned_by_user(sk)	((sk)->sk_lock.owned)
 
+static inline void sock_release_ownership(struct sock *sk)
+{
+	sk->sk_lock.owned = 0;
+}
+
 /*
  * Macro so as to not evaluate some arguments when
  * lockdep is not enabled.
@@ -1526,8 +1556,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
 
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 			     gfp_t priority);
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
-			     gfp_t priority);
 void sock_wfree(struct sk_buff *skb);
 void skb_orphan_partial(struct sk_buff *skb);
 void sock_rfree(struct sk_buff *skb);
@@ -1595,35 +1623,6 @@ void sk_common_release(struct sock *sk);
 /* Initialise core socket variables */
 void sock_init_data(struct socket *sock, struct sock *sk);
 
-void sk_filter_release_rcu(struct rcu_head *rcu);
-
-/**
- *	sk_filter_release - release a socket filter
- *	@fp: filter to remove
- *
- *	Remove a filter from a socket and release its resources.
- */
-
-static inline void sk_filter_release(struct sk_filter *fp)
-{
-	if (atomic_dec_and_test(&fp->refcnt))
-		call_rcu(&fp->rcu, sk_filter_release_rcu);
-}
-
-static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
-{
-	unsigned int size = sk_filter_len(fp);
-
-	atomic_sub(size, &sk->sk_omem_alloc);
-	sk_filter_release(fp);
-}
-
-static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
-{
-	atomic_inc(&fp->refcnt);
-	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
-}
-
 /*
  * Socket reference counting postulates.
  *
@@ -1655,6 +1654,10 @@ static inline void sock_put(struct sock *sk)
 	if (atomic_dec_and_test(&sk->sk_refcnt))
 		sk_free(sk);
 }
+/* Generic version of sock_put(), dealing with all sockets
+ * (TCP_TIMEWAIT, ESTABLISHED...)
+ */
+void sock_gen_put(struct sock *sk);
 
 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
 
@@ -1727,14 +1730,12 @@ sk_dst_get(struct sock *sk)
 
 	rcu_read_lock();
 	dst = rcu_dereference(sk->sk_dst_cache);
-	if (dst)
-		dst_hold(dst);
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
 	rcu_read_unlock();
 	return dst;
 }
 
-void sk_reset_txq(struct sock *sk);
-
 static inline void dst_negative_advice(struct sock *sk)
 {
 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
@@ -1744,7 +1745,7 @@ static inline void dst_negative_advice(struct sock *sk)
 
 		if (ndst != dst) {
 			rcu_assign_pointer(sk->sk_dst_cache, ndst);
-			sk_reset_txq(sk);
+			sk_tx_queue_clear(sk);
 		}
 	}
 }
@@ -1767,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 static inline void
 sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-	spin_lock(&sk->sk_dst_lock);
-	__sk_dst_set(sk, dst);
-	spin_unlock(&sk->sk_dst_lock);
+	struct dst_entry *old_dst;
+
+	sk_tx_queue_clear(sk);
+	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+	dst_release(old_dst);
 }
 
 static inline void
@@ -1781,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
 static inline void
 sk_dst_reset(struct sock *sk)
 {
-	spin_lock(&sk->sk_dst_lock);
-	__sk_dst_reset(sk);
-	spin_unlock(&sk->sk_dst_lock);
+	sk_dst_set(sk, NULL);
 }
 
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
@@ -2165,7 +2166,6 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 {
 #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
 			   (1UL << SOCK_RCVTSTAMP)			| \
-			   (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)	| \
 			   (1UL << SOCK_TIMESTAMPING_SOFTWARE)		| \
 			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)	| \
 			   (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
@@ -2231,8 +2231,12 @@ void sock_net_set(struct sock *sk, struct net *net)
  */
 static inline void sk_change_net(struct sock *sk, struct net *net)
 {
-	put_net(sock_net(sk));
-	sock_net_set(sk, hold_net(net));
+	struct net *current_net = sock_net(sk);
+
+	if (!net_eq(current_net, net)) {
+		put_net(current_net);
+		sock_net_set(sk, hold_net(net));
+	}
 }
 
 static inline struct sock *skb_steal_sock(struct sk_buff *skb)
@@ -2253,6 +2257,11 @@ int sock_get_timestampns(struct sock *, struct timespec __user *);
 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 		       int level, int type);
 
+bool sk_ns_capable(const struct sock *sk,
+		   struct user_namespace *user_ns, int cap);
+bool sk_capable(const struct sock *sk, int cap);
+bool sk_net_capable(const struct sock *sk, int cap);
+
 /*
  * Enable debug/info messages
  */
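One of the more subtle changes above is in the dst caching helpers: sk_dst_get() switches from dst_hold() to atomic_inc_not_zero(), and sk_dst_set() replaces the sk_dst_lock spinlock with a bare xchg(), so the cached route is published and retired locklessly under RCU with a take-a-reference-only-if-still-live rule. The sketch below is a minimal single-threaded userspace model of that pattern using C11 atomics; all names (struct dst, cache_set, cache_get, dst_put) are illustrative, not kernel APIs, and the RCU grace period that keeps the old object alive for concurrent readers in the kernel is deliberately omitted.

/*
 * Userspace model of the lockless sk_dst_set()/sk_dst_get() pattern.
 * Compile with: cc -std=c11 -o dstmodel dstmodel.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dst {
	atomic_int refcnt;
	int id;
};

static _Atomic(struct dst *) cache;	/* plays the role of sk->sk_dst_cache */

static void dst_put(struct dst *d)
{
	/* Drop one reference; free on the last one. */
	if (d && atomic_fetch_sub(&d->refcnt, 1) == 1)
		free(d);
}

/* Like the new sk_dst_set(): publish with an atomic exchange instead of
 * taking a lock, then release the reference the cache held on the old dst. */
static void cache_set(struct dst *d)
{
	struct dst *old = atomic_exchange(&cache, d);

	dst_put(old);
}

/* Like the new sk_dst_get(): take a reference only if the count is still
 * non-zero, i.e. refuse an object that is already on its way to being freed
 * (the atomic_inc_not_zero() idiom). */
static struct dst *cache_get(void)
{
	struct dst *d = atomic_load(&cache);
	int c;

	if (!d)
		return NULL;
	c = atomic_load(&d->refcnt);
	while (c != 0)
		if (atomic_compare_exchange_weak(&d->refcnt, &c, c + 1))
			return d;	/* got a reference */
	return NULL;	/* dying object; caller falls back to a fresh route lookup */
}

int main(void)
{
	struct dst *d = malloc(sizeof(*d));

	atomic_init(&d->refcnt, 1);	/* the cache's own reference */
	d->id = 1;
	cache_set(d);

	struct dst *got = cache_get();
	if (got) {
		printf("got dst %d\n", got->id);
		dst_put(got);
	}
	cache_set(NULL);	/* mirrors sk_dst_reset(), now just sk_dst_set(sk, NULL) */
	return 0;
}

Note how sk_dst_reset() in the diff collapses to sk_dst_set(sk, NULL): once the setter is a lone xchg(), resetting is simply publishing NULL and dropping the old reference, which is why the spinlocked __sk_dst_reset() path could go away.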
