author     Tejun Heo <tj@kernel.org>  2010-02-16 15:20:26 +0000
committer  David S. Miller <davem@davemloft.net>  2010-02-16 23:05:38 -0800
commit     7d720c3e4f0c4fc152a6bf17e24244a3c85412d2
tree       36e037187ce79acb211702bea22e99c625787757 /net/ipv4
parent     2bb4646fce8d09916b351d1a62f98db7cec6fc41
percpu: add __percpu sparse annotations to net
Add __percpu sparse annotations to net.

These annotations are to make sparse consider percpu variables to be in a different address space and warn if accessed without going through percpu accessors. This patch doesn't affect normal builds.

The macro and type tricks around snmp stats make things a bit interesting. The DEFINE/DECLARE_SNMP_STAT() macros mark the target field as __percpu and the SNMP_UPD_PO_STATS() macro is updated accordingly. All snmp_mib_*() users which used to cast the argument to (void **) are updated to cast it to (void __percpu **).

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Vlad Yasevich <vladislav.yasevich@hp.com>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
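For context, the sketch below (not part of this patch; struct my_mib, my_counter, and the helper names are hypothetical) illustrates the pattern the commit message describes: a dynamically allocated percpu pointer is declared __percpu, allocation goes through alloc_percpu(), and every access goes through a percpu accessor such as this_cpu_inc() or per_cpu_ptr(). Dereferencing my_counter directly would make sparse warn about an address-space mismatch, which is exactly the check this annotation enables for the net MIB pointers.

/*
 * Illustrative sketch only -- not taken from this patch.
 * "struct my_mib", "my_counter" and the helpers are made-up names.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct my_mib {
	unsigned long packets;
};

/* __percpu places the pointer in the percpu address space for sparse. */
static struct my_mib __percpu *my_counter;

static int my_counter_init(void)
{
	/* alloc_percpu() returns a __percpu-annotated pointer. */
	my_counter = alloc_percpu(struct my_mib);
	return my_counter ? 0 : -ENOMEM;
}

static void my_counter_bump(void)
{
	/* this_cpu_inc() is a preemption-safe percpu accessor. */
	this_cpu_inc(my_counter->packets);
}

static unsigned long my_counter_fold(void)
{
	/* Summing the per-CPU copies mirrors what snmp_fold_field() does. */
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(my_counter, cpu)->packets;
	return sum;
}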
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c  46
-rw-r--r--  net/ipv4/proc.c     28
-rw-r--r--  net/ipv4/route.c     2
-rw-r--r--  net/ipv4/tcp.c      21
4 files changed, 50 insertions, 47 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7d12c6a9b19..33b7dffa773 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1385,7 +1385,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
-unsigned long snmp_fold_field(void *mib[], int offt)
+unsigned long snmp_fold_field(void __percpu *mib[], int offt)
{
unsigned long res = 0;
int i;
@@ -1398,7 +1398,7 @@ unsigned long snmp_fold_field(void *mib[], int offt)
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
-int snmp_mib_init(void *ptr[2], size_t mibsize)
+int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
{
BUG_ON(ptr == NULL);
ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
@@ -1416,7 +1416,7 @@ err0:
}
EXPORT_SYMBOL_GPL(snmp_mib_init);
-void snmp_mib_free(void *ptr[2])
+void snmp_mib_free(void __percpu *ptr[2])
{
BUG_ON(ptr == NULL);
free_percpu(ptr[0]);
@@ -1460,25 +1460,25 @@ static const struct net_protocol icmp_protocol = {
static __net_init int ipv4_mib_init_net(struct net *net)
{
- if (snmp_mib_init((void **)net->mib.tcp_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
sizeof(struct tcp_mib)) < 0)
goto err_tcp_mib;
- if (snmp_mib_init((void **)net->mib.ip_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
sizeof(struct ipstats_mib)) < 0)
goto err_ip_mib;
- if (snmp_mib_init((void **)net->mib.net_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
sizeof(struct linux_mib)) < 0)
goto err_net_mib;
- if (snmp_mib_init((void **)net->mib.udp_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
sizeof(struct udp_mib)) < 0)
goto err_udp_mib;
- if (snmp_mib_init((void **)net->mib.udplite_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
sizeof(struct udp_mib)) < 0)
goto err_udplite_mib;
- if (snmp_mib_init((void **)net->mib.icmp_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
sizeof(struct icmp_mib)) < 0)
goto err_icmp_mib;
- if (snmp_mib_init((void **)net->mib.icmpmsg_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
sizeof(struct icmpmsg_mib)) < 0)
goto err_icmpmsg_mib;
@@ -1486,30 +1486,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
return 0;
err_icmpmsg_mib:
- snmp_mib_free((void **)net->mib.icmp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
err_icmp_mib:
- snmp_mib_free((void **)net->mib.udplite_statistics);
+ snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
err_udplite_mib:
- snmp_mib_free((void **)net->mib.udp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.udp_statistics);
err_udp_mib:
- snmp_mib_free((void **)net->mib.net_statistics);
+ snmp_mib_free((void __percpu **)net->mib.net_statistics);
err_net_mib:
- snmp_mib_free((void **)net->mib.ip_statistics);
+ snmp_mib_free((void __percpu **)net->mib.ip_statistics);
err_ip_mib:
- snmp_mib_free((void **)net->mib.tcp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
err_tcp_mib:
return -ENOMEM;
}
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
- snmp_mib_free((void **)net->mib.icmpmsg_statistics);
- snmp_mib_free((void **)net->mib.icmp_statistics);
- snmp_mib_free((void **)net->mib.udplite_statistics);
- snmp_mib_free((void **)net->mib.udp_statistics);
- snmp_mib_free((void **)net->mib.net_statistics);
- snmp_mib_free((void **)net->mib.ip_statistics);
- snmp_mib_free((void **)net->mib.tcp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
+ snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
+ snmp_mib_free((void __percpu **)net->mib.udp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.net_statistics);
+ snmp_mib_free((void __percpu **)net->mib.ip_statistics);
+ snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
}
static __net_initdata struct pernet_operations ipv4_mib_ops = {
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 1b09a6dde7c..242ed230737 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -280,7 +280,7 @@ static void icmpmsg_put(struct seq_file *seq)
count = 0;
for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
- val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i);
+ val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
if (val) {
type[count] = i;
vals[count++] = val;
@@ -307,18 +307,18 @@ static void icmp_put(struct seq_file *seq)
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " Out%s", icmpmibmap[i].name);
seq_printf(seq, "\nIcmp: %lu %lu",
- snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
- snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
+ snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
+ snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **) net->mib.icmpmsg_statistics,
+ snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
icmpmibmap[i].index));
seq_printf(seq, " %lu %lu",
- snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
- snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
+ snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
+ snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **) net->mib.icmpmsg_statistics,
+ snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
icmpmibmap[i].index | 0x100));
}
@@ -341,7 +341,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.ip_statistics,
+ snmp_fold_field((void __percpu **)net->mib.ip_statistics,
snmp4_ipstats_list[i].entry));
icmp_put(seq); /* RFC 2011 compatibility */
@@ -356,11 +356,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
/* MaxConn field is signed, RFC 2012 */
if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
seq_printf(seq, " %ld",
- snmp_fold_field((void **)net->mib.tcp_statistics,
+ snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
snmp4_tcp_list[i].entry));
else
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.tcp_statistics,
+ snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
snmp4_tcp_list[i].entry));
}
@@ -371,7 +371,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nUdp:");
for (i = 0; snmp4_udp_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.udp_statistics,
+ snmp_fold_field((void __percpu **)net->mib.udp_statistics,
snmp4_udp_list[i].entry));
/* the UDP and UDP-Lite MIBs are the same */
@@ -382,7 +382,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nUdpLite:");
for (i = 0; snmp4_udp_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.udplite_statistics,
+ snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
snmp4_udp_list[i].entry));
seq_putc(seq, '\n');
@@ -419,7 +419,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nTcpExt:");
for (i = 0; snmp4_net_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.net_statistics,
+ snmp_fold_field((void __percpu **)net->mib.net_statistics,
snmp4_net_list[i].entry));
seq_puts(seq, "\nIpExt:");
@@ -429,7 +429,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nIpExt:");
for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.ip_statistics,
+ snmp_fold_field((void __percpu **)net->mib.ip_statistics,
snmp4_ipextstats_list[i].entry));
seq_putc(seq, '\n');
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b16dfadbe6d..04762d3bef7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3334,7 +3334,7 @@ static __net_initdata struct pernet_operations rt_secret_timer_ops = {
#ifdef CONFIG_NET_CLS_ROUTE
-struct ip_rt_acct *ip_rt_acct __read_mostly;
+struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */
static __initdata unsigned long rhash_entries;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d5d69ea8f24..e471d037fcc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2788,10 +2788,10 @@ EXPORT_SYMBOL(tcp_gro_complete);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
{
int cpu;
for_each_possible_cpu(cpu) {
@@ -2808,7 +2808,7 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
void tcp_free_md5sig_pool(void)
{
- struct tcp_md5sig_pool **pool = NULL;
+ struct tcp_md5sig_pool * __percpu *pool = NULL;
spin_lock_bh(&tcp_md5sig_pool_lock);
if (--tcp_md5sig_users == 0) {
@@ -2822,10 +2822,11 @@ void tcp_free_md5sig_pool(void)
EXPORT_SYMBOL(tcp_free_md5sig_pool);
-static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
+static struct tcp_md5sig_pool * __percpu *
+__tcp_alloc_md5sig_pool(struct sock *sk)
{
int cpu;
- struct tcp_md5sig_pool **pool;
+ struct tcp_md5sig_pool * __percpu *pool;
pool = alloc_percpu(struct tcp_md5sig_pool *);
if (!pool)
@@ -2852,9 +2853,9 @@ out_free:
return NULL;
}
-struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
+struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
- struct tcp_md5sig_pool **pool;
+ struct tcp_md5sig_pool * __percpu *pool;
int alloc = 0;
retry:
@@ -2873,7 +2874,9 @@ retry:
if (alloc) {
/* we cannot hold spinlock here because this may sleep. */
- struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
+ struct tcp_md5sig_pool * __percpu *p;
+
+ p = __tcp_alloc_md5sig_pool(sk);
spin_lock_bh(&tcp_md5sig_pool_lock);
if (!p) {
tcp_md5sig_users--;
@@ -2897,7 +2900,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
- struct tcp_md5sig_pool **p;
+ struct tcp_md5sig_pool * __percpu *p;
spin_lock_bh(&tcp_md5sig_pool_lock);
p = tcp_md5sig_pool;
if (p)