Diffstat (limited to 'include/net/inet_hashtables.h')
-rw-r--r--  include/net/inet_hashtables.h  266
1 file changed, 133 insertions(+), 133 deletions(-)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 97dc35ad09b..dd1950a7e27 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
+#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -28,18 +29,19 @@
#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
+#include <net/route.h>
#include <net/tcp_states.h>
+#include <net/netns/hash.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
/* This is for all connections with a full identity, no wildcards.
- * One chain is dedicated to TIME_WAIT sockets.
- * I'll experiment with dynamic table growth later.
+ * The 'e' prefix stands for Establish, but it really holds all
+ * sockets except LISTEN ones.
*/
struct inet_ehash_bucket {
- struct hlist_head chain;
- struct hlist_head twchain;
+ struct hlist_nulls_head chain;
};
/* There are a few simple rules, which allow for local port reuse by
@@ -74,21 +76,43 @@ struct inet_ehash_bucket {
* ports are created in O(1) time? I thought so. ;-) -DaveM
*/
struct inet_bind_bucket {
+#ifdef CONFIG_NET_NS
struct net *ib_net;
+#endif
unsigned short port;
- signed short fastreuse;
+ signed char fastreuse;
+ signed char fastreuseport;
+ kuid_t fastuid;
+ int num_owners;
struct hlist_node node;
struct hlist_head owners;
};
-#define inet_bind_bucket_for_each(tb, node, head) \
- hlist_for_each_entry(tb, node, head, node)
+static inline struct net *ib_net(struct inet_bind_bucket *ib)
+{
+ return read_pnet(&ib->ib_net);
+}
+
+#define inet_bind_bucket_for_each(tb, head) \
+ hlist_for_each_entry(tb, head, node)
struct inet_bind_hashbucket {
spinlock_t lock;
struct hlist_head chain;
};
+/*
+ * Sockets can be hashed in established or listening table
+ * We must use different 'nulls' end-of-chain value for listening
+ * hash table, or we might find a socket that was closed and
+ * reallocated/inserted into established hash table
+ */
+#define LISTENING_NULLS_BASE (1U << 29)
+struct inet_listen_hashbucket {
+ spinlock_t lock;
+ struct hlist_nulls_head head;
+};
+
/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
@@ -98,11 +122,10 @@ struct inet_hashinfo {
*
* TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
*
- * TIME_WAIT sockets use a separate chain (twchain).
*/
struct inet_ehash_bucket *ehash;
- rwlock_t *ehash_locks;
- unsigned int ehash_size;
+ spinlock_t *ehash_locks;
+ unsigned int ehash_mask;
unsigned int ehash_locks_mask;
/* Ok, let's try this, I give up, we do need a local binding
@@ -111,34 +134,34 @@ struct inet_hashinfo {
struct inet_bind_hashbucket *bhash;
unsigned int bhash_size;
- /* Note : 4 bytes padding on 64 bit arches */
+ /* 4-byte hole on 64-bit */
- /* All sockets in TCP_LISTEN state will be in here. This is the only
- * table where wildcard'd TCP sockets can exist. Hash function here
- * is just local port number.
- */
- struct hlist_head listening_hash[INET_LHTABLE_SIZE];
+ struct kmem_cache *bind_bucket_cachep;
/* All the above members are written once at bootup and
* never written again _or_ are predominantly read-access.
*
* Now align to a new cache line as all the following members
- * are often dirty.
+ * might be often dirty.
+ */
+ /* All sockets in TCP_LISTEN state will be in here. This is the only
+ * table where wildcard'd TCP sockets can exist. Hash function here
+ * is just local port number.
*/
- rwlock_t lhash_lock ____cacheline_aligned;
- atomic_t lhash_users;
- wait_queue_head_t lhash_wait;
- struct kmem_cache *bind_bucket_cachep;
+ struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
+ ____cacheline_aligned_in_smp;
+
+ atomic_t bsockets;
};
static inline struct inet_ehash_bucket *inet_ehash_bucket(
struct inet_hashinfo *hashinfo,
unsigned int hash)
{
- return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
+ return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}
-static inline rwlock_t *inet_ehash_lockp(
+static inline spinlock_t *inet_ehash_lockp(
struct inet_hashinfo *hashinfo,
unsigned int hash)
{
@@ -161,18 +184,18 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
size = 2048;
if (nr_pcpus >= 32)
size = 4096;
- if (sizeof(rwlock_t) != 0) {
+ if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
- if (size * sizeof(rwlock_t) > PAGE_SIZE)
- hashinfo->ehash_locks = vmalloc(size * sizeof(rwlock_t));
+ if (size * sizeof(spinlock_t) > PAGE_SIZE)
+ hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
else
#endif
- hashinfo->ehash_locks = kmalloc(size * sizeof(rwlock_t),
+ hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
GFP_KERNEL);
if (!hashinfo->ehash_locks)
return ENOMEM;
for (i = 0; i < size; i++)
- rwlock_init(&hashinfo->ehash_locks[i]);
+ spin_lock_init(&hashinfo->ehash_locks[i]);
}
hashinfo->ehash_locks_mask = size - 1;
return 0;
@@ -183,7 +206,7 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
if (hashinfo->ehash_locks) {
#ifdef CONFIG_NUMA
unsigned int size = (hashinfo->ehash_locks_mask + 1) *
- sizeof(rwlock_t);
+ sizeof(spinlock_t);
if (size > PAGE_SIZE)
vfree(hashinfo->ehash_locks);
else
@@ -193,93 +216,58 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
}
}
-extern struct inet_bind_bucket *
- inet_bind_bucket_create(struct kmem_cache *cachep,
- struct net *net,
- struct inet_bind_hashbucket *head,
- const unsigned short snum);
-extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
- struct inet_bind_bucket *tb);
+struct inet_bind_bucket *
+inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
+ struct inet_bind_hashbucket *head,
+ const unsigned short snum);
+void inet_bind_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind_bucket *tb);
-static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
+static inline int inet_bhashfn(struct net *net, const __u16 lport,
+ const int bhash_size)
{
- return lport & (bhash_size - 1);
+ return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
-extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum);
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ const unsigned short snum);
/* These can have wildcards, don't try too hard. */
-static inline int inet_lhashfn(const unsigned short num)
+static inline int inet_lhashfn(struct net *net, const unsigned short num)
{
- return num & (INET_LHTABLE_SIZE - 1);
+ return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}
static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
- return inet_lhashfn(inet_sk(sk)->num);
+ return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}
/* Caller must disable local BH processing. */
-static inline void __inet_inherit_port(struct sock *sk, struct sock *child)
-{
- struct inet_hashinfo *table = sk->sk_prot->hashinfo;
- const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
- struct inet_bind_hashbucket *head = &table->bhash[bhash];
- struct inet_bind_bucket *tb;
-
- spin_lock(&head->lock);
- tb = inet_csk(sk)->icsk_bind_hash;
- sk_add_bind_node(child, &tb->owners);
- inet_csk(child)->icsk_bind_hash = tb;
- spin_unlock(&head->lock);
-}
+int __inet_inherit_port(struct sock *sk, struct sock *child);
-static inline void inet_inherit_port(struct sock *sk, struct sock *child)
-{
- local_bh_disable();
- __inet_inherit_port(sk, child);
- local_bh_enable();
-}
-
-extern void inet_put_port(struct sock *sk);
-
-extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);
-
-/*
- * - We may sleep inside this lock.
- * - If sleeping is not required (or called from BH),
- * use plain read_(un)lock(&inet_hashinfo.lhash_lock).
- */
-static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
-{
- /* read_lock synchronizes to candidates to writers */
- read_lock(&hashinfo->lhash_lock);
- atomic_inc(&hashinfo->lhash_users);
- read_unlock(&hashinfo->lhash_lock);
-}
+void inet_put_port(struct sock *sk);
-static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
-{
- if (atomic_dec_and_test(&hashinfo->lhash_users))
- wake_up(&hashinfo->lhash_wait);
-}
+void inet_hashinfo_init(struct inet_hashinfo *h);
-extern void __inet_hash_nolisten(struct sock *sk);
-extern void inet_hash(struct sock *sk);
-extern void inet_unhash(struct sock *sk);
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+void inet_hash(struct sock *sk);
+void inet_unhash(struct sock *sk);
-extern struct sock *__inet_lookup_listener(struct net *net,
- struct inet_hashinfo *hashinfo,
- const __be32 daddr,
- const unsigned short hnum,
- const int dif);
+struct sock *__inet_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr,
+ const unsigned short hnum,
+ const int dif);
static inline struct sock *inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
+ __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
- return __inet_lookup_listener(net, hashinfo, daddr, ntohs(dport), dif);
+ return __inet_lookup_listener(net, hashinfo, saddr, sport,
+ daddr, ntohs(dport), dif);
}
/* Socket demux engine toys. */
@@ -291,7 +279,6 @@ static inline struct sock *inet_lookup_listener(struct net *net,
On 64bit targets we combine comparisons with pair of adjacent __be32
fields in the same way.
*/
-typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
@@ -301,42 +288,34 @@ typedef __u32 __bitwise __portpair;
#endif
#if (BITS_PER_LONG == 64)
-typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
const __addrpair __name = (__force __addrpair) ( \
(((__force __u64)(__be32)(__saddr)) << 32) | \
- ((__force __u64)(__be32)(__daddr)));
+ ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
const __addrpair __name = (__force __addrpair) ( \
(((__force __u64)(__be32)(__daddr)) << 32) | \
- ((__force __u64)(__be32)(__saddr)));
+ ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
-#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
- (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \
- ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
- ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
- (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \
- ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
- ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
+#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
+ (((__sk)->sk_portpair == (__ports)) && \
+ ((__sk)->sk_addrpair == (__cookie)) && \
+ (!(__sk)->sk_bound_dev_if || \
+ ((__sk)->sk_bound_dev_if == (__dif))) && \
+ net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
-#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
-#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
- (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \
- (inet_sk(__sk)->daddr == (__saddr)) && \
- (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
- ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#define INET_TW_MATCH(__sk, __net, __hash,__cookie, __saddr, __daddr, __ports, __dif) \
- (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \
- (inet_twsk(__sk)->tw_daddr == (__saddr)) && \
- (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
- ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
+#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
+ const int __name __deprecated __attribute__((unused))
+
+#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
+ (((__sk)->sk_portpair == (__ports)) && \
+ ((__sk)->sk_daddr == (__saddr)) && \
+ ((__sk)->sk_rcv_saddr == (__daddr)) && \
+ (!(__sk)->sk_bound_dev_if || \
+ ((__sk)->sk_bound_dev_if == (__dif))) && \
+ net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
/*
@@ -345,10 +324,11 @@ typedef __u64 __bitwise __addrpair;
*
* Local BH must be disabled here.
*/
-extern struct sock * __inet_lookup_established(struct net *net,
- struct inet_hashinfo *hashinfo,
- const __be32 saddr, const __be16 sport,
- const __be32 daddr, const u16 hnum, const int dif);
+struct sock *__inet_lookup_established(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 hnum,
+ const int dif);
static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
@@ -370,7 +350,8 @@ static inline struct sock *__inet_lookup(struct net *net,
struct sock *sk = __inet_lookup_established(net, hashinfo,
saddr, sport, daddr, hnum, dif);
- return sk ? : __inet_lookup_listener(net, hashinfo, daddr, hnum, dif);
+ return sk ? : __inet_lookup_listener(net, hashinfo, saddr, sport,
+ daddr, hnum, dif);
}
static inline struct sock *inet_lookup(struct net *net,
@@ -388,11 +369,30 @@ static inline struct sock *inet_lookup(struct net *net,
return sk;
}
-extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk, u32 port_offset,
- int (*check_established)(struct inet_timewait_death_row *,
- struct sock *, __u16, struct inet_timewait_sock **),
- void (*hash)(struct sock *sk));
-extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk);
+static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb,
+ const __be16 sport,
+ const __be16 dport)
+{
+ struct sock *sk = skb_steal_sock(skb);
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (sk)
+ return sk;
+ else
+ return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
+ iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb));
+}
+
+int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk, u32 port_offset,
+ int (*check_established)(struct inet_timewait_death_row *,
+ struct sock *, __u16,
+ struct inet_timewait_sock **),
+ int (*hash)(struct sock *sk,
+ struct inet_timewait_sock *twp));
+
+int inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk);
#endif /* _INET_HASHTABLES_H */
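Why the two tables need distinct nulls values, in short: established-table lookups run locklessly under RCU, and because sockets are recycled SLAB_DESTROY_BY_RCU style, an object can be freed and reinserted into a different chain while a reader is still walking it. The reader therefore checks the 'nulls' value that terminates whatever chain it finished on; if it is not the slot it started from (or, for listeners, slot + LISTENING_NULLS_BASE), the walk drifted onto a foreign chain and must restart. A stand-alone sketch of the trick follows; it is illustration only, with made-up names, not the kernel's hlist_nulls or __inet_lookup_established:

/*
 * Hypothetical demo of the 'nulls' end-of-chain trick.  The kernel
 * uses hlist_nulls from <linux/list_nulls.h> plus RCU; this is a
 * simplified single-threaded model of the same idea.
 */
#include <stdio.h>
#include <stdint.h>

struct node {
	struct node *next;
	int key;
};

/* Encode "end of chain for bucket <slot>" as an odd pointer value. */
#define MAKE_NULLS(slot)  ((struct node *)((((uintptr_t)(slot)) << 1) | 1))
#define IS_NULLS(p)       ((uintptr_t)(p) & 1)
#define NULLS_VALUE(p)    ((uintptr_t)(p) >> 1)

static struct node *lookup(struct node *head, unsigned int slot, int key)
{
	struct node *p;

	for (p = head; !IS_NULLS(p); p = p->next)
		if (p->key == key)
			return p;
	/*
	 * Ran off the end of a chain.  If the nulls value is not the
	 * slot we started in, a node was recycled into another bucket
	 * mid-walk; a real lockless reader would re-read the bucket
	 * head and restart at this point.
	 */
	if (NULLS_VALUE(p) != slot)
		fprintf(stderr, "drifted to bucket %u, restart\n",
			(unsigned int)NULLS_VALUE(p));
	return NULL;
}

int main(void)
{
	struct node b = { MAKE_NULLS(3), 42 };
	struct node a = { &b, 7 };

	printf("found key 42: %s\n", lookup(&a, 3, 42) ? "yes" : "no");
	printf("found key 99: %s\n", lookup(&a, 3, 99) ? "yes" : "no");
	return 0;
}

Keeping LISTENING_NULLS_BASE at (1U << 29), in practice above any ehash slot index, guarantees a reader can never mistake the end of an established chain for the end of a listening one.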
-rw-r--r--  arch/mips/kernel/mips_ksyms.c  46
-rw-r--r--  arch/mips/kernel/mips_machine.c  66
-rw-r--r--  arch/mips/kernel/module-rela.c  145
-rw-r--r--  arch/mips/kernel/module.c  197
-rw-r--r--  arch/mips/kernel/octeon_switch.S  519
-rw-r--r--  arch/mips/kernel/perf_event.c  69
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  1710
-rw-r--r--  arch/mips/kernel/pm-cps.c  716
-rw-r--r--  arch/mips/kernel/pm.c  99
-rw-r--r--  arch/mips/kernel/proc.c  196
-rw-r--r--  arch/mips/kernel/process.c  420
-rw-r--r--  arch/mips/kernel/prom.c  57
-rw-r--r--  arch/mips/kernel/ptrace.c  591
-rw-r--r--  arch/mips/kernel/ptrace32.c  218
-rw-r--r--  arch/mips/kernel/r2300_fpu.S  130
-rw-r--r--  arch/mips/kernel/r2300_switch.S  28
-rw-r--r--  arch/mips/kernel/r4k_fpu.S  89
-rw-r--r--  arch/mips/kernel/r4k_switch.S  163
-rw-r--r--  arch/mips/kernel/r6000_fpu.S  2
-rw-r--r--  arch/mips/kernel/relocate_kernel.S  192
-rw-r--r--  arch/mips/kernel/reset.c  4
-rw-r--r--  arch/mips/kernel/rtlx-cmp.c  119
-rw-r--r--  arch/mips/kernel/rtlx-mt.c  150
-rw-r--r--  arch/mips/kernel/rtlx.c  485
-rw-r--r--  arch/mips/kernel/scall32-o32.S  967
-rw-r--r--  arch/mips/kernel/scall64-64.S  168
-rw-r--r--  arch/mips/kernel/scall64-n32.S  169
-rw-r--r--  arch/mips/kernel/scall64-o32.S  207
-rw-r--r--  arch/mips/kernel/segment.c  110
-rw-r--r--  arch/mips/kernel/semaphore.c  168
-rw-r--r--  arch/mips/kernel/setup.c  485
-rw-r--r--  arch/mips/kernel/signal-common.h  183
-rw-r--r--  arch/mips/kernel/signal.c  640
-rw-r--r--  arch/mips/kernel/signal32.c  970
-rw-r--r--  arch/mips/kernel/signal_n32.c  137
-rw-r--r--  arch/mips/kernel/smp-bmips.c  535
-rw-r--r--  arch/mips/kernel/smp-cmp.c  159
-rw-r--r--  arch/mips/kernel/smp-cps.c  466
-rw-r--r--  arch/mips/kernel/smp-gic.c  64
-rw-r--r--  arch/mips/kernel/smp-mt.c  401
-rw-r--r--  arch/mips/kernel/smp-up.c  78
-rw-r--r--  arch/mips/kernel/smp.c  395
-rw-r--r--  arch/mips/kernel/smtc-asm.S  131
-rw-r--r--  arch/mips/kernel/smtc-proc.c  93
-rw-r--r--  arch/mips/kernel/smtc.c  1322
-rw-r--r--  arch/mips/kernel/spinlock_test.c  141
-rw-r--r--  arch/mips/kernel/spram.c  221
-rw-r--r--  arch/mips/kernel/stacktrace.c  87
-rw-r--r--  arch/mips/kernel/sync-r4k.c  132
-rw-r--r--  arch/mips/kernel/syscall.c  441
-rw-r--r--  arch/mips/kernel/sysirix.c  2136
-rw-r--r--  arch/mips/kernel/time.c  778
-rw-r--r--  arch/mips/kernel/topology.c  32
-rw-r--r--  arch/mips/kernel/traps.c  1816
-rw-r--r--  arch/mips/kernel/unaligned.c  1746
-rw-r--r--  arch/mips/kernel/vdso.c  109
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S  316
-rw-r--r--  arch/mips/kernel/vpe-cmp.c  180
-rw-r--r--  arch/mips/kernel/vpe-mt.c  521
-rw-r--r--  arch/mips/kernel/vpe.c  1087
-rw-r--r--  arch/mips/kernel/watch.c  196
126 files changed, 22194 insertions(+), 19920 deletions(-)
diff --git a/arch/mips/kernel/.gitignore b/arch/mips/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/mips/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/mips/kernel/8250-platform.c b/arch/mips/kernel/8250-platform.c
new file mode 100644
index 00000000000..5c6b2ab1f56
--- /dev/null
+++ b/arch/mips/kernel/8250-platform.c
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(base, int) \
+{ \
+ .iobase = base, \
+ .irq = int, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .regshift = 0, \
+}
+
+static struct plat_serial8250_port uart8250_data[] = {
+ PORT(0x3F8, 4),
+ PORT(0x2F8, 3),
+ PORT(0x3E8, 4),
+ PORT(0x2E8, 3),
+ { },
+};
+
+static struct platform_device uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = uart8250_data,
+ },
+};
+
+static int __init uart8250_init(void)
+{
+ return platform_device_register(&uart8250_device);
+}
+
+module_init(uart8250_init);
+
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic 8250 UART probe driver");
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 881c467c698..008a2fed058 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -2,70 +2,128 @@
# Makefile for the Linux/MIPS kernel.
#
-extra-y := head.o init_task.o vmlinux.lds
-
-obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
- ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
- time.o traps.o unaligned.o
-
-binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
- irix5sys.o sysirix.o
-
+extra-y := head.o vmlinux.lds
+
+obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
+ prom.o ptrace.o reset.o setup.o signal.o syscall.o \
+ time.o topology.o traps.o unaligned.o watch.o vdso.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+CFLAGS_REMOVE_perf_event.o = -pg
+CFLAGS_REMOVE_perf_event_mipsxx.o = -pg
+endif
+
+obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
+obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
+obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
+obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
+obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
+obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
+obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
+obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
+obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
+obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
+obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
+
+obj-$(CONFIG_DEBUG_FS) += segment.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
+obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
-obj-$(CONFIG_APM) += apm.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
+obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R4000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R4X00) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R5000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R5432) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R8000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_RM7000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_RM9000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_NEVADA) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R10000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_SB1) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o
obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP_UP) += smp-up.o
+obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
-obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
+obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
+obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
+obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
+obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
+obj-$(CONFIG_CPU_MIPSR2) += spram.o
-obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
-obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
+obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o
+obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
-obj-$(CONFIG_NO_ISA) += dma-no-isa.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
-obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
-obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o
-obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
+obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
+obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
+obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
+obj-$(CONFIG_IRQ_GIC) += irq-gic.o
+obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
-obj-$(CONFIG_BINFMT_IRIX) += binfmt_irix.o
-obj-$(CONFIG_MIPS32_COMPAT) += linux32.o signal32.o
+obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
-obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o ptrace32.o
+obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
-obj-$(CONFIG_KGDB) += gdb-low.o gdb-stub.o
+obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
obj-$(CONFIG_I8253) += i8253.o
-CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
+
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
+obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
+obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
+
+CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+
+obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
+
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
+
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+
+obj-$(CONFIG_MIPS_CM) += mips-cm.o
+obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
+
+obj-$(CONFIG_CPU_PM) += pm.o
+obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
+
+#
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
+# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
+# here because the compiler may use DSP ASE instructions (such as lwx) in
+# code paths where we cannot check that the CPU we are running on supports it.
+# Proper abstraction using HAVE_AS_DSP and macros is done in
+# arch/mips/include/asm/mipsregs.h.
+#
+ifeq ($(CONFIG_CPU_MIPSR2), y)
+CFLAGS_DSP = -DHAVE_AS_DSP
+
+CFLAGS_signal.o = $(CFLAGS_DSP)
+CFLAGS_signal32.o = $(CFLAGS_DSP)
+CFLAGS_process.o = $(CFLAGS_DSP)
+CFLAGS_branch.o = $(CFLAGS_DSP)
+CFLAGS_ptrace.o = $(CFLAGS_DSP)
+endif
-EXTRA_AFLAGS := $(CFLAGS)
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/apm.c b/arch/mips/kernel/apm.c
deleted file mode 100644
index 528e731049c..00000000000
--- a/arch/mips/kernel/apm.c
+++ /dev/null
@@ -1,604 +0,0 @@
-/*
- * bios-less APM driver for MIPS Linux
- * Jamey Hicks <jamey@crl.dec.com>
- * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
- *
- * APM 1.2 Reference:
- * Intel Corporation, Microsoft Corporation. Advanced Power Management
- * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
- *
- * [This document is available from Microsoft at:
- * http://www.microsoft.com/hwdev/busbios/amp_12.htm]
- */
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/miscdevice.h>
-#include <linux/apm_bios.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/pm.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/completion.h>
-
-#include <asm/apm.h> /* apm_power_info */
-#include <asm/system.h>
-
-/*
- * The apm_bios device is one of the misc char devices.
- * This is its minor number.
- */
-#define APM_MINOR_DEV 134
-
-/*
- * See Documentation/Config.help for the configuration options.
- *
- * Various options can be changed at boot time as follows:
- * (We allow underscores for compatibility with the modules code)
- * apm=on/off enable/disable APM
- */
-
-/*
- * Maximum number of events stored
- */
-#define APM_MAX_EVENTS 16
-
-struct apm_queue {
- unsigned int event_head;
- unsigned int event_tail;
- apm_event_t events[APM_MAX_EVENTS];
-};
-
-/*
- * The per-file APM data
- */
-struct apm_user {
- struct list_head list;
-
- unsigned int suser: 1;
- unsigned int writer: 1;
- unsigned int reader: 1;
-
- int suspend_result;
- unsigned int suspend_state;
-#define SUSPEND_NONE 0 /* no suspend pending */
-#define SUSPEND_PENDING 1 /* suspend pending read */
-#define SUSPEND_READ 2 /* suspend read, pending ack */
-#define SUSPEND_ACKED 3 /* suspend acked */
-#define SUSPEND_DONE 4 /* suspend completed */
-
- struct apm_queue queue;
-};
-
-/*
- * Local variables
- */
-static int suspends_pending;
-static int apm_disabled;
-static int mips_apm_active;
-
-static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
-static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
-
-/*
- * This is a list of everyone who has opened /dev/apm_bios
- */
-static DECLARE_RWSEM(user_list_lock);
-static LIST_HEAD(apm_user_list);
-
-/*
- * kapmd info. kapmd provides us a process context to handle
- * "APM" events within - specifically necessary if we're going
- * to be suspending the system.
- */
-static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
-static DECLARE_COMPLETION(kapmd_exit);
-static DEFINE_SPINLOCK(kapmd_queue_lock);
-static struct apm_queue kapmd_queue;
-
-
-static const char driver_version[] = "1.13"; /* no spaces */
-
-
-
-/*
- * Compatibility cruft until the IPAQ people move over to the new
- * interface.
- */
-static void __apm_get_power_status(struct apm_power_info *info)
-{
-}
-
-/*
- * This allows machines to provide their own "apm get power status" function.
- */
-void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
-EXPORT_SYMBOL(apm_get_power_status);
-
-
-/*
- * APM event queue management.
- */
-static inline int queue_empty(struct apm_queue *q)
-{
- return q->event_head == q->event_tail;
-}
-
-static inline apm_event_t queue_get_event(struct apm_queue *q)
-{
- q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
- return q->events[q->event_tail];
-}
-
-static void queue_add_event(struct apm_queue *q, apm_event_t event)
-{
- q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
- if (q->event_head == q->event_tail) {
- static int notified;
-
- if (notified++ == 0)
- printk(KERN_ERR "apm: an event queue overflowed\n");
- q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
- }
- q->events[q->event_head] = event;
-}
-
-static void queue_event_one_user(struct apm_user *as, apm_event_t event)
-{
- if (as->suser && as->writer) {
- switch (event) {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- /*
- * If this user already has a suspend pending,
- * don't queue another one.
- */
- if (as->suspend_state != SUSPEND_NONE)
- return;
-
- as->suspend_state = SUSPEND_PENDING;
- suspends_pending++;
- break;
- }
- }
- queue_add_event(&as->queue, event);
-}
-
-static void queue_event(apm_event_t event, struct apm_user *sender)
-{
- struct apm_user *as;
-
- down_read(&user_list_lock);
- list_for_each_entry(as, &apm_user_list, list) {
- if (as != sender && as->reader)
- queue_event_one_user(as, event);
- }
- up_read(&user_list_lock);
- wake_up_interruptible(&apm_waitqueue);
-}
-
-static void apm_suspend(void)
-{
- struct apm_user *as;
- int err = pm_suspend(PM_SUSPEND_MEM);
-
- /*
- * Anyone on the APM queues will think we're still suspended.
- * Send a message so everyone knows we're now awake again.
- */
- queue_event(APM_NORMAL_RESUME, NULL);
-
- /*
- * Finally, wake up anyone who is sleeping on the suspend.
- */
- down_read(&user_list_lock);
- list_for_each_entry(as, &apm_user_list, list) {
- as->suspend_result = err;
- as->suspend_state = SUSPEND_DONE;
- }
- up_read(&user_list_lock);
-
- wake_up(&apm_suspend_waitqueue);
-}
-
-static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
-{
- struct apm_user *as = fp->private_data;
- apm_event_t event;
- int i = count, ret = 0;
-
- if (count < sizeof(apm_event_t))
- return -EINVAL;
-
- if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
-
- while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
- event = queue_get_event(&as->queue);
-
- ret = -EFAULT;
- if (copy_to_user(buf, &event, sizeof(event)))
- break;
-
- if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
- as->suspend_state = SUSPEND_READ;
-
- buf += sizeof(event);
- i -= sizeof(event);
- }
-
- if (i < count)
- ret = count - i;
-
- return ret;
-}
-
-static unsigned int apm_poll(struct file *fp, poll_table * wait)
-{
- struct apm_user *as = fp->private_data;
-
- poll_wait(fp, &apm_waitqueue, wait);
- return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
-}
-
-/*
- * apm_ioctl - handle APM ioctl
- *
- * APM_IOC_SUSPEND
- * This IOCTL is overloaded, and performs two functions. It is used to:
- * - initiate a suspend
- * - acknowledge a suspend read from /dev/apm_bios.
- * Only when everyone who has opened /dev/apm_bios with write permission
- * has acknowledged does the actual suspend happen.
- */
-static int
-apm_ioctl(struct inode * inode, struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct apm_user *as = filp->private_data;
- unsigned long flags;
- int err = -EINVAL;
-
- if (!as->suser || !as->writer)
- return -EPERM;
-
- switch (cmd) {
- case APM_IOC_SUSPEND:
- as->suspend_result = -EINTR;
-
- if (as->suspend_state == SUSPEND_READ) {
- /*
- * If we read a suspend command from /dev/apm_bios,
- * then the corresponding APM_IOC_SUSPEND ioctl is
- * interpreted as an acknowledge.
- */
- as->suspend_state = SUSPEND_ACKED;
- suspends_pending--;
- } else {
- /*
- * Otherwise it is a request to suspend the system.
- * Queue an event for all readers, and expect an
- * acknowledge from all writers who haven't already
- * acknowledged.
- */
- queue_event(APM_USER_SUSPEND, as);
- }
-
- /*
- * If there are no further acknowledges required, suspend
- * the system.
- */
- if (suspends_pending == 0)
- apm_suspend();
-
- /*
- * Wait for the suspend/resume to complete. If there are
- * pending acknowledges, we wait here for them.
- *
- * Note that we need to ensure that the PM subsystem does
- * not kick us out of the wait when it suspends the threads.
- */
- flags = current->flags;
- current->flags |= PF_NOFREEZE;
-
- /*
- * Note: do not allow a thread which is acking the suspend
- * to escape until the resume is complete.
- */
- if (as->suspend_state == SUSPEND_ACKED)
- wait_event(apm_suspend_waitqueue,
- as->suspend_state == SUSPEND_DONE);
- else
- wait_event_interruptible(apm_suspend_waitqueue,
- as->suspend_state == SUSPEND_DONE);
-
- current->flags = flags;
- err = as->suspend_result;
- as->suspend_state = SUSPEND_NONE;
- break;
- }
-
- return err;
-}
-
-static int apm_release(struct inode * inode, struct file * filp)
-{
- struct apm_user *as = filp->private_data;
- filp->private_data = NULL;
-
- down_write(&user_list_lock);
- list_del(&as->list);
- up_write(&user_list_lock);
-
- /*
- * We are now unhooked from the chain. As far as new
- * events are concerned, we no longer exist. However, we
- * need to balance suspends_pending, which means the
- * possibility of sleeping.
- */
- if (as->suspend_state != SUSPEND_NONE) {
- suspends_pending -= 1;
- if (suspends_pending == 0)
- apm_suspend();
- }
-
- kfree(as);
- return 0;
-}
-
-static int apm_open(struct inode * inode, struct file * filp)
-{
- struct apm_user *as;
-
- as = (struct apm_user *)kzalloc(sizeof(*as), GFP_KERNEL);
- if (as) {
- /*
- * XXX - this is a tiny bit broken, when we consider BSD
- * process accounting. If the device is opened by root, we
- * instantly flag that we used superuser privs. Who knows,
- * we might close the device immediately without doing a
- * privileged operation -- cevans
- */
- as->suser = capable(CAP_SYS_ADMIN);
- as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
- as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
-
- down_write(&user_list_lock);
- list_add(&as->list, &apm_user_list);
- up_write(&user_list_lock);
-
- filp->private_data = as;
- }
-
- return as ? 0 : -ENOMEM;
-}
-
-static struct file_operations apm_bios_fops = {
- .owner = THIS_MODULE,
- .read = apm_read,
- .poll = apm_poll,
- .ioctl = apm_ioctl,
- .open = apm_open,
- .release = apm_release,
-};
-
-static struct miscdevice apm_device = {
- .minor = APM_MINOR_DEV,
- .name = "apm_bios",
- .fops = &apm_bios_fops
-};
-
-
-#ifdef CONFIG_PROC_FS
-/*
- * Arguments, with symbols from linux/apm_bios.h.
- *
- * 0) Linux driver version (this will change if format changes)
- * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2.
- * 2) APM flags from APM Installation Check (0x00):
- * bit 0: APM_16_BIT_SUPPORT
- * bit 1: APM_32_BIT_SUPPORT
- * bit 2: APM_IDLE_SLOWS_CLOCK
- * bit 3: APM_BIOS_DISABLED
- * bit 4: APM_BIOS_DISENGAGED
- * 3) AC line status
- * 0x00: Off-line
- * 0x01: On-line
- * 0x02: On backup power (BIOS >= 1.1 only)
- * 0xff: Unknown
- * 4) Battery status
- * 0x00: High
- * 0x01: Low
- * 0x02: Critical
- * 0x03: Charging
- * 0x04: Selected battery not present (BIOS >= 1.2 only)
- * 0xff: Unknown
- * 5) Battery flag
- * bit 0: High
- * bit 1: Low
- * bit 2: Critical
- * bit 3: Charging
- * bit 7: No system battery
- * 0xff: Unknown
- * 6) Remaining battery life (percentage of charge):
- * 0-100: valid
- * -1: Unknown
- * 7) Remaining battery life (time units):
- * Number of remaining minutes or seconds
- * -1: Unknown
- * 8) min = minutes; sec = seconds
- */
-static int apm_get_info(char *buf, char **start, off_t fpos, int length)
-{
- struct apm_power_info info;
- char *units;
- int ret;
-
- info.ac_line_status = 0xff;
- info.battery_status = 0xff;
- info.battery_flag = 0xff;
- info.battery_life = -1;
- info.time = -1;
- info.units = -1;
-
- if (apm_get_power_status)
- apm_get_power_status(&info);
-
- switch (info.units) {
- default: units = "?"; break;
- case 0: units = "min"; break;
- case 1: units = "sec"; break;
- }
-
- ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
- driver_version, APM_32_BIT_SUPPORT,
- info.ac_line_status, info.battery_status,
- info.battery_flag, info.battery_life,
- info.time, units);
-
- return ret;
-}
-#endif
-
-static int kapmd(void *arg)
-{
- daemonize("kapmd");
- current->flags |= PF_NOFREEZE;
-
- do {
- apm_event_t event;
-
- wait_event_interruptible(kapmd_wait,
- !queue_empty(&kapmd_queue) || !mips_apm_active);
-
- if (!mips_apm_active)
- break;
-
- spin_lock_irq(&kapmd_queue_lock);
- event = 0;
- if (!queue_empty(&kapmd_queue))
- event = queue_get_event(&kapmd_queue);
- spin_unlock_irq(&kapmd_queue_lock);
-
- switch (event) {
- case 0:
- break;
-
- case APM_LOW_BATTERY:
- case APM_POWER_STATUS_CHANGE:
- queue_event(event, NULL);
- break;
-
- case APM_USER_SUSPEND:
- case APM_SYS_SUSPEND:
- queue_event(event, NULL);
- if (suspends_pending == 0)
- apm_suspend();
- break;
-
- case APM_CRITICAL_SUSPEND:
- apm_suspend();
- break;
- }
- } while (1);
-
- complete_and_exit(&kapmd_exit, 0);
-}
-
-static int __init apm_init(void)
-{
- int ret;
-
- if (apm_disabled) {
- printk(KERN_NOTICE "apm: disabled on user request.\n");
- return -ENODEV;
- }
-
- mips_apm_active = 1;
-
- ret = kernel_thread(kapmd, NULL, CLONE_KERNEL);
- if (ret < 0) {
- mips_apm_active = 0;
- return ret;
- }
-
-#ifdef CONFIG_PROC_FS
- create_proc_info_entry("apm", 0, NULL, apm_get_info);
-#endif
-
- ret = misc_register(&apm_device);
- if (ret != 0) {
- remove_proc_entry("apm", NULL);
-
- mips_apm_active = 0;
- wake_up(&kapmd_wait);
- wait_for_completion(&kapmd_exit);
- }
-
- return ret;
-}
-
-static void __exit apm_exit(void)
-{
- misc_deregister(&apm_device);
- remove_proc_entry("apm", NULL);
-
- mips_apm_active = 0;
- wake_up(&kapmd_wait);
- wait_for_completion(&kapmd_exit);
-}
-
-module_init(apm_init);
-module_exit(apm_exit);
-
-MODULE_AUTHOR("Stephen Rothwell");
-MODULE_DESCRIPTION("Advanced Power Management");
-MODULE_LICENSE("GPL");
-
-#ifndef MODULE
-static int __init apm_setup(char *str)
-{
- while ((str != NULL) && (*str != '\0')) {
- if (strncmp(str, "off", 3) == 0)
- apm_disabled = 1;
- if (strncmp(str, "on", 2) == 0)
- apm_disabled = 0;
- str = strchr(str, ',');
- if (str != NULL)
- str += strspn(str, ", \t");
- }
- return 1;
-}
-
-__setup("apm=", apm_setup);
-#endif
-
-/**
- * apm_queue_event - queue an APM event for kapmd
- * @event: APM event
- *
- * Queue an APM event for kapmd to process and ultimately take the
- * appropriate action. Only a subset of events are handled:
- * %APM_LOW_BATTERY
- * %APM_POWER_STATUS_CHANGE
- * %APM_USER_SUSPEND
- * %APM_SYS_SUSPEND
- * %APM_CRITICAL_SUSPEND
- */
-void apm_queue_event(apm_event_t event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&kapmd_queue_lock, flags);
- queue_add_event(&kapmd_queue, event);
- spin_unlock_irqrestore(&kapmd_queue_lock, flags);
-
- wake_up_interruptible(&kapmd_wait);
-}
-EXPORT_SYMBOL(apm_queue_event);
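One detail of the removed driver worth noting: apm_queue is a fixed-size ring that drops the oldest event on overflow (queue_add_event() above advances event_tail whenever event_head catches up to it). A minimal stand-alone sketch of the same discipline, with hypothetical names:

/* Hypothetical demo of the drop-oldest ring used by the removed apm.c. */
#include <stdio.h>

#define MAX_EVENTS 4

struct ring {
	unsigned int head, tail;	/* indices into events[] */
	int events[MAX_EVENTS];
};

static void ring_add(struct ring *q, int ev)
{
	q->head = (q->head + 1) % MAX_EVENTS;
	if (q->head == q->tail)		/* full: drop the oldest event */
		q->tail = (q->tail + 1) % MAX_EVENTS;
	q->events[q->head] = ev;
}

static int ring_empty(const struct ring *q)
{
	return q->head == q->tail;
}

static int ring_get(struct ring *q)
{
	q->tail = (q->tail + 1) % MAX_EVENTS;
	return q->events[q->tail];
}

int main(void)
{
	struct ring q = { 0, 0, { 0 } };

	for (int i = 1; i <= 5; i++)	/* two more than the ring holds */
		ring_add(&q, i);
	while (!ring_empty(&q))
		printf("%d\n", ring_get(&q));	/* prints 3 4 5 */
	return 0;
}

With head == tail meaning empty, a ring of MAX_EVENTS slots holds at most MAX_EVENTS - 1 events; the demo queues five and reads back only the last three, exactly as the deleted queue_add_event()/queue_get_event() pair behaved.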
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ec28077d5ee..4bb5107511e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -12,326 +12,483 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/interrupt.h>
-
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <asm/pm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
+#include <asm/smp-cps.h>
-#define text(t) __asm__("\n@@@" t)
-#define _offset(type, member) (&(((type *)NULL)->member))
-#define offset(string, ptr, member) \
- __asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)))
-#define constant(string, member) \
- __asm__("\n@@@" string "%x0" : : "ri" (member))
-#define size(string, size) \
- __asm__("\n@@@" string "%0" : : "i" (sizeof(size)))
-#define linefeed text("")
+#include <linux/kvm_host.h>
void output_ptreg_defines(void)
{
- text("/* MIPS pt_regs offsets. */");
- offset("#define PT_R0 ", struct pt_regs, regs[0]);
- offset("#define PT_R1 ", struct pt_regs, regs[1]);
- offset("#define PT_R2 ", struct pt_regs, regs[2]);
- offset("#define PT_R3 ", struct pt_regs, regs[3]);
- offset("#define PT_R4 ", struct pt_regs, regs[4]);
- offset("#define PT_R5 ", struct pt_regs, regs[5]);
- offset("#define PT_R6 ", struct pt_regs, regs[6]);
- offset("#define PT_R7 ", struct pt_regs, regs[7]);
- offset("#define PT_R8 ", struct pt_regs, regs[8]);
- offset("#define PT_R9 ", struct pt_regs, regs[9]);
- offset("#define PT_R10 ", struct pt_regs, regs[10]);
- offset("#define PT_R11 ", struct pt_regs, regs[11]);
- offset("#define PT_R12 ", struct pt_regs, regs[12]);
- offset("#define PT_R13 ", struct pt_regs, regs[13]);
- offset("#define PT_R14 ", struct pt_regs, regs[14]);
- offset("#define PT_R15 ", struct pt_regs, regs[15]);
- offset("#define PT_R16 ", struct pt_regs, regs[16]);
- offset("#define PT_R17 ", struct pt_regs, regs[17]);
- offset("#define PT_R18 ", struct pt_regs, regs[18]);
- offset("#define PT_R19 ", struct pt_regs, regs[19]);
- offset("#define PT_R20 ", struct pt_regs, regs[20]);
- offset("#define PT_R21 ", struct pt_regs, regs[21]);
- offset("#define PT_R22 ", struct pt_regs, regs[22]);
- offset("#define PT_R23 ", struct pt_regs, regs[23]);
- offset("#define PT_R24 ", struct pt_regs, regs[24]);
- offset("#define PT_R25 ", struct pt_regs, regs[25]);
- offset("#define PT_R26 ", struct pt_regs, regs[26]);
- offset("#define PT_R27 ", struct pt_regs, regs[27]);
- offset("#define PT_R28 ", struct pt_regs, regs[28]);
- offset("#define PT_R29 ", struct pt_regs, regs[29]);
- offset("#define PT_R30 ", struct pt_regs, regs[30]);
- offset("#define PT_R31 ", struct pt_regs, regs[31]);
- offset("#define PT_LO ", struct pt_regs, lo);
- offset("#define PT_HI ", struct pt_regs, hi);
- offset("#define PT_EPC ", struct pt_regs, cp0_epc);
- offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
- offset("#define PT_STATUS ", struct pt_regs, cp0_status);
- offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
-#ifdef CONFIG_MIPS_MT_SMTC
- offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus);
-#endif /* CONFIG_MIPS_MT_SMTC */
- size("#define PT_SIZE ", struct pt_regs);
- linefeed;
+ COMMENT("MIPS pt_regs offsets.");
+ OFFSET(PT_R0, pt_regs, regs[0]);
+ OFFSET(PT_R1, pt_regs, regs[1]);
+ OFFSET(PT_R2, pt_regs, regs[2]);
+ OFFSET(PT_R3, pt_regs, regs[3]);
+ OFFSET(PT_R4, pt_regs, regs[4]);
+ OFFSET(PT_R5, pt_regs, regs[5]);
+ OFFSET(PT_R6, pt_regs, regs[6]);
+ OFFSET(PT_R7, pt_regs, regs[7]);
+ OFFSET(PT_R8, pt_regs, regs[8]);
+ OFFSET(PT_R9, pt_regs, regs[9]);
+ OFFSET(PT_R10, pt_regs, regs[10]);
+ OFFSET(PT_R11, pt_regs, regs[11]);
+ OFFSET(PT_R12, pt_regs, regs[12]);
+ OFFSET(PT_R13, pt_regs, regs[13]);
+ OFFSET(PT_R14, pt_regs, regs[14]);
+ OFFSET(PT_R15, pt_regs, regs[15]);
+ OFFSET(PT_R16, pt_regs, regs[16]);
+ OFFSET(PT_R17, pt_regs, regs[17]);
+ OFFSET(PT_R18, pt_regs, regs[18]);
+ OFFSET(PT_R19, pt_regs, regs[19]);
+ OFFSET(PT_R20, pt_regs, regs[20]);
+ OFFSET(PT_R21, pt_regs, regs[21]);
+ OFFSET(PT_R22, pt_regs, regs[22]);
+ OFFSET(PT_R23, pt_regs, regs[23]);
+ OFFSET(PT_R24, pt_regs, regs[24]);
+ OFFSET(PT_R25, pt_regs, regs[25]);
+ OFFSET(PT_R26, pt_regs, regs[26]);
+ OFFSET(PT_R27, pt_regs, regs[27]);
+ OFFSET(PT_R28, pt_regs, regs[28]);
+ OFFSET(PT_R29, pt_regs, regs[29]);
+ OFFSET(PT_R30, pt_regs, regs[30]);
+ OFFSET(PT_R31, pt_regs, regs[31]);
+ OFFSET(PT_LO, pt_regs, lo);
+ OFFSET(PT_HI, pt_regs, hi);
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ OFFSET(PT_ACX, pt_regs, acx);
+#endif
+ OFFSET(PT_EPC, pt_regs, cp0_epc);
+ OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
+ OFFSET(PT_STATUS, pt_regs, cp0_status);
+ OFFSET(PT_CAUSE, pt_regs, cp0_cause);
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ OFFSET(PT_MPL, pt_regs, mpl);
+ OFFSET(PT_MTP, pt_regs, mtp);
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ BLANK();
}
void output_task_defines(void)
{
- text("/* MIPS task_struct offsets. */");
- offset("#define TASK_STATE ", struct task_struct, state);
- offset("#define TASK_THREAD_INFO ", struct task_struct, thread_info);
- offset("#define TASK_FLAGS ", struct task_struct, flags);
- offset("#define TASK_MM ", struct task_struct, mm);
- offset("#define TASK_PID ", struct task_struct, pid);
- size( "#define TASK_STRUCT_SIZE ", struct task_struct);
- linefeed;
+ COMMENT("MIPS task_struct offsets.");
+ OFFSET(TASK_STATE, task_struct, state);
+ OFFSET(TASK_THREAD_INFO, task_struct, stack);
+ OFFSET(TASK_FLAGS, task_struct, flags);
+ OFFSET(TASK_MM, task_struct, mm);
+ OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_CC_STACKPROTECTOR)
+ OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
+ DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+ BLANK();
}
void output_thread_info_defines(void)
{
- text("/* MIPS thread_info offsets. */");
- offset("#define TI_TASK ", struct thread_info, task);
- offset("#define TI_EXEC_DOMAIN ", struct thread_info, exec_domain);
- offset("#define TI_FLAGS ", struct thread_info, flags);
- offset("#define TI_CPU ", struct thread_info, cpu);
- offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count);
- offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit);
- offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block);
- offset("#define TI_TP_VALUE ", struct thread_info, tp_value);
- constant("#define _THREAD_SIZE_ORDER ", THREAD_SIZE_ORDER);
- constant("#define _THREAD_SIZE ", THREAD_SIZE);
- constant("#define _THREAD_MASK ", THREAD_MASK);
- linefeed;
+ COMMENT("MIPS thread_info offsets.");
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_TP_VALUE, thread_info, tp_value);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+ OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
+ OFFSET(TI_REGS, thread_info, regs);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ DEFINE(_THREAD_MASK, THREAD_MASK);
+ BLANK();
}
void output_thread_defines(void)
{
- text("/* MIPS specific thread_struct offsets. */");
- offset("#define THREAD_REG16 ", struct task_struct, thread.reg16);
- offset("#define THREAD_REG17 ", struct task_struct, thread.reg17);
- offset("#define THREAD_REG18 ", struct task_struct, thread.reg18);
- offset("#define THREAD_REG19 ", struct task_struct, thread.reg19);
- offset("#define THREAD_REG20 ", struct task_struct, thread.reg20);
- offset("#define THREAD_REG21 ", struct task_struct, thread.reg21);
- offset("#define THREAD_REG22 ", struct task_struct, thread.reg22);
- offset("#define THREAD_REG23 ", struct task_struct, thread.reg23);
- offset("#define THREAD_REG29 ", struct task_struct, thread.reg29);
- offset("#define THREAD_REG30 ", struct task_struct, thread.reg30);
- offset("#define THREAD_REG31 ", struct task_struct, thread.reg31);
- offset("#define THREAD_STATUS ", struct task_struct,
+ COMMENT("MIPS specific thread_struct offsets.");
+ OFFSET(THREAD_REG16, task_struct, thread.reg16);
+ OFFSET(THREAD_REG17, task_struct, thread.reg17);
+ OFFSET(THREAD_REG18, task_struct, thread.reg18);
+ OFFSET(THREAD_REG19, task_struct, thread.reg19);
+ OFFSET(THREAD_REG20, task_struct, thread.reg20);
+ OFFSET(THREAD_REG21, task_struct, thread.reg21);
+ OFFSET(THREAD_REG22, task_struct, thread.reg22);
+ OFFSET(THREAD_REG23, task_struct, thread.reg23);
+ OFFSET(THREAD_REG29, task_struct, thread.reg29);
+ OFFSET(THREAD_REG30, task_struct, thread.reg30);
+ OFFSET(THREAD_REG31, task_struct, thread.reg31);
+ OFFSET(THREAD_STATUS, task_struct,
thread.cp0_status);
- offset("#define THREAD_FPU ", struct task_struct, thread.fpu);
+ OFFSET(THREAD_FPU, task_struct, thread.fpu);
- offset("#define THREAD_BVADDR ", struct task_struct, \
+ OFFSET(THREAD_BVADDR, task_struct, \
thread.cp0_badvaddr);
- offset("#define THREAD_BUADDR ", struct task_struct, \
+ OFFSET(THREAD_BUADDR, task_struct, \
thread.cp0_baduaddr);
- offset("#define THREAD_ECODE ", struct task_struct, \
+ OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
- offset("#define THREAD_TRAPNO ", struct task_struct, thread.trap_no);
- offset("#define THREAD_MFLAGS ", struct task_struct, thread.mflags);
- offset("#define THREAD_TRAMP ", struct task_struct, \
- thread.irix_trampoline);
- offset("#define THREAD_OLDCTX ", struct task_struct, \
- thread.irix_oldctx);
- linefeed;
+ BLANK();
}
void output_thread_fpu_defines(void)
{
- offset("#define THREAD_FPR0 ",
- struct task_struct, thread.fpu.fpr[0]);
- offset("#define THREAD_FPR1 ",
- struct task_struct, thread.fpu.fpr[1]);
- offset("#define THREAD_FPR2 ",
- struct task_struct, thread.fpu.fpr[2]);
- offset("#define THREAD_FPR3 ",
- struct task_struct, thread.fpu.fpr[3]);
- offset("#define THREAD_FPR4 ",
- struct task_struct, thread.fpu.fpr[4]);
- offset("#define THREAD_FPR5 ",
- struct task_struct, thread.fpu.fpr[5]);
- offset("#define THREAD_FPR6 ",
- struct task_struct, thread.fpu.fpr[6]);
- offset("#define THREAD_FPR7 ",
- struct task_struct, thread.fpu.fpr[7]);
- offset("#define THREAD_FPR8 ",
- struct task_struct, thread.fpu.fpr[8]);
- offset("#define THREAD_FPR9 ",
- struct task_struct, thread.fpu.fpr[9]);
- offset("#define THREAD_FPR10 ",
- struct task_struct, thread.fpu.fpr[10]);
- offset("#define THREAD_FPR11 ",
- struct task_struct, thread.fpu.fpr[11]);
- offset("#define THREAD_FPR12 ",
- struct task_struct, thread.fpu.fpr[12]);
- offset("#define THREAD_FPR13 ",
- struct task_struct, thread.fpu.fpr[13]);
- offset("#define THREAD_FPR14 ",
- struct task_struct, thread.fpu.fpr[14]);
- offset("#define THREAD_FPR15 ",
- struct task_struct, thread.fpu.fpr[15]);
- offset("#define THREAD_FPR16 ",
- struct task_struct, thread.fpu.fpr[16]);
- offset("#define THREAD_FPR17 ",
- struct task_struct, thread.fpu.fpr[17]);
- offset("#define THREAD_FPR18 ",
- struct task_struct, thread.fpu.fpr[18]);
- offset("#define THREAD_FPR19 ",
- struct task_struct, thread.fpu.fpr[19]);
- offset("#define THREAD_FPR20 ",
- struct task_struct, thread.fpu.fpr[20]);
- offset("#define THREAD_FPR21 ",
- struct task_struct, thread.fpu.fpr[21]);
- offset("#define THREAD_FPR22 ",
- struct task_struct, thread.fpu.fpr[22]);
- offset("#define THREAD_FPR23 ",
- struct task_struct, thread.fpu.fpr[23]);
- offset("#define THREAD_FPR24 ",
- struct task_struct, thread.fpu.fpr[24]);
- offset("#define THREAD_FPR25 ",
- struct task_struct, thread.fpu.fpr[25]);
- offset("#define THREAD_FPR26 ",
- struct task_struct, thread.fpu.fpr[26]);
- offset("#define THREAD_FPR27 ",
- struct task_struct, thread.fpu.fpr[27]);
- offset("#define THREAD_FPR28 ",
- struct task_struct, thread.fpu.fpr[28]);
- offset("#define THREAD_FPR29 ",
- struct task_struct, thread.fpu.fpr[29]);
- offset("#define THREAD_FPR30 ",
- struct task_struct, thread.fpu.fpr[30]);
- offset("#define THREAD_FPR31 ",
- struct task_struct, thread.fpu.fpr[31]);
+ OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
+ OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
+ OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
+ OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
+ OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
+ OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
+ OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
+ OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
+ OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
+ OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
+ OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
+ OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
+ OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
+ OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
+ OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
+ OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
+ OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
+ OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
+ OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
+ OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
+ OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
+ OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
+ OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
+ OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
+ OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
+ OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
+ OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
+ OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
+ OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
+ OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
+ OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
+ OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
+
+ /* the least significant 64 bits of each FP register */
+ OFFSET(THREAD_FPR0_LS64, task_struct,
+ thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR1_LS64, task_struct,
+ thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR2_LS64, task_struct,
+ thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR3_LS64, task_struct,
+ thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR4_LS64, task_struct,
+ thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR5_LS64, task_struct,
+ thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR6_LS64, task_struct,
+ thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR7_LS64, task_struct,
+ thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR8_LS64, task_struct,
+ thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR9_LS64, task_struct,
+ thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR10_LS64, task_struct,
+ thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR11_LS64, task_struct,
+ thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR12_LS64, task_struct,
+ thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR13_LS64, task_struct,
+ thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR14_LS64, task_struct,
+ thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR15_LS64, task_struct,
+ thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR16_LS64, task_struct,
+ thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR17_LS64, task_struct,
+ thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR18_LS64, task_struct,
+ thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR19_LS64, task_struct,
+ thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR20_LS64, task_struct,
+ thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR21_LS64, task_struct,
+ thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR22_LS64, task_struct,
+ thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR23_LS64, task_struct,
+ thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR24_LS64, task_struct,
+ thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR25_LS64, task_struct,
+ thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR26_LS64, task_struct,
+ thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR27_LS64, task_struct,
+ thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR28_LS64, task_struct,
+ thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR29_LS64, task_struct,
+ thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR30_LS64, task_struct,
+ thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR31_LS64, task_struct,
+ thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
- offset("#define THREAD_FCR31 ",
- struct task_struct, thread.fpu.fcr31);
- linefeed;
+ OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
+ BLANK();
}
void output_mm_defines(void)
{
- text("/* Size of struct page */");
- size("#define STRUCT_PAGE_SIZE ", struct page);
- linefeed;
- text("/* Linux mm_struct offsets. */");
- offset("#define MM_USERS ", struct mm_struct, mm_users);
- offset("#define MM_PGD ", struct mm_struct, pgd);
- offset("#define MM_CONTEXT ", struct mm_struct, context);
- linefeed;
- constant("#define _PAGE_SIZE ", PAGE_SIZE);
- constant("#define _PAGE_SHIFT ", PAGE_SHIFT);
- linefeed;
- constant("#define _PGD_T_SIZE ", sizeof(pgd_t));
- constant("#define _PMD_T_SIZE ", sizeof(pmd_t));
- constant("#define _PTE_T_SIZE ", sizeof(pte_t));
- linefeed;
- constant("#define _PGD_T_LOG2 ", PGD_T_LOG2);
- constant("#define _PMD_T_LOG2 ", PMD_T_LOG2);
- constant("#define _PTE_T_LOG2 ", PTE_T_LOG2);
- linefeed;
- constant("#define _PMD_SHIFT ", PMD_SHIFT);
- constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
- linefeed;
- constant("#define _PGD_ORDER ", PGD_ORDER);
- constant("#define _PMD_ORDER ", PMD_ORDER);
- constant("#define _PTE_ORDER ", PTE_ORDER);
- linefeed;
- constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
- constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
- constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
- linefeed;
+ COMMENT("Size of struct page");
+ DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+ BLANK();
+ COMMENT("Linux mm_struct offsets.");
+ OFFSET(MM_USERS, mm_struct, mm_users);
+ OFFSET(MM_PGD, mm_struct, pgd);
+ OFFSET(MM_CONTEXT, mm_struct, context);
+ BLANK();
+ DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+ DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+ DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+ BLANK();
+ DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
+ DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+ BLANK();
+ DEFINE(_PGD_ORDER, PGD_ORDER);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_ORDER, PMD_ORDER);
+#endif
+ DEFINE(_PTE_ORDER, PTE_ORDER);
+ BLANK();
+ DEFINE(_PMD_SHIFT, PMD_SHIFT);
+ DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+ BLANK();
+ DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+ DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+ DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+ BLANK();
+ DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+ DEFINE(_PAGE_SIZE, PAGE_SIZE);
+ BLANK();
}
#ifdef CONFIG_32BIT
void output_sc_defines(void)
{
- text("/* Linux sigcontext offsets. */");
- offset("#define SC_REGS ", struct sigcontext, sc_regs);
- offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
- offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
- offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
- offset("#define SC_PC ", struct sigcontext, sc_pc);
- offset("#define SC_STATUS ", struct sigcontext, sc_status);
- offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
- offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir);
- offset("#define SC_HI1 ", struct sigcontext, sc_hi1);
- offset("#define SC_LO1 ", struct sigcontext, sc_lo1);
- offset("#define SC_HI2 ", struct sigcontext, sc_hi2);
- offset("#define SC_LO2 ", struct sigcontext, sc_lo2);
- offset("#define SC_HI3 ", struct sigcontext, sc_hi3);
- offset("#define SC_LO3 ", struct sigcontext, sc_lo3);
- linefeed;
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+ OFFSET(SC_ACX, sigcontext, sc_acx);
+ OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+ OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+ OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir);
+ OFFSET(SC_HI1, sigcontext, sc_hi1);
+ OFFSET(SC_LO1, sigcontext, sc_lo1);
+ OFFSET(SC_HI2, sigcontext, sc_hi2);
+ OFFSET(SC_LO2, sigcontext, sc_lo2);
+ OFFSET(SC_HI3, sigcontext, sc_hi3);
+ OFFSET(SC_LO3, sigcontext, sc_lo3);
+ BLANK();
}
#endif
#ifdef CONFIG_64BIT
void output_sc_defines(void)
{
- text("/* Linux sigcontext offsets. */");
- offset("#define SC_REGS ", struct sigcontext, sc_regs);
- offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
- offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
- offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
- offset("#define SC_PC ", struct sigcontext, sc_pc);
- offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
- linefeed;
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+ OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+ OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+ BLANK();
}
#endif
#ifdef CONFIG_MIPS32_COMPAT
void output_sc32_defines(void)
{
- text("/* Linux 32-bit sigcontext offsets. */");
- offset("#define SC32_FPREGS ", struct sigcontext32, sc_fpregs);
- offset("#define SC32_FPC_CSR ", struct sigcontext32, sc_fpc_csr);
- offset("#define SC32_FPC_EIR ", struct sigcontext32, sc_fpc_eir);
- linefeed;
+ COMMENT("Linux 32-bit sigcontext offsets.");
+ OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
+ OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
+ OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
+ BLANK();
}
#endif
void output_signal_defined(void)
{
- text("/* Linux signal numbers. */");
- constant("#define _SIGHUP ", SIGHUP);
- constant("#define _SIGINT ", SIGINT);
- constant("#define _SIGQUIT ", SIGQUIT);
- constant("#define _SIGILL ", SIGILL);
- constant("#define _SIGTRAP ", SIGTRAP);
- constant("#define _SIGIOT ", SIGIOT);
- constant("#define _SIGABRT ", SIGABRT);
- constant("#define _SIGEMT ", SIGEMT);
- constant("#define _SIGFPE ", SIGFPE);
- constant("#define _SIGKILL ", SIGKILL);
- constant("#define _SIGBUS ", SIGBUS);
- constant("#define _SIGSEGV ", SIGSEGV);
- constant("#define _SIGSYS ", SIGSYS);
- constant("#define _SIGPIPE ", SIGPIPE);
- constant("#define _SIGALRM ", SIGALRM);
- constant("#define _SIGTERM ", SIGTERM);
- constant("#define _SIGUSR1 ", SIGUSR1);
- constant("#define _SIGUSR2 ", SIGUSR2);
- constant("#define _SIGCHLD ", SIGCHLD);
- constant("#define _SIGPWR ", SIGPWR);
- constant("#define _SIGWINCH ", SIGWINCH);
- constant("#define _SIGURG ", SIGURG);
- constant("#define _SIGIO ", SIGIO);
- constant("#define _SIGSTOP ", SIGSTOP);
- constant("#define _SIGTSTP ", SIGTSTP);
- constant("#define _SIGCONT ", SIGCONT);
- constant("#define _SIGTTIN ", SIGTTIN);
- constant("#define _SIGTTOU ", SIGTTOU);
- constant("#define _SIGVTALRM ", SIGVTALRM);
- constant("#define _SIGPROF ", SIGPROF);
- constant("#define _SIGXCPU ", SIGXCPU);
- constant("#define _SIGXFSZ ", SIGXFSZ);
- linefeed;
+ COMMENT("Linux signal numbers.");
+ DEFINE(_SIGHUP, SIGHUP);
+ DEFINE(_SIGINT, SIGINT);
+ DEFINE(_SIGQUIT, SIGQUIT);
+ DEFINE(_SIGILL, SIGILL);
+ DEFINE(_SIGTRAP, SIGTRAP);
+ DEFINE(_SIGIOT, SIGIOT);
+ DEFINE(_SIGABRT, SIGABRT);
+ DEFINE(_SIGEMT, SIGEMT);
+ DEFINE(_SIGFPE, SIGFPE);
+ DEFINE(_SIGKILL, SIGKILL);
+ DEFINE(_SIGBUS, SIGBUS);
+ DEFINE(_SIGSEGV, SIGSEGV);
+ DEFINE(_SIGSYS, SIGSYS);
+ DEFINE(_SIGPIPE, SIGPIPE);
+ DEFINE(_SIGALRM, SIGALRM);
+ DEFINE(_SIGTERM, SIGTERM);
+ DEFINE(_SIGUSR1, SIGUSR1);
+ DEFINE(_SIGUSR2, SIGUSR2);
+ DEFINE(_SIGCHLD, SIGCHLD);
+ DEFINE(_SIGPWR, SIGPWR);
+ DEFINE(_SIGWINCH, SIGWINCH);
+ DEFINE(_SIGURG, SIGURG);
+ DEFINE(_SIGIO, SIGIO);
+ DEFINE(_SIGSTOP, SIGSTOP);
+ DEFINE(_SIGTSTP, SIGTSTP);
+ DEFINE(_SIGCONT, SIGCONT);
+ DEFINE(_SIGTTIN, SIGTTIN);
+ DEFINE(_SIGTTOU, SIGTTOU);
+ DEFINE(_SIGVTALRM, SIGVTALRM);
+ DEFINE(_SIGPROF, SIGPROF);
+ DEFINE(_SIGXCPU, SIGXCPU);
+ DEFINE(_SIGXFSZ, SIGXFSZ);
+ BLANK();
+}
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+void output_octeon_cop2_state_defines(void)
+{
+ COMMENT("Octeon specific octeon_cop2_state offsets.");
+ OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv);
+ OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length);
+ OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly);
+ OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat);
+ OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv);
+ OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key);
+ OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result);
+ OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0);
+ OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv);
+ OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key);
+ OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen);
+ OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result);
+ OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult);
+ OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly);
+ OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
+ OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
+ OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
+ OFFSET(THREAD_CP2, task_struct, thread.cp2);
+ OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_HIBERNATION
+void output_pbe_defines(void)
+{
+ COMMENT(" Linux struct pbe offsets. ");
+ OFFSET(PBE_ADDRESS, pbe, address);
+ OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
+ OFFSET(PBE_NEXT, pbe, next);
+ DEFINE(PBE_SIZE, sizeof(struct pbe));
+ BLANK();
}
+#endif
-void output_irq_cpustat_t_defines(void)
+#ifdef CONFIG_CPU_PM
+void output_pm_defines(void)
{
- text("/* Linux irq_cpustat_t offsets. */");
- offset("#define IC_SOFTIRQ_PENDING ", irq_cpustat_t, __softirq_pending);
- size("#define IC_IRQ_CPUSTAT_T ", irq_cpustat_t);
- linefeed;
+ COMMENT(" PM offsets. ");
+#ifdef CONFIG_EVA
+ OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
+ OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
+ OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
+#endif
+ OFFSET(SSS_SP, mips_static_suspend_state, sp);
+ BLANK();
}
+#endif
+
+void output_kvm_defines(void)
+{
+ COMMENT(" KVM/MIPS Specfic offsets. ");
+ DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
+ OFFSET(VCPU_RUN, kvm_vcpu, run);
+ OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
+
+ OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
+ OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
+
+ OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
+ OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
+
+ OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
+ OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
+ OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
+ OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
+
+ OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
+
+ OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
+ OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
+ OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
+ OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
+ OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
+ OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
+ OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
+ OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
+ OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
+ OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
+ OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
+ OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
+ OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
+ OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
+ OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
+ OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
+ OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
+ OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
+ OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
+ OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
+ OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
+ OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
+ OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
+ OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
+ OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
+ OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
+ OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
+ OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
+ OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
+ OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
+ OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
+ OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
+ OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
+ OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
+ OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+ OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
+ OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
+ OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
+
+ OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
+ OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
+ BLANK();
+}
+
+#ifdef CONFIG_MIPS_CPS
+void output_cps_defines(void)
+{
+ COMMENT(" MIPS CPS offsets. ");
+
+ OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
+ OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
+ DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
+
+ OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
+ OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
+ OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
+ DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
+}
+#endif
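The OFFSET()/DEFINE()/BLANK()/COMMENT() helpers that this hunk converts to are the generic ones from include/linux/kbuild.h: each emits a magic "->" marker through inline asm, and a Kbuild sed script turns the compiler's asm output into a header of #defines. A minimal sketch, assuming the stock kbuild.h definitions:

#include <linux/stddef.h>	/* offsetof() */

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() \
	asm volatile("\n->" : : )
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))
#define COMMENT(x) \
	asm volatile("\n->#" x)

Compiling a file of such calls with -S yields asm lines like "->THREAD_FCR31 264 thread.fpu.fcr31" (the numeric value is illustrative), which the build script rewrites mechanically into "#define THREAD_FCR31 264".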
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 4a9f1ecefaf..1188e00bb12 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -6,7 +6,7 @@
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
@@ -48,12 +48,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
+#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
@@ -66,8 +67,8 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
@@ -87,10 +88,9 @@ struct elf_prpsinfo32
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
-#define elf_addr_t u32
#define elf_caddr_t u32
#define init_elf_binfmt init_elfn32_binfmt
@@ -99,12 +99,12 @@ static __inline__ void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
{
/*
- * Convert jiffies to nanoseconds and seperate with
+ * Convert jiffies to nanoseconds and separate with
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
- long rem;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+ u32 rem;
+ value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
@@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index e3181377989..7faf5f2bee2 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -6,7 +6,7 @@
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
@@ -28,6 +28,18 @@ typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
+ * In order to be sure that we don't attempt to execute an O32 binary which
+ * requires 64-bit FP (FR=1) on a system which does not support it, we refuse
+ * to execute any binary which has bits specified by the following macro set
+ * in its ELF header flags.
+ */
+#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
+# define __MIPS_O32_FP64_MUST_BE_ZERO 0
+#else
+# define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64
+#endif
+
+/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
@@ -44,18 +56,42 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
__res = 0; \
+ if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \
+ __res = 0; \
\
__res; \
})
+#ifdef CONFIG_KVM_GUEST
+#define TASK32_SIZE 0x3fff8000UL
+#else
#define TASK32_SIZE 0x7fff8000UL
+#endif
#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
+
+/*
+ * When this file is selected, we are definitely running a 64-bit kernel,
+ * so use the compat register definitions from asm/reg.h.
+ */
+#define WANT_COMPAT_REG_H
+
+/* These MUST be defined before elf.h gets included */
+extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
+#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
+#define ELF_CORE_COPY_TASK_REGS(_tsk, _dest) \
+({ \
+ int __res = 1; \
+ elf32_core_copy_regs(*(_dest), task_pt_regs(_tsk)); \
+ __res; \
+})
+
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
+#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
@@ -68,8 +104,8 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
@@ -89,10 +125,9 @@ struct elf_prpsinfo32
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
-#define elf_addr_t u32
#define elf_caddr_t u32
#define init_elf_binfmt init_elf32_binfmt
@@ -101,18 +136,15 @@ static inline void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
{
/*
- * Convert jiffies to nanoseconds and seperate with
+ * Convert jiffies to nanoseconds and separate with
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
- long rem;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+ u32 rem;
+ value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
-#undef ELF_CORE_COPY_REGS
-#define ELF_CORE_COPY_REGS(_dest,_regs) elf32_core_copy_regs(_dest,_regs);
-
void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
int i;
@@ -144,4 +176,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
new file mode 100644
index 00000000000..290c23b5167
--- /dev/null
+++ b/arch/mips/kernel/bmips_vec.S
@@ -0,0 +1,285 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * Reset/NMI/re-entry vectors for BMIPS processors
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/cpu.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+#include <asm/hazards.h>
+#include <asm/bmips.h>
+
+ .macro BARRIER
+ .set mips32
+ _ssnop
+ _ssnop
+ _ssnop
+ .set mips0
+ .endm
+
+/***********************************************************************
+ * Alternate CPU1 startup vector for BMIPS4350
+ *
+ * On some systems the bootloader has already started CPU1 and configured
+ * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
+ * triggered by the SW1 interrupt. If that is the case we try to move
+ * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
+ ***********************************************************************/
+
+LEAF(bmips_smp_movevec)
+ la k0, 1f
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+
+1:
+ /* clear IV, pending IPIs */
+ mtc0 zero, CP0_CAUSE
+
+ /* re-enable IRQs to wait for SW1 */
+ li k0, ST0_IE | ST0_BEV | STATUSF_IP1
+ mtc0 k0, CP0_STATUS
+
+ /* set up CPU1 CBR; move BASE to 0xa000_0000 */
+ li k0, 0xff400000
+ mtc0 k0, $22, 6
+ /* set up relocation vector address based on thread ID */
+ mfc0 k1, $22, 3
+ srl k1, 16
+ andi k1, 0x8000
+ or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
+ or k0, k1
+ li k1, 0xa0080000
+ sw k1, 0(k0)
+
+ /* wait here for SW1 interrupt from bmips_boot_secondary() */
+ wait
+
+ la k0, bmips_reset_nmi_vec
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+END(bmips_smp_movevec)
+
+/***********************************************************************
+ * Reset/NMI vector
+ * For BMIPS processors that can relocate their exception vectors, this
+ * entire function gets copied to 0x8000_0000.
+ ***********************************************************************/
+
+NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
+ .set push
+ .set noat
+ .align 4
+
+#ifdef CONFIG_SMP
+ /* if the NMI bit is clear, assume this is a CPU1 reset instead */
+ li k1, (1 << 19)
+ mfc0 k0, CP0_STATUS
+ and k0, k1
+ beqz k0, bmips_smp_entry
+
+#if defined(CONFIG_CPU_BMIPS5000)
+ mfc0 k0, CP0_PRID
+ li k1, PRID_IMP_BMIPS5000
+ andi k0, 0xff00
+ bne k0, k1, 1f
+
+ /* if we're not on core 0, this must be the SMP boot signal */
+ li k1, (3 << 25)
+ mfc0 k0, $22
+ and k0, k1
+ bnez k0, bmips_smp_entry
+1:
+#endif /* CONFIG_CPU_BMIPS5000 */
+#endif /* CONFIG_SMP */
+
+ /* nope, it's just a regular NMI */
+ SAVE_ALL
+ move a0, sp
+
+ /* clear EXL, ERL, BEV so that TLB refills still work */
+ mfc0 k0, CP0_STATUS
+ li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
+ or k0, k1
+ xor k0, k1
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* jump to the NMI handler function */
+ la k0, nmi_handler
+ jr k0
+
+ RESTORE_ALL
+ .set arch=r4000
+ eret
+
+/***********************************************************************
+ * CPU1 reset vector (used for the initial boot only)
+ * This is still part of bmips_reset_nmi_vec().
+ ***********************************************************************/
+
+#ifdef CONFIG_SMP
+
+bmips_smp_entry:
+
+ /* set up CP0 STATUS; enable FPU */
+ li k0, 0x30000000
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* set local CP0 CONFIG to make kseg0 cacheable, write-back */
+ mfc0 k0, CP0_CONFIG
+ ori k0, 0x07
+ xori k0, 0x04
+ mtc0 k0, CP0_CONFIG
+
+ mfc0 k0, CP0_PRID
+ andi k0, 0xff00
+#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ li k1, PRID_IMP_BMIPS43XX
+ bne k0, k1, 2f
+
+ /* initialize CPU1's local I-cache */
+ li k0, 0x80000000
+ li k1, 0x80010000
+ mtc0 zero, $28
+ mtc0 zero, $28, 1
+ BARRIER
+
+1: cache Index_Store_Tag_I, 0(k0)
+ addiu k0, 16
+ bne k0, k1, 1b
+
+ b 3f
+2:
+#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+ /* set exception vector base */
+ li k1, PRID_IMP_BMIPS5000
+ bne k0, k1, 3f
+
+ la k0, ebase
+ lw k0, 0(k0)
+ mtc0 k0, $15, 1
+ BARRIER
+#endif /* CONFIG_CPU_BMIPS5000 */
+3:
+ /* jump back to kseg0 in case we need to remap the kseg1 area */
+ la k0, 1f
+ jr k0
+1:
+ la k0, bmips_enable_xks01
+ jalr k0
+
+ /* use temporary stack to set up upper memory TLB */
+ li sp, BMIPS_WARM_RESTART_VEC
+ la k0, plat_wired_tlb_setup
+ jalr k0
+
+ /* switch to permanent stack and continue booting */
+
+ .global bmips_secondary_reentry
+bmips_secondary_reentry:
+ la k0, bmips_smp_boot_sp
+ lw sp, 0(k0)
+ la k0, bmips_smp_boot_gp
+ lw gp, 0(k0)
+ la k0, start_secondary
+ jr k0
+
+#endif /* CONFIG_SMP */
+
+ .align 4
+ .global bmips_reset_nmi_vec_end
+bmips_reset_nmi_vec_end:
+
+END(bmips_reset_nmi_vec)
+
+ .set pop
+ .previous
+
+/***********************************************************************
+ * CPU1 warm restart vector (used for second and subsequent boots).
+ * Also used for S2 standby recovery (PM).
+ * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
+ ***********************************************************************/
+
+LEAF(bmips_smp_int_vec)
+
+ .align 4
+ mfc0 k0, CP0_STATUS
+ ori k0, 0x01
+ xori k0, 0x01
+ mtc0 k0, CP0_STATUS
+ eret
+
+ .align 4
+ .global bmips_smp_int_vec_end
+bmips_smp_int_vec_end:
+
+END(bmips_smp_int_vec)
+
+/***********************************************************************
+ * XKS01 support
+ * Certain CPUs support extending kseg0 to 1024MB.
+ ***********************************************************************/
+
+LEAF(bmips_enable_xks01)
+
+#if defined(CONFIG_XKS01)
+ mfc0 t0, CP0_PRID
+ andi t2, t0, 0xff00
+#if defined(CONFIG_CPU_BMIPS4380)
+ li t1, PRID_IMP_BMIPS43XX
+ bne t2, t1, 1f
+
+ andi t0, 0xff
+ addiu t1, t0, -PRID_REV_BMIPS4380_HI
+ bgtz t1, 2f
+ addiu t0, -PRID_REV_BMIPS4380_LO
+ bltz t0, 2f
+
+ mfc0 t0, $22, 3
+ li t1, 0x1ff0
+ li t2, (1 << 12) | (1 << 9)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 3
+ BARRIER
+ b 2f
+1:
+#endif /* CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+ li t1, PRID_IMP_BMIPS5000
+ bne t2, t1, 2f
+
+ mfc0 t0, $22, 5
+ li t1, 0x01ff
+ li t2, (1 << 8) | (1 << 5)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 5
+ BARRIER
+#endif /* CONFIG_CPU_BMIPS5000 */
+2:
+#endif /* defined(CONFIG_XKS01) */
+
+ jr ra
+
+END(bmips_enable_xks01)
+
+ .previous
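Each vector above is a position-independent blob delimited by a *_end label precisely so that C code can relocate it. A rough sketch of how a caller might install the warm-restart vector, assuming the copy-and-flush approach the comments describe (the function name is illustrative):

#include <linux/string.h>
#include <asm/bmips.h>
#include <asm/cacheflush.h>

extern char bmips_smp_int_vec, bmips_smp_int_vec_end;

/* Sketch: copy the vector blob to its fixed address, then sync I-fetch. */
static void install_warm_restart_vec(void)
{
	char *dst = (char *)BMIPS_WARM_RESTART_VEC;
	size_t len = &bmips_smp_int_vec_end - &bmips_smp_int_vec;

	memcpy(dst, &bmips_smp_int_vec, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}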
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 76fd3f22c76..7b2df224f04 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -9,37 +9,404 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/module.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
/*
- * Compute the return address and do emulate branch simulation, if required.
+ * Calculate and return the exception PC in case of a branch delay slot,
+ * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
*/
-int __compute_return_epc(struct pt_regs *regs)
+int __isa_exception_epc(struct pt_regs *regs)
+{
+ unsigned short inst;
+ long epc = regs->cp0_epc;
+
+ /* Calculate exception PC in branch delay slot. */
+ if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
+ /* This should never happen because the delay slot was checked. */
+ force_sig(SIGSEGV, current);
+ return epc;
+ }
+ if (cpu_has_mips16) {
+ if (((union mips16e_instruction)inst).ri.opcode
+ == MIPS16e_jal_op)
+ epc += 4;
+ else
+ epc += 2;
+ } else if (mm_insn_16bit(inst))
+ epc += 2;
+ else
+ epc += 4;
+
+ return epc;
+}
+
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
{
- unsigned int *addr, bit, fcr31, dspcontrol;
+ union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+ int bc_false = 0;
+ unsigned int fcr31;
+ unsigned int bit;
+
+ if (!cpu_has_mmips)
+ return 0;
+
+ switch (insn.mm_i_format.opcode) {
+ case mm_pool32a_op:
+ if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+ mm_pool32axf_op) {
+ switch (insn.mm_i_format.simmediate >>
+ MM_POOL32A_MINOR_SHIFT) {
+ case mm_jalr_op:
+ case mm_jalrhb_op:
+ case mm_jalrs_op:
+ case mm_jalrshb_op:
+ if (insn.mm_i_format.rt != 0) /* Not mm_jr */
+ regs->regs[insn.mm_i_format.rt] =
+ regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ }
+ }
+ break;
+ case mm_pool32i_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_bltzals_op:
+ case mm_bltzal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_bltz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bgezals_op:
+ case mm_bgezal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_bgez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_blez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bgtz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] > 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bc2f_op:
+ case mm_bc1f_op:
+ bc_false = 1;
+ /* Fall through */
+ case mm_bc2t_op:
+ case mm_bc1t_op:
+ preempt_disable();
+ if (is_fpu_owner())
+ asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ else
+ fcr31 = current->thread.fpu.fcr31;
+ preempt_enable();
+
+ if (bc_false)
+ fcr31 = ~fcr31;
+
+ bit = (insn.mm_i_format.rs >> 2);
+ bit += (bit != 0);
+ bit += 23;
+ if (fcr31 & (1 << bit))
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ }
+ break;
+ case mm_pool16c_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_jalr16_op:
+ case mm_jalrs16_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_jr16_op:
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ }
+ break;
+ case mm_beqz16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_bnez16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_b16_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ (insn.mm_b0_format.simmediate << 1);
+ return 1;
+ case mm_beq32_op:
+ if (regs->regs[insn.mm_i_format.rs] ==
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bne32_op:
+ if (regs->regs[insn.mm_i_format.rs] !=
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_jalx32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 28;
+ *contpc <<= 28;
+ *contpc |= (insn.j_format.target << 2);
+ return 1;
+ case mm_jals32_op:
+ case mm_jal32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_j32_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 27;
+ *contpc <<= 27;
+ *contpc |= (insn.j_format.target << 1);
+ set_isa16_mode(*contpc);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Compute return address and emulate branch in microMIPS mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __microMIPS_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *pc16;
+ u16 halfword;
+ unsigned int word;
+ unsigned long contpc;
+ struct mm_decoded_insn mminsn = { 0 };
+
+ mminsn.micro_mips_mode = 1;
+
+ /* This load never faults. */
+ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 2;
+ word = ((unsigned int)halfword << 16);
+ mminsn.pc_inc = 2;
+
+ if (!mm_insn_16bit(halfword)) {
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 4;
+ mminsn.pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.insn = word;
+
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 2;
+ word = ((unsigned int)halfword << 16);
+
+ if (!mm_insn_16bit(halfword)) {
+ pc16++;
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.next_insn = word;
+
+ mm_isBranchInstr(regs, mminsn, &contpc);
+
+ regs->cp0_epc = contpc;
+
+ return 0;
+
+sigsegv:
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+}
+
+/*
+ * Compute return address and emulate branch in MIPS16e mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *addr;
+ union mips16e_instruction inst;
+ u16 inst2;
+ u32 fullinst;
long epc;
- union mips_instruction insn;
epc = regs->cp0_epc;
- if (epc & 3)
- goto unaligned;
- /*
- * Read the instruction
- */
- addr = (unsigned int *) epc;
- if (__get_user(insn.word, addr)) {
+ /* Read the instruction. */
+ addr = (u16 __user *)msk_isa16_mode(epc);
+ if (__get_user(inst.full, addr)) {
force_sig(SIGSEGV, current);
return -EFAULT;
}
- regs->regs[0] = 0;
+ switch (inst.ri.opcode) {
+ case MIPS16e_extend_op:
+ regs->cp0_epc += 4;
+ return 0;
+
+ /*
+ * JAL and JALX in MIPS16e mode
+ */
+ case MIPS16e_jal_op:
+ addr += 1;
+ if (__get_user(inst2, addr)) {
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+ }
+ fullinst = ((unsigned)inst.full << 16) | inst2;
+ regs->regs[31] = epc + 6;
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ /*
+ * JAL:5 X:1 TARGET[20:16]:5 TARGET[25:21]:5 TARGET[15:0]:16
+ *
+ * ......TARGET[15:0].................TARGET[20:16]...........
+ * ......TARGET[25:21]
+ */
+ epc |=
+ ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
+ ((fullinst & 0x1f0000) << 7);
+ if (!inst.jal.x)
+ set_isa16_mode(epc); /* Set ISA mode bit. */
+ regs->cp0_epc = epc;
+ return 0;
+
+ /*
+ * J(AL)R(C)
+ */
+ case MIPS16e_rr_op:
+ if (inst.rr.func == MIPS16e_jr_func) {
+
+ if (inst.rr.ra)
+ regs->cp0_epc = regs->regs[31];
+ else
+ regs->cp0_epc =
+ regs->regs[reg16to32[inst.rr.rx]];
+
+ if (inst.rr.l) {
+ if (inst.rr.nd)
+ regs->regs[31] = epc + 2;
+ else
+ regs->regs[31] = epc + 4;
+ }
+ return 0;
+ }
+ break;
+ }
+
+ /*
+ * All other cases have no branch delay slot and are 16-bits.
+ * Branches do not cause an exception.
+ */
+ regs->cp0_epc += 2;
+
+ return 0;
+}
+
+/**
+ * __compute_return_epc_for_insn - Compute the return address and emulate
+ * the branch, if required.
+ *
+ * @regs: Pointer to pt_regs
+ * @insn: branch instruction to decode
+ * @returns: -EFAULT on error (after forcing a SIGBUS); on success,
+ * 0 or BRANCH_LIKELY_TAKEN as appropriate after
+ * evaluating the branch.
+ */
+int __compute_return_epc_for_insn(struct pt_regs *regs,
+ union mips_instruction insn)
+{
+ unsigned int bit, fcr31, dspcontrol;
+ long epc = regs->cp0_epc;
+ int ret = 0;
+
switch (insn.i_format.opcode) {
/*
* jr and jalr are in r_format format.
@@ -62,20 +429,24 @@ int __compute_return_epc(struct pt_regs *regs)
*/
case bcond_op:
switch (insn.i_format.rt) {
- case bltz_op:
+ case bltz_op:
case bltzl_op:
- if ((long)regs->regs[insn.i_format.rs] < 0)
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bltzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
case bgez_op:
case bgezl_op:
- if ((long)regs->regs[insn.i_format.rs] >= 0)
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bgezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -83,9 +454,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bltzal_op:
case bltzall_op:
regs->regs[31] = epc + 8;
- if ((long)regs->regs[insn.i_format.rs] < 0)
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bltzall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -93,12 +466,15 @@ int __compute_return_epc(struct pt_regs *regs)
case bgezal_op:
case bgezall_op:
regs->regs[31] = epc + 8;
- if ((long)regs->regs[insn.i_format.rs] >= 0)
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bgezall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
+
case bposge32_op:
if (!cpu_has_dsp)
goto sigill;
@@ -125,6 +501,8 @@ int __compute_return_epc(struct pt_regs *regs)
epc <<= 28;
epc |= (insn.j_format.target << 2);
regs->cp0_epc = epc;
+ if (insn.i_format.opcode == jalx_op)
+ set_isa16_mode(regs->cp0_epc);
break;
/*
@@ -133,9 +511,11 @@ int __compute_return_epc(struct pt_regs *regs)
case beq_op:
case beql_op:
if (regs->regs[insn.i_format.rs] ==
- regs->regs[insn.i_format.rt])
+ regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == beql_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -143,9 +523,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bne_op:
case bnel_op:
if (regs->regs[insn.i_format.rs] !=
- regs->regs[insn.i_format.rt])
+ regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == bnel_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -153,9 +535,11 @@ int __compute_return_epc(struct pt_regs *regs)
case blez_op: /* not really i_format */
case blezl_op:
/* rt field assumed to be zero */
- if ((long)regs->regs[insn.i_format.rs] <= 0)
+ if ((long)regs->regs[insn.i_format.rs] <= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == blezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -163,9 +547,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bgtz_op:
case bgtzl_op:
/* rt field assumed to be zero */
- if ((long)regs->regs[insn.i_format.rs] > 0)
+ if ((long)regs->regs[insn.i_format.rs] > 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == bgtzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -176,7 +562,11 @@ int __compute_return_epc(struct pt_regs *regs)
case cop1_op:
preempt_disable();
if (is_fpu_owner())
- asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ asm volatile(
+ ".set push\n"
+ "\t.set mips1\n"
+ "\tcfc1\t%0,$31\n"
+ "\t.set pop" : "=r" (fcr31));
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
@@ -185,36 +575,96 @@ int __compute_return_epc(struct pt_regs *regs)
bit += (bit != 0);
bit += 23;
switch (insn.i_format.rt & 3) {
- case 0: /* bc1f */
- case 2: /* bc1fl */
- if (~fcr31 & (1 << bit))
+ case 0: /* bc1f */
+ case 2: /* bc1fl */
+ if (~fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == 2)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
- case 1: /* bc1t */
- case 3: /* bc1tl */
- if (fcr31 & (1 << bit))
+ case 1: /* bc1t */
+ case 3: /* bc1tl */
+ if (fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == 3)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
}
break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32))) == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32)))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+#endif
}
- return 0;
+ return ret;
-unaligned:
- printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+sigill:
+ printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
-sigill:
- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
+int __compute_return_epc(struct pt_regs *regs)
+{
+ unsigned int __user *addr;
+ long epc;
+ union mips_instruction insn;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ /*
+ * Read the instruction
+ */
+ addr = (unsigned int __user *) epc;
+ if (__get_user(insn.word, addr)) {
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+ }
+
+ return __compute_return_epc_for_insn(regs, insn);
+
+unaligned:
+ printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
}
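Every conditional case above shares one piece of arithmetic: a taken branch resumes at epc + 4 + (simmediate << 2), i.e. relative to the delay-slot instruction, while a not-taken branch steps over both the branch and its delay slot. Distilled into a standalone sketch (illustrative helper, not part of the patch):

/* Sketch: MIPS I-type branch target computation. */
static unsigned long branch_target(unsigned long epc, short simm, int taken)
{
	if (taken)
		return epc + 4 + ((long)simm << 2);	/* delay-slot relative */
	return epc + 8;					/* skip branch + delay slot */
}

/* Worked example: a branch at 0x400 with simm == -1 targets
 * 0x400 + 4 + (-1 << 2) == 0x400, i.e. it loops back to itself.
 */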
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
new file mode 100644
index 00000000000..7976457184b
--- /dev/null
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2000,2001,2004 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#include <asm/sibyte/bcm1480_scd.h>
+
+#include <asm/sibyte/sb1250.h>
+
+#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
+#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1
+#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2
+
+/*
+ * The general purpose timer ticks at 1 MHz independently of
+ * the rest of the system.
+ */
+static void sibyte_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
+ cfg);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Stop the timer until we actually program a shot */
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ __raw_writeq(0, cfg);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
+ case CLOCK_EVT_MODE_RESUME:
+ ;
+ }
+}
+
+static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq(delta - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE, cfg);
+
+ return 0;
+}
+
+static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = dev_id;
+ void __iomem *cfg;
+ unsigned long tmode;
+
+ if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+ tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
+ else
+ tmode = 0;
+
+ /* ACK interrupt */
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ ____raw_writeq(tmode, cfg);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
+static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
+static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
+
+void sb1480_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
+ struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
+ struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
+ unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
+
+ BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
+
+ sprintf(name, "bcm1480-counter-%d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(2, cd);
+ cd->rating = 200;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = sibyte_next_event;
+ cd->set_mode = sibyte_set_mode;
+ clockevents_register_device(cd);
+
+ bcm1480_mask_irq(cpu, irq);
+
+ /*
+ * Map the timer interrupt to IP[4] of this cpu
+ */
+ __raw_writeq(IMR_IP4_VAL,
+ IOADDR(A_BCM1480_IMR_REGISTER(cpu,
+ R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3)));
+
+ bcm1480_unmask_irq(cpu, irq);
+
+ action->handler = sibyte_counter_handler;
+ action->flags = IRQF_PERCPU | IRQF_TIMER;
+ action->name = name;
+ action->dev_id = cd;
+
+ irq_set_affinity(irq, cpumask_of(cpu));
+ setup_irq(irq, action);
+}
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
new file mode 100644
index 00000000000..ff1f01b7227
--- /dev/null
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -0,0 +1,130 @@
+/*
+ * DS1287 clockevent driver
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mc146818rtc.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+
+int ds1287_timer_state(void)
+{
+ return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
+}
+
+int ds1287_set_base_clock(unsigned int hz)
+{
+ u8 rate;
+
+ switch (hz) {
+ case 128:
+ rate = 0x9;
+ break;
+ case 256:
+ rate = 0x8;
+ break;
+ case 1024:
+ rate = 0x6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A);
+
+ return 0;
+}
+
+static int ds1287_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ return -EINVAL;
+}
+
+static void ds1287_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u8 val;
+
+ spin_lock(&rtc_lock);
+
+ val = CMOS_READ(RTC_REG_B);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ val |= RTC_PIE;
+ break;
+ default:
+ val &= ~RTC_PIE;
+ break;
+ }
+
+ CMOS_WRITE(val, RTC_REG_B);
+
+ spin_unlock(&rtc_lock);
+}
+
+static void ds1287_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device ds1287_clockevent = {
+ .name = "ds1287",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = ds1287_set_next_event,
+ .set_mode = ds1287_set_mode,
+ .event_handler = ds1287_event_handler,
+};
+
+static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &ds1287_clockevent;
+
+ /* Ack the RTC interrupt. */
+ CMOS_READ(RTC_REG_C);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction ds1287_irqaction = {
+ .handler = ds1287_interrupt,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "ds1287",
+};
+
+int __init ds1287_clockevent_init(int irq)
+{
+ struct clock_event_device *cd;
+
+ cd = &ds1287_clockevent;
+ cd->rating = 100;
+ cd->irq = irq;
+ clockevent_set_clock(cd, 32768);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->cpumask = cpumask_of(0);
+
+ clockevents_register_device(&ds1287_clockevent);
+
+ return setup_irq(irq, &ds1287_irqaction);
+}
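The rate values hard-coded in ds1287_set_base_clock() follow the MC146818/DS1287 periodic-interrupt encoding, where frequency = 32768 >> (rate - 1) over the valid rate-select range: 0x6 -> 1024 Hz, 0x8 -> 256 Hz, 0x9 -> 128 Hz. A sketch that derives the rate instead of enumerating it (illustrative only):

#include <linux/errno.h>

/* Sketch: derive the DS1287 rate-select value for a periodic frequency. */
static int hz_to_ds1287_rate(unsigned int hz)
{
	unsigned int rate;

	for (rate = 3; rate <= 15; rate++)	/* rates 1-2 are anomalous */
		if ((32768U >> (rate - 1)) == hz)
			return rate;		/* e.g. 1024 Hz -> 0x6 */
	return -EINVAL;
}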
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644
index 00000000000..6093716980b
--- /dev/null
+++ b/arch/mips/kernel/cevt-gic.c
@@ -0,0 +1,105 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+#include <asm/mips-boards/maltaint.h>
+
+DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
+int gic_timer_irq_installed;
+
+
+static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ u64 cnt;
+ int res;
+
+ cnt = gic_read_count();
+ cnt += (u64)delta;
+ gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
+ res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
+ return res;
+}
+
+void gic_set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ gic_write_compare(gic_read_compare());
+ cd = &per_cpu(gic_clockevent_device, cpu);
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+struct irqaction gic_compare_irqaction = {
+ .handler = gic_compare_interrupt,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "timer",
+};
+
+
+void gic_event_handler(struct clock_event_device *dev)
+{
+}
+
+int gic_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq;
+
+ if (!cpu_has_counter || !gic_frequency)
+ return -ENXIO;
+
+ irq = MIPS_GIC_IRQ_BASE;
+
+ cd = &per_cpu(gic_clockevent_device, cpu);
+
+ cd->name = "MIPS GIC";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP;
+
+ clockevent_set_clock(cd, gic_frequency);
+
+ /* Calculate the min / max delta */
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = gic_next_event;
+ cd->set_mode = gic_set_clock_mode;
+ cd->event_handler = gic_event_handler;
+
+ clockevents_register_device(cd);
+
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
+
+ if (gic_timer_irq_installed)
+ return 0;
+
+ gic_timer_irq_installed = 1;
+
+ setup_irq(irq, &gic_compare_irqaction);
+ irq_set_handler(irq, handle_percpu_irq);
+ return 0;
+}
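gic_next_event() uses the same read-back idiom as mips_next_event() further down: after programming the compare register it re-reads the counter and returns -ETIME if the deadline has already passed, so the clockevents core can retry with a larger delta. The signed cast makes the comparison wrap-safe. The idiom in isolation (the read_count callback is an assumption for self-containment):

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: detect a compare deadline that the counter has already passed. */
static int deadline_missed(u64 programmed_cnt, u64 (*read_count)(void))
{
	/* Signed 32-bit difference is wrap-safe for deltas below 2^31 ticks. */
	return ((int)(read_count() - programmed_cnt) >= 0) ? -ETIME : 0;
}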
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
new file mode 100644
index 00000000000..f069460751a
--- /dev/null
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -0,0 +1,141 @@
+/*
+ * GT641xx clockevent routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+
+#include <asm/gt64120.h>
+#include <asm/time.h>
+
+static DEFINE_RAW_SPINLOCK(gt641xx_timer_lock);
+static unsigned int gt641xx_base_clock;
+
+void gt641xx_set_base_clock(unsigned int clock)
+{
+ gt641xx_base_clock = clock;
+}
+
+int gt641xx_timer0_state(void)
+{
+ if (GT_READ(GT_TC0_OFS))
+ return 0;
+
+ GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
+ GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK);
+
+ return 1;
+}
+
+static int gt641xx_timer0_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+
+ GT_WRITE(GT_TC0_OFS, delta);
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+
+ return 0;
+}
+
+static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+ break;
+ default:
+ break;
+ }
+
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+}
+
+static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device gt641xx_timer0_clockevent = {
+ .name = "gt641xx-timer0",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .irq = GT641XX_TIMER0_IRQ,
+ .set_next_event = gt641xx_timer0_set_next_event,
+ .set_mode = gt641xx_timer0_set_mode,
+ .event_handler = gt641xx_timer0_event_handler,
+};
+
+static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &gt641xx_timer0_clockevent;
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction gt641xx_timer0_irqaction = {
+ .handler = gt641xx_timer0_interrupt,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "gt641xx_timer0",
+};
+
+static int __init gt641xx_timer0_clockevent_init(void)
+{
+ struct clock_event_device *cd;
+
+ if (!gt641xx_base_clock)
+ return 0;
+
+ GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
+
+ cd = &gt641xx_timer0_clockevent;
+ cd->rating = 200 + gt641xx_base_clock / 10000000;
+ clockevent_set_clock(cd, gt641xx_base_clock);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->cpumask = cpumask_of(0);
+
+ clockevents_register_device(&gt641xx_timer0_clockevent);
+
+ return setup_irq(GT641XX_TIMER0_IRQ, &gt641xx_timer0_irqaction);
+}
+arch_initcall(gt641xx_timer0_clockevent_init);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
new file mode 100644
index 00000000000..bc127e22fda
--- /dev/null
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -0,0 +1,210 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+#include <asm/gic.h>
+
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cnt;
+ int res;
+
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
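+	/*
+	 * If Count has already raced past the new Compare value the event
+	 * was missed; the signed subtraction makes this check safe across
+	 * counter wraparound for deltas below 2^31 cycles.
+	 */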
+ res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
+ return res;
+}
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+ const int r2 = cpu_has_mips_r2;
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run
+ * the performance counter interrupt handler anyway.
+ */
+ if (handle_perf_irq(r2))
+ goto out;
+
+ /*
+ * The same applies to performance counter interrupts. But with the
+ * above we now know that the reason we got here must be a timer
+	 * interrupt. Being the paranoiacs we are, we check anyway.
+ */
+ if (!r2 || (read_c0_cause() & (1 << 30))) {
+ /* Clear Count/Compare Interrupt */
+ write_c0_compare(read_c0_compare());
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+struct irqaction c0_compare_irqaction = {
+ .handler = c0_compare_interrupt,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "timer",
+};
+
+
+void mips_event_handler(struct clock_event_device *dev)
+{
+}
+
+/*
+ * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
+ */
+static int c0_compare_int_pending(void)
+{
+#ifdef CONFIG_IRQ_GIC
+ if (cpu_has_veic)
+ return gic_get_timer_pending();
+#endif
+ return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
+}
+
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so wait up to a worst-case number of cycle counter ticks for timer
+ * interrupt changes to propagate to the Cause register.
+ */
+#define COMPARE_INT_SEEN_TICKS 50
+
+int c0_compare_int_usable(void)
+{
+ unsigned int delta;
+ unsigned int cnt;
+
+#ifdef CONFIG_KVM_GUEST
+ return 1;
+#endif
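+
+	/*
+	 * The probe below has three steps: ack any stale IP7, arm a short
+	 * timeout and wait for IP7 to rise, then check that writing Compare
+	 * clears it again.  Failing any step means the timer interrupt is
+	 * not usable.
+	 */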
+
+ /*
+ * IP7 already pending? Try to clear it by acking the timer.
+ */
+ if (c0_compare_int_pending()) {
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (!c0_compare_int_pending())
+ break;
+ if (c0_compare_int_pending())
+ return 0;
+ }
+
+ for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ if ((int)(read_c0_count() - cnt) < 0)
+ break;
+ /* increase delta if the timer was already expired */
+ }
+
+ while ((int)(read_c0_count() - cnt) <= 0)
+ ; /* Wait for expiry */
+
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (c0_compare_int_pending())
+ break;
+ if (!c0_compare_int_pending())
+ return 0;
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (!c0_compare_int_pending())
+ break;
+ if (c0_compare_int_pending())
+ return 0;
+
+ /*
+ * Feels like a real count / compare timer.
+ */
+ return 1;
+}
+
+int r4k_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq;
+
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+
+ if (!c0_compare_int_usable())
+ return -ENXIO;
+
+ /*
+ * With vectored interrupts things are getting platform specific.
+ * get_c0_compare_int is a hook to allow a platform to return the
+	 * interrupt number of its liking.
+ */
+ irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+ if (get_c0_compare_int)
+ irq = get_c0_compare_int();
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PERCPU;
+
+ clockevent_set_clock(cd, mips_hpt_frequency);
+
+ /* Calculate the min / max delta */
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = mips_next_event;
+ cd->set_mode = mips_set_clock_mode;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_register_device(cd);
+
+ if (cp0_timer_irq_installed)
+ return 0;
+
+ cp0_timer_irq_installed = 1;
+
+ setup_irq(irq, &c0_compare_irqaction);
+
+ return 0;
+}
+
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
new file mode 100644
index 00000000000..5ea6d6b1de1
--- /dev/null
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+#define IMR_IP2_VAL K_INT_MAP_I0
+#define IMR_IP3_VAL K_INT_MAP_I1
+#define IMR_IP4_VAL K_INT_MAP_I2
+
+/*
+ * The general purpose timer ticks at 1MHz independently of
+ * the rest of the system.
+ */
+static void sibyte_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
+ cfg);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Stop the timer until we actually program a shot */
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ __raw_writeq(0, cfg);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
+ case CLOCK_EVT_MODE_RESUME:
+ ;
+ }
+}
+
+static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
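+	/*
+	 * Program delta - 1: judging by the (V_SCD_TIMER_FREQ / HZ) - 1 in
+	 * sibyte_set_mode(), the SCD timer counts init + 1 ticks.
+	 */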
+ __raw_writeq(delta - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE, cfg);
+
+ return 0;
+}
+
+static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = dev_id;
+ void __iomem *cfg;
+ unsigned long tmode;
+
+ if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+ tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
+ else
+ tmode = 0;
+
+ /* ACK interrupt */
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ ____raw_writeq(tmode, cfg);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
+static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
+static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
+
+void sb1250_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int irq = K_INT_TIMER_0 + cpu;
+ struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
+ struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
+ unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
+
+	/* Only have 4 general purpose timers, and we use the last one as hpt */
+ BUG_ON(cpu > 2);
+
+ sprintf(name, "sb1250-counter-%d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(2, cd);
+ cd->rating = 200;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = sibyte_next_event;
+ cd->set_mode = sibyte_set_mode;
+ clockevents_register_device(cd);
+
+ sb1250_mask_irq(cpu, irq);
+
+ /*
+ * Map the timer interrupt to IP[4] of this cpu
+ */
+ __raw_writeq(IMR_IP4_VAL,
+ IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
+ (irq << 3)));
+
+ sb1250_unmask_irq(cpu, irq);
+
+ action->handler = sibyte_counter_handler;
+ action->flags = IRQF_PERCPU | IRQF_TIMER;
+ action->name = name;
+ action->dev_id = cd;
+
+ irq_set_affinity(irq, cpumask_of(cpu));
+ setup_irq(irq, action);
+}
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
new file mode 100644
index 00000000000..2ae08462e46
--- /dev/null
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -0,0 +1,193 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Based on linux/arch/mips/kernel/cevt-r4k.c,
+ * linux/arch/mips/jmr3927/rbhma3100/setup.c
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/time.h>
+#include <asm/txx9tmr.h>
+
+#define TCR_BASE (TXx9_TMTCR_CCDE | TXx9_TMTCR_CRE | TXx9_TMTCR_TMODE_ITVL)
+#define TIMER_CCD 0 /* 1/2 */
+#define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD))
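+/* With TIMER_CCD == 0 this is imclk / 2; e.g. a 66MHz IM bus clock
+ * gives a 33MHz timer clock. */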
+
+struct txx9_clocksource {
+ struct clocksource cs;
+ struct txx9_tmr_reg __iomem *tmrptr;
+};
+
+static cycle_t txx9_cs_read(struct clocksource *cs)
+{
+ struct txx9_clocksource *txx9_cs =
+ container_of(cs, struct txx9_clocksource, cs);
+ return __raw_readl(&txx9_cs->tmrptr->trr);
+}
+
+/* Use a width one bit smaller than the timer so the full range of that width can be used */
+#define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1)
+
+static struct txx9_clocksource txx9_clocksource = {
+ .cs = {
+ .name = "TXx9",
+ .rating = 200,
+ .read = txx9_cs_read,
+ .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ },
+};
+
+void __init txx9_clocksource_init(unsigned long baseaddr,
+ unsigned int imbusclk)
+{
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk));
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ __raw_writel(TCR_BASE, &tmrptr->tcr);
+ __raw_writel(0, &tmrptr->tisr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr);
+ __raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ txx9_clocksource.tmrptr = tmrptr;
+}
+
+struct txx9_clock_event_device {
+ struct clock_event_device cd;
+ struct txx9_tmr_reg __iomem *tmrptr;
+};
+
+static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
+{
+ /* stop and reset counter */
+ __raw_writel(TCR_BASE, &tmrptr->tcr);
+ /* clear pending interrupt */
+ __raw_writel(0, &tmrptr->tisr);
+}
+
+static void txx9tmr_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE,
+ &tmrptr->itmr);
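+		/*
+		 * (NSEC_PER_SEC / HZ) * mult >> shift converts one tick
+		 * period from nanoseconds to timer cycles, the inverse of
+		 * clockevent_delta2ns().
+		 */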
+ /* start timer */
+ __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >>
+ evt->shift,
+ &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ __raw_writel(0, &tmrptr->itmr);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ break;
+ }
+}
+
+static int txx9tmr_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ /* start timer */
+ __raw_writel(delta, &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ return 0;
+}
+
+static struct txx9_clock_event_device txx9_clock_event_device = {
+ .cd = {
+ .name = "TXx9",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_mode = txx9tmr_set_mode,
+ .set_next_event = txx9tmr_set_next_event,
+ },
+};
+
+static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
+{
+ struct txx9_clock_event_device *txx9_cd = dev_id;
+ struct clock_event_device *cd = &txx9_cd->cd;
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction txx9tmr_irq = {
+ .handler = txx9tmr_interrupt,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "txx9tmr",
+ .dev_id = &txx9_clock_event_device,
+};
+
+void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
+ unsigned int imbusclk)
+{
+ struct clock_event_device *cd = &txx9_clock_event_device.cd;
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ txx9_clock_event_device.tmrptr = tmrptr;
+
+ clockevent_set_clock(cd, TIMER_CLK(imbusclk));
+ cd->max_delta_ns =
+ clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
+ cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
+ cd->irq = irq;
+	cd->cpumask = cpumask_of(0);
+ clockevents_register_device(cd);
+ setup_irq(irq, &txx9tmr_irq);
+ printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
+ baseaddr, irq);
+}
+
+void __init txx9_tmr_init(unsigned long baseaddr)
+{
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ /* Start once to make CounterResetEnable effective */
+ __raw_writel(TXx9_TMTCR_CRE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ /* Stop and reset the counter */
+ __raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr);
+ __raw_writel(0, &tmrptr->tisr);
+ __raw_writel(0xffffffff, &tmrptr->cpra);
+ __raw_writel(0, &tmrptr->itmr);
+ __raw_writel(0, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->pgmr);
+ iounmap(tmrptr);
+}
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
new file mode 100644
index 00000000000..6f4f739dad9
--- /dev/null
+++ b/arch/mips/kernel/cps-vec.S
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
+
+#define GCR_CL_COHERENCE_OFS 0x2008
+#define GCR_CL_ID_OFS 0x2028
+
+.extern mips_cm_base
+
+.set noreorder
+
+ /*
+ * Set dest to non-zero if the core supports the MT ASE, else zero. If
+ * MT is not supported then branch to nomt.
+ */
+ .macro has_mt dest, nomt
+ mfc0 \dest, CP0_CONFIG
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 1
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 2
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 3
+ andi \dest, \dest, MIPS_CONF3_MT
+ beqz \dest, \nomt
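+	/*
+	 * Each bgez above tests bit 31, the 'M' bit that flags the
+	 * presence of the next Config register; if it is clear, Config3
+	 * (and with it MIPS_CONF3_MT) is not implemented.
+	 */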
+ .endm
+
+.section .text.cps-vec
+.balign 0x1000
+
+LEAF(mips_cps_core_entry)
+ /*
+ * These first 12 bytes will be patched by cps_smp_setup to load the
+ * base address of the CM GCRs into register v1 and the CCA to use into
+ * register s0.
+ */
+ .quad 0
+ .word 0
+
+ /* Check whether we're here due to an NMI */
+ mfc0 k0, CP0_STATUS
+ and k0, k0, ST0_NMI
+ beqz k0, not_nmi
+ nop
+
+ /* This is an NMI */
+ la k0, nmi_handler
+ jr k0
+ nop
+
+not_nmi:
+ /* Setup Cause */
+ li t0, CAUSEF_IV
+ mtc0 t0, CP0_CAUSE
+
+ /* Setup Status */
+ li t0, ST0_CU1 | ST0_CU0
+ mtc0 t0, CP0_STATUS
+
+ /*
+ * Clear the bits used to index the caches. Note that the architecture
+	 * dictates that writes to the TagLo and TagHi registers (selects 0
+	 * and 2) should be valid for all MIPS32 CPUs, even those for which
+	 * said writes are unnecessary.
+ */
+ mtc0 zero, CP0_TAGLO, 0
+ mtc0 zero, CP0_TAGHI, 0
+ mtc0 zero, CP0_TAGLO, 2
+ mtc0 zero, CP0_TAGHI, 2
+ ehb
+
+ /* Primary cache configuration is indicated by Config1 */
+ mfc0 v0, CP0_CONFIG, 1
+
+ /* Detect I-cache line size */
+ _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+ beqz t0, icache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect I-cache size */
+ _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addi t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == I-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+ addi t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
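+	/* t1 now == sets/way * line size * ways: the I-cache size in bytes */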
+
+ li a0, KSEG0
+ add a1, a0, t1
+1: cache Index_Store_Tag_I, 0(a0)
+ add a0, a0, t0
+ bne a0, a1, 1b
+ nop
+icache_done:
+
+ /* Detect D-cache line size */
+ _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+ beqz t0, dcache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect D-cache size */
+ _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addi t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == D-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+ addi t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, KSEG0
+ addu a1, a0, t1
+ subu a1, a1, t0
+1: cache Index_Store_Tag_D, 0(a0)
+ bne a0, a1, 1b
+ add a0, a0, t0
+dcache_done:
+
+ /* Set Kseg0 CCA to that in s0 */
+ mfc0 t0, CP0_CONFIG
+ ori t0, 0x7
+ xori t0, 0x7
+ or t0, t0, s0
+ mtc0 t0, CP0_CONFIG
+ ehb
+
+ /* Enter the coherent domain */
+ li t0, 0xff
+ sw t0, GCR_CL_COHERENCE_OFS(v1)
+ ehb
+
+ /* Jump to kseg0 */
+ la t0, 1f
+ jr t0
+ nop
+
+ /*
+ * We're up, cached & coherent. Perform any further required core-level
+ * initialisation.
+ */
+1: jal mips_cps_core_init
+ nop
+
+ /*
+ * Boot any other VPEs within this core that should be online, and
+ * deactivate this VPE if it should be offline.
+ */
+ jal mips_cps_boot_vpes
+ nop
+
+ /* Off we go! */
+ lw t1, VPEBOOTCFG_PC(v0)
+ lw gp, VPEBOOTCFG_GP(v0)
+ lw sp, VPEBOOTCFG_SP(v0)
+ jr t1
+ nop
+ END(mips_cps_core_entry)
+
+.org 0x200
+LEAF(excep_tlbfill)
+ b .
+ nop
+ END(excep_tlbfill)
+
+.org 0x280
+LEAF(excep_xtlbfill)
+ b .
+ nop
+ END(excep_xtlbfill)
+
+.org 0x300
+LEAF(excep_cache)
+ b .
+ nop
+ END(excep_cache)
+
+.org 0x380
+LEAF(excep_genex)
+ b .
+ nop
+ END(excep_genex)
+
+.org 0x400
+LEAF(excep_intex)
+ b .
+ nop
+ END(excep_intex)
+
+.org 0x480
+LEAF(excep_ejtag)
+ la k0, ejtag_debug_handler
+ jr k0
+ nop
+ END(excep_ejtag)
+
+LEAF(mips_cps_core_init)
+#ifdef CONFIG_MIPS_MT
+ /* Check that the core implements the MT ASE */
+ has_mt t0, 3f
+ nop
+
+ .set push
+ .set mt
+
+ /* Only allow 1 TC per VPE to execute... */
+ dmt
+
+ /* ...and for the moment only 1 VPE */
+ dvpe
+ la t1, 1f
+ jr.hb t1
+ nop
+
+ /* Enter VPE configuration state */
+1: mfc0 t0, CP0_MVPCONTROL
+ ori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+ /* Retrieve the number of VPEs within the core */
+ mfc0 t0, CP0_MVPCONF0
+ srl t0, t0, MVPCONF0_PVPE_SHIFT
+ andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+ addi t7, t0, 1
+
+ /* If there's only 1, we're done */
+ beqz t0, 2f
+ nop
+
+ /* Loop through each VPE within this core */
+ li t5, 1
+
+1: /* Operate on the appropriate TC */
+ mtc0 t5, CP0_VPECONTROL
+ ehb
+
+ /* Bind TC to VPE (1:1 TC:VPE mapping) */
+ mttc0 t5, CP0_TCBIND
+
+ /* Set exclusive TC, non-active, master */
+ li t0, VPECONF0_MVP
+ sll t1, t5, VPECONF0_XTC_SHIFT
+ or t0, t0, t1
+ mttc0 t0, CP0_VPECONF0
+
+ /* Set TC non-active, non-allocatable */
+ mttc0 zero, CP0_TCSTATUS
+
+ /* Set TC halted */
+ li t0, TCHALT_H
+ mttc0 t0, CP0_TCHALT
+
+ /* Next VPE */
+ addi t5, t5, 1
+ slt t0, t5, t7
+ bnez t0, 1b
+ nop
+
+ /* Leave VPE configuration state */
+2: mfc0 t0, CP0_MVPCONTROL
+ xori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+3: .set pop
+#endif
+ jr ra
+ nop
+ END(mips_cps_core_init)
+
+LEAF(mips_cps_boot_vpes)
+ /* Retrieve CM base address */
+ la t0, mips_cm_base
+ lw t0, 0(t0)
+
+	/* Calculate a pointer to this core's struct core_boot_config */
+ lw t0, GCR_CL_ID_OFS(t0)
+ li t1, COREBOOTCFG_SIZE
+ mul t0, t0, t1
+ la t1, mips_cps_core_bootcfg
+ lw t1, 0(t1)
+ addu t0, t0, t1
+
+	/* Calculate this VPE's ID. If the core doesn't support MT, use 0 */
+ has_mt t6, 1f
+ li t9, 0
+
+ /* Find the number of VPEs present in the core */
+ mfc0 t1, CP0_MVPCONF0
+ srl t1, t1, MVPCONF0_PVPE_SHIFT
+ andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+ addi t1, t1, 1
+
+ /* Calculate a mask for the VPE ID from EBase.CPUNum */
+ clz t1, t1
+ li t2, 31
+ subu t1, t2, t1
+ li t2, 1
+ sll t1, t2, t1
+ addiu t1, t1, -1
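+	/*
+	 * t1 = (1 << (31 - clz(nvpes))) - 1, a mask wide enough for any
+	 * VPE ID when the VPE count is a power of two.
+	 */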
+
+ /* Retrieve the VPE ID from EBase.CPUNum */
+ mfc0 t9, $15, 1
+ and t9, t9, t1
+
+1:	/* Calculate a pointer to this VPE's struct vpe_boot_config */
+ li t1, VPEBOOTCFG_SIZE
+ mul v0, t9, t1
+ lw t7, COREBOOTCFG_VPECONFIG(t0)
+ addu v0, v0, t7
+
+#ifdef CONFIG_MIPS_MT
+
+ /* If the core doesn't support MT then return */
+ bnez t6, 1f
+ nop
+ jr ra
+ nop
+
+ .set push
+ .set mt
+
+1: /* Enter VPE configuration state */
+ dvpe
+ la t1, 1f
+ jr.hb t1
+ nop
+1: mfc0 t1, CP0_MVPCONTROL
+ ori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+
+ /* Loop through each VPE */
+ lw t6, COREBOOTCFG_VPEMASK(t0)
+ move t8, t6
+ li t5, 0
+
+ /* Check whether the VPE should be running. If not, skip it */
+1: andi t0, t6, 1
+ beqz t0, 2f
+ nop
+
+ /* Operate on the appropriate TC */
+ mfc0 t0, CP0_VPECONTROL
+ ori t0, t0, VPECONTROL_TARGTC
+ xori t0, t0, VPECONTROL_TARGTC
+ or t0, t0, t5
+ mtc0 t0, CP0_VPECONTROL
+ ehb
+
+ /* Skip the VPE if its TC is not halted */
+ mftc0 t0, CP0_TCHALT
+ beqz t0, 2f
+ nop
+
+	/* Calculate a pointer to the VPE's struct vpe_boot_config */
+ li t0, VPEBOOTCFG_SIZE
+ mul t0, t0, t5
+ addu t0, t0, t7
+
+ /* Set the TC restart PC */
+ lw t1, VPEBOOTCFG_PC(t0)
+ mttc0 t1, CP0_TCRESTART
+
+ /* Set the TC stack pointer */
+ lw t1, VPEBOOTCFG_SP(t0)
+ mttgpr t1, sp
+
+ /* Set the TC global pointer */
+ lw t1, VPEBOOTCFG_GP(t0)
+ mttgpr t1, gp
+
+ /* Copy config from this VPE */
+ mfc0 t0, CP0_CONFIG
+ mttc0 t0, CP0_CONFIG
+
+ /* Ensure no software interrupts are pending */
+ mttc0 zero, CP0_CAUSE
+ mttc0 zero, CP0_STATUS
+
+ /* Set TC active, not interrupt exempt */
+ mftc0 t0, CP0_TCSTATUS
+ li t1, ~TCSTATUS_IXMT
+ and t0, t0, t1
+ ori t0, t0, TCSTATUS_A
+ mttc0 t0, CP0_TCSTATUS
+
+ /* Clear the TC halt bit */
+ mttc0 zero, CP0_TCHALT
+
+ /* Set VPE active */
+ mftc0 t0, CP0_VPECONF0
+ ori t0, t0, VPECONF0_VPA
+ mttc0 t0, CP0_VPECONF0
+
+ /* Next VPE */
+2: srl t6, t6, 1
+ addi t5, t5, 1
+ bnez t6, 1b
+ nop
+
+ /* Leave VPE configuration state */
+ mfc0 t1, CP0_MVPCONTROL
+ xori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+ evpe
+
+ /* Check whether this VPE is meant to be running */
+ li t0, 1
+ sll t0, t0, t9
+ and t0, t0, t8
+ bnez t0, 2f
+ nop
+
+ /* This VPE should be offline, halt the TC */
+ li t0, TCHALT_H
+ mtc0 t0, CP0_TCHALT
+ la t0, 1f
+1: jr.hb t0
+ nop
+
+2: .set pop
+
+#endif /* CONFIG_MIPS_MT */
+
+ /* Return */
+ jr ra
+ nop
+ END(mips_cps_boot_vpes)
+
+#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
+
+	/* Calculate a pointer to this CPU's struct mips_static_suspend_state */
+ .macro psstate dest
+ .set push
+ .set noat
+ lw $1, TI_CPU(gp)
+ sll $1, $1, LONGLOG
+ la \dest, __per_cpu_offset
+ addu $1, $1, \dest
+ lw $1, 0($1)
+ la \dest, cps_cpu_state
+ addu \dest, \dest, $1
+ .set pop
+ .endm
+
+LEAF(mips_cps_pm_save)
+ /* Save CPU state */
+ SUSPEND_SAVE_REGS
+ psstate t1
+ SUSPEND_SAVE_STATIC
+ jr v0
+ nop
+ END(mips_cps_pm_save)
+
+LEAF(mips_cps_pm_restore)
+ /* Restore CPU state */
+ psstate t1
+ RESUME_RESTORE_STATIC
+ RESUME_RESTORE_REGS_RETURN
+ END(mips_cps_pm_restore)
+
+#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index c09337b947b..2d80b5f1aea 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -1,11 +1,12 @@
/*
- * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2003, 2004, 2007 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -16,7 +17,16 @@
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
-#include <asm/system.h>
+#include <asm/setup.h>
+
+static char bug64hit[] __initdata =
+ "reliable operation impossible!\n%s";
+static char nowar[] __initdata =
+ "Please report to <linux-mips@linux-mips.org>.";
+static char r4kwar[] __initdata =
+ "Enable CPU_R4000_WORKAROUNDS to rectify.";
+static char daddiwar[] __initdata =
+ "Enable CPU_DADDI_WORKAROUNDS to rectify.";
static inline void align_mod(const int align, const int mod)
{
@@ -29,7 +39,7 @@ static inline void align_mod(const int align, const int mod)
".endr\n\t"
".set pop"
:
- : "n" (align), "n" (mod));
+ : GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
}
static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
@@ -64,7 +74,7 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
: "0" (5), "1" (8), "2" (5));
align_mod(align, mod);
/*
- * The trailing nop is needed to fullfill the two-instruction
+ * The trailing nop is needed to fulfill the two-instruction
 * requirement between reading hi/lo and starting a mult/div.
 * Leaving it out may cause gas to insert a nop itself, breaking
* the desired alignment of the next chunk.
@@ -75,9 +85,9 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
".set noreorder\n\t"
".set nomacro\n\t"
"mult %2, %3\n\t"
- "dsll32 %0, %4, %5\n\t"
+ "dsll32 %0, %4, %5\n\t"
"mflo $0\n\t"
- "dsll32 %1, %4, %5\n\t"
+ "dsll32 %1, %4, %5\n\t"
"nop\n\t"
".set pop"
: "=&r" (lv1), "=r" (lw)
@@ -155,21 +165,19 @@ static inline void check_mult_sh(void)
}
printk("no.\n");
- panic("Reliable operation impossible!\n"
-#ifndef CONFIG_CPU_R4000
- "Configure for R4000 to enable the workaround."
-#else
- "Please report to <linux-mips@linux-mips.org>."
-#endif
- );
+ panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}
-static volatile int daddi_ov __initdata = 0;
+static volatile int daddi_ov;
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
daddi_ov = 1;
regs->cp0_epc += 4;
+ exception_exit(prev_state);
}
static inline void check_daddi(void)
@@ -233,15 +241,11 @@ static inline void check_daddi(void)
}
printk("no.\n");
- panic("Reliable operation impossible!\n"
-#if !defined(CONFIG_CPU_R4000) && !defined(CONFIG_CPU_R4400)
- "Configure for R4000 or R4400 to enable the workaround."
-#else
- "Please report to <linux-mips@linux-mips.org>."
-#endif
- );
+ panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
+int daddiu_bug = -1;
+
static inline void check_daddiu(void)
{
long v, w, tmp;
@@ -274,14 +278,16 @@ static inline void check_daddiu(void)
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
- "daddiu %0, %2, %4\n\t"
+ "daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2\n\t"
".set pop"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
- if (v == w) {
+ daddiu_bug = v != w;
+
+ if (!daddiu_bug) {
printk("no.\n");
return;
}
@@ -291,7 +297,7 @@ static inline void check_daddiu(void)
asm volatile(
"addiu %2, $0, %3\n\t"
"dsrl %2, %2, 1\n\t"
- "daddiu %0, %2, %4\n\t"
+ "daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
@@ -303,18 +309,16 @@ static inline void check_daddiu(void)
}
printk("no.\n");
- panic("Reliable operation impossible!\n"
-#if !defined(CONFIG_CPU_R4000) && !defined(CONFIG_CPU_R4400)
- "Configure for R4000 or R4400 to enable the workaround."
-#else
- "Please report to <linux-mips@linux-mips.org>."
-#endif
- );
+ panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-void __init check_bugs64(void)
+void __init check_bugs64_early(void)
{
check_mult_sh();
- check_daddi();
check_daddiu();
}
+
+void __init check_bugs64(void)
+{
+ check_daddi();
+}
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 9fbf8430c84..d74f957c561 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -4,7 +4,7 @@
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
- * Copyright (C) 2001, 2004 MIPS Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -14,165 +14,68 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
+#include <linux/smp.h>
#include <linux/stddef.h>
+#include <linux/export.h>
+#include <asm/bugs.h>
#include <asm/cpu.h>
+#include <asm/cpu-type.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
-#include <asm/system.h>
+#include <asm/mipsmtregs.h>
+#include <asm/msa.h>
+#include <asm/watch.h>
+#include <asm/elf.h>
+#include <asm/spram.h>
+#include <asm/uaccess.h>
-/*
- * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
- * the implementation of the "wait" feature differs between CPU families. This
- * points to the function that implements CPU specific wait.
- * The wait instruction stops the pipeline and reduces the power consumption of
- * the CPU very much.
- */
-void (*cpu_wait)(void) = NULL;
-
-static void r3081_wait(void)
-{
- unsigned long cfg = read_c0_conf();
- write_c0_conf(cfg | R30XX_CONF_HALT);
-}
-
-static void r39xx_wait(void)
-{
- local_irq_disable();
- if (!need_resched())
- write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
- local_irq_enable();
-}
+static int mips_fpu_disabled;
-/*
- * There is a race when WAIT instruction executed with interrupt
- * enabled.
- * But it is implementation-dependent wheter the pipelie restarts when
- * a non-enabled interrupt is requested.
- */
-static void r4k_wait(void)
+static int __init fpu_disable(char *s)
{
- __asm__(" .set mips3 \n"
- " wait \n"
- " .set mips0 \n");
-}
+ cpu_data[0].options &= ~MIPS_CPU_FPU;
+ mips_fpu_disabled = 1;
-/*
- * This variant is preferable as it allows testing need_resched and going to
- * sleep depending on the outcome atomically. Unfortunately the "It is
- * implementation-dependent whether the pipeline restarts when a non-enabled
- * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
- * using this version a gamble.
- */
-static void r4k_wait_irqoff(void)
-{
- local_irq_disable();
- if (!need_resched())
- __asm__(" .set mips3 \n"
- " wait \n"
- " .set mips0 \n");
- local_irq_enable();
+ return 1;
}
-/* The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter. */
-int allow_au1k_wait;
+__setup("nofpu", fpu_disable);
-static void au1k_wait(void)
-{
- /* using the wait instruction makes CP0 counter unusable */
- __asm__(" .set mips3 \n"
- " cache 0x14, 0(%0) \n"
- " cache 0x14, 32(%0) \n"
- " sync \n"
- " nop \n"
- " wait \n"
- " nop \n"
- " nop \n"
- " nop \n"
- " nop \n"
- " .set mips0 \n"
- : : "r" (au1k_wait));
-}
+int mips_dsp_disabled;
-static int __initdata nowait = 0;
-
-int __init wait_disable(char *s)
+static int __init dsp_disable(char *s)
{
- nowait = 1;
+ cpu_data[0].ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+ mips_dsp_disabled = 1;
return 1;
}
-__setup("nowait", wait_disable);
+__setup("nodsp", dsp_disable);
-static inline void check_wait(void)
+static inline void check_errata(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- printk("Checking for 'wait' instruction... ");
- if (nowait) {
- printk (" disabled.\n");
- return;
- }
-
- switch (c->cputype) {
- case CPU_R3081:
- case CPU_R3081E:
- cpu_wait = r3081_wait;
- printk(" available.\n");
- break;
- case CPU_TX3927:
- cpu_wait = r39xx_wait;
- printk(" available.\n");
- break;
- case CPU_R4200:
-/* case CPU_R4300: */
- case CPU_R4600:
- case CPU_R4640:
- case CPU_R4650:
- case CPU_R4700:
- case CPU_R5000:
- case CPU_NEVADA:
- case CPU_RM7000:
- case CPU_RM9000:
- case CPU_4KC:
- case CPU_4KEC:
- case CPU_4KSC:
- case CPU_5KC:
-/* case CPU_20KC:*/
- case CPU_24K:
- case CPU_25KF:
+ switch (current_cpu_type()) {
case CPU_34K:
- case CPU_74K:
- case CPU_PR4450:
- cpu_wait = r4k_wait;
- printk(" available.\n");
- break;
- case CPU_TX49XX:
- cpu_wait = r4k_wait_irqoff;
- printk(" available.\n");
- break;
- case CPU_AU1000:
- case CPU_AU1100:
- case CPU_AU1500:
- case CPU_AU1550:
- case CPU_AU1200:
- if (allow_au1k_wait) {
- cpu_wait = au1k_wait;
- printk(" available.\n");
- } else
- printk(" unavailable.\n");
+ /*
+ * Erratum "RPS May Cause Incorrect Instruction Execution"
+	 * This code only handles VPE0; any SMP/RTOS code
+	 * making use of VPE1 will be responsible for that VPE.
+ */
+ if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
+ write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS);
break;
default:
- printk(" unavailable.\n");
break;
}
}
void __init check_bugs32(void)
{
- check_wait();
+ check_errata();
}
/*
@@ -197,6 +100,12 @@ static inline int cpu_has_confreg(void)
#endif
}
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+ if (cpu == 0)
+ __elf_platform = plat;
+}
+
/*
* Get the FPU Implementation/Revision.
*/
@@ -205,7 +114,7 @@ static inline unsigned long cpu_get_fpu_id(void)
unsigned long tmp, fpu_id;
tmp = read_c0_status();
- __enable_fpu();
+ __enable_fpu(FPU_AS_IS);
fpu_id = read_32bit_cp1_register(CP1_REVISION);
write_c0_status(tmp);
return fpu_id;
@@ -216,103 +125,449 @@ static inline unsigned long cpu_get_fpu_id(void)
*/
static inline int __cpu_has_fpu(void)
{
- return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE);
+ return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
+}
+
+static inline unsigned long cpu_get_msa_id(void)
+{
+ unsigned long status, conf5, msa_id;
+
+ status = read_c0_status();
+ __enable_fpu(FPU_64BIT);
+ conf5 = read_c0_config5();
+ enable_msa();
+ msa_id = read_msa_ir();
+ write_c0_config5(conf5);
+ write_c0_status(status);
+ return msa_id;
+}
+
+static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+{
+#ifdef __NEED_VMBITS_PROBE
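+	/* Write the widest possible VPN2 into EntryHi and read it back;
+	 * fls64() of the surviving bits gives the implemented VA width. */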
+ write_c0_entryhi(0x3fffffffffffe000ULL);
+ back_to_back_c0_hazard();
+ c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+#endif
+}
+
+static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+{
+ switch (isa) {
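+	/*
+	 * Deliberate fall-through: each ISA level implies all the lower
+	 * levels in the same (32-bit or 64-bit) column, so the cases
+	 * accumulate option bits until the break.
+	 */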
+ case MIPS_CPU_ISA_M64R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2;
+ case MIPS_CPU_ISA_M64R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1;
+ case MIPS_CPU_ISA_V:
+ c->isa_level |= MIPS_CPU_ISA_V;
+ case MIPS_CPU_ISA_IV:
+ c->isa_level |= MIPS_CPU_ISA_IV;
+ case MIPS_CPU_ISA_III:
+ c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
+ break;
+
+ case MIPS_CPU_ISA_M32R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2;
+ case MIPS_CPU_ISA_M32R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1;
+ case MIPS_CPU_ISA_II:
+ c->isa_level |= MIPS_CPU_ISA_II;
+ break;
+ }
+}
+
+static char unknown_isa[] = KERN_ERR \
+ "Unsupported ISA type, c0.config0: %d.";
+
+static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+{
+ unsigned int config6;
+
+ /* It's implementation dependent how the FTLB can be enabled */
+ switch (c->cputype) {
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ /* proAptiv & related cores use Config6 to enable the FTLB */
+ config6 = read_c0_config6();
+ if (enable)
+ /* Enable FTLB */
+ write_c0_config6(config6 | MIPS_CONF6_FTLBEN);
+ else
+ /* Disable FTLB */
+ write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
+ back_to_back_c0_hazard();
+ break;
+ }
+}
+
+static inline unsigned int decode_config0(struct cpuinfo_mips *c)
+{
+ unsigned int config0;
+ int isa;
+
+ config0 = read_c0_config();
+
+ /*
+ * Look for Standard TLB or Dual VTLB and FTLB
+ */
+ if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
+ (((config0 & MIPS_CONF_MT) >> 7) == 4))
+ c->options |= MIPS_CPU_TLB;
+
+ isa = (config0 & MIPS_CONF_AT) >> 13;
+ switch (isa) {
+ case 0:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ set_isa(c, MIPS_CPU_ISA_M32R1);
+ break;
+ case 1:
+ set_isa(c, MIPS_CPU_ISA_M32R2);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ case 2:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ break;
+ case 1:
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ default:
+ goto unknown;
+ }
+
+ return config0 & MIPS_CONF_M;
+
+unknown:
+ panic(unknown_isa, config0);
+}
+
+static inline unsigned int decode_config1(struct cpuinfo_mips *c)
+{
+ unsigned int config1;
+
+ config1 = read_c0_config1();
+
+ if (config1 & MIPS_CONF1_MD)
+ c->ases |= MIPS_ASE_MDMX;
+ if (config1 & MIPS_CONF1_WR)
+ c->options |= MIPS_CPU_WATCH;
+ if (config1 & MIPS_CONF1_CA)
+ c->ases |= MIPS_ASE_MIPS16;
+ if (config1 & MIPS_CONF1_EP)
+ c->options |= MIPS_CPU_EJTAG;
+ if (config1 & MIPS_CONF1_FP) {
+ c->options |= MIPS_CPU_FPU;
+ c->options |= MIPS_CPU_32FPR;
+ }
+ if (cpu_has_tlb) {
+ c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
+ c->tlbsizevtlb = c->tlbsize;
+ c->tlbsizeftlbsets = 0;
+ }
+
+ return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config2(struct cpuinfo_mips *c)
+{
+ unsigned int config2;
+
+ config2 = read_c0_config2();
+
+ if (config2 & MIPS_CONF2_SL)
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+ return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config3(struct cpuinfo_mips *c)
+{
+ unsigned int config3;
+
+ config3 = read_c0_config3();
+
+ if (config3 & MIPS_CONF3_SM) {
+ c->ases |= MIPS_ASE_SMARTMIPS;
+ c->options |= MIPS_CPU_RIXI;
+ }
+ if (config3 & MIPS_CONF3_RXI)
+ c->options |= MIPS_CPU_RIXI;
+ if (config3 & MIPS_CONF3_DSP)
+ c->ases |= MIPS_ASE_DSP;
+ if (config3 & MIPS_CONF3_DSP2P)
+ c->ases |= MIPS_ASE_DSP2P;
+ if (config3 & MIPS_CONF3_VINT)
+ c->options |= MIPS_CPU_VINT;
+ if (config3 & MIPS_CONF3_VEIC)
+ c->options |= MIPS_CPU_VEIC;
+ if (config3 & MIPS_CONF3_MT)
+ c->ases |= MIPS_ASE_MIPSMT;
+ if (config3 & MIPS_CONF3_ULRI)
+ c->options |= MIPS_CPU_ULRI;
+ if (config3 & MIPS_CONF3_ISA)
+ c->options |= MIPS_CPU_MICROMIPS;
+ if (config3 & MIPS_CONF3_VZ)
+ c->ases |= MIPS_ASE_VZ;
+ if (config3 & MIPS_CONF3_SC)
+ c->options |= MIPS_CPU_SEGMENTS;
+ if (config3 & MIPS_CONF3_MSA)
+ c->ases |= MIPS_ASE_MSA;
+
+ return config3 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config4(struct cpuinfo_mips *c)
+{
+ unsigned int config4;
+ unsigned int newcf4;
+ unsigned int mmuextdef;
+ unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE;
+
+ config4 = read_c0_config4();
+
+ if (cpu_has_tlb) {
+ if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
+ c->options |= MIPS_CPU_TLBINV;
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ switch (mmuextdef) {
+ case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
+ c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+ c->tlbsizevtlb = c->tlbsize;
+ break;
+ case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
+ c->tlbsizevtlb +=
+ ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40;
+ c->tlbsize = c->tlbsizevtlb;
+ ftlb_page = MIPS_CONF4_VFTLBPAGESIZE;
+ /* fall through */
+ case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+ newcf4 = (config4 & ~ftlb_page) |
+ (page_size_ftlb(mmuextdef) <<
+ MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+ write_c0_config4(newcf4);
+ back_to_back_c0_hazard();
+ config4 = read_c0_config4();
+ if (config4 != newcf4) {
+ pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n",
+ PAGE_SIZE, config4);
+ /* Switch FTLB off */
+ set_ftlb_enable(c, 0);
+ break;
+ }
+ c->tlbsizeftlbsets = 1 <<
+ ((config4 & MIPS_CONF4_FTLBSETS) >>
+ MIPS_CONF4_FTLBSETS_SHIFT);
+ c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+ MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+ c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets;
+ break;
+ }
+ }
+
+ c->kscratch_mask = (config4 >> 16) & 0xff;
+
+ return config4 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config5(struct cpuinfo_mips *c)
+{
+ unsigned int config5;
+
+ config5 = read_c0_config5();
+ config5 &= ~MIPS_CONF5_UFR;
+ write_c0_config5(config5);
+
+ if (config5 & MIPS_CONF5_EVA)
+ c->options |= MIPS_CPU_EVA;
+
+ return config5 & MIPS_CONF_M;
+}
+
+static void decode_configs(struct cpuinfo_mips *c)
+{
+ int ok;
+
+ /* MIPS32 or MIPS64 compliant CPU. */
+ c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+
+ c->scache.flags = MIPS_CACHE_NOT_PRESENT;
+
+ /* Enable FTLB if present */
+ set_ftlb_enable(c, 1);
+
+ ok = decode_config0(c); /* Read Config registers. */
+ BUG_ON(!ok); /* Arch spec violation! */
+ if (ok)
+ ok = decode_config1(c);
+ if (ok)
+ ok = decode_config2(c);
+ if (ok)
+ ok = decode_config3(c);
+ if (ok)
+ ok = decode_config4(c);
+ if (ok)
+ ok = decode_config5(c);
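+	/*
+	 * Each decode_configN() returns that register's M bit, which flags
+	 * the presence of Config(N+1), so decoding stops at the first
+	 * config register the CPU does not implement.
+	 */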
+
+ mips_probe_watch_registers(c);
+
+#ifndef CONFIG_MIPS_CPS
+ if (cpu_has_mips_r2) {
+ c->core = get_ebase_cpunum();
+ if (cpu_has_mipsmt)
+ c->core >>= fls(core_nvpes()) - 1;
+ }
+#endif
}
#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
| MIPS_CPU_COUNTER)
-static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
+static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
{
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
- c->isa_level = MIPS_CPU_ISA_I;
+ __cpu_name[cpu] = "R2000";
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
- MIPS_CPU_NOFPUEX;
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
case PRID_IMP_R3000:
- if ((c->processor_id & 0xff) == PRID_REV_R3000A)
- if (cpu_has_confreg())
+ if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
+ if (cpu_has_confreg()) {
c->cputype = CPU_R3081E;
- else
+ __cpu_name[cpu] = "R3081";
+ } else {
c->cputype = CPU_R3000A;
- else
+ __cpu_name[cpu] = "R3000A";
+ }
+ } else {
c->cputype = CPU_R3000;
- c->isa_level = MIPS_CPU_ISA_I;
+ __cpu_name[cpu] = "R3000";
+ }
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
- MIPS_CPU_NOFPUEX;
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
case PRID_IMP_R4000:
if (read_c0_config() & CONF_SC) {
- if ((c->processor_id & 0xff) >= PRID_REV_R4400)
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
c->cputype = CPU_R4400PC;
- else
+ __cpu_name[cpu] = "R4400PC";
+ } else {
c->cputype = CPU_R4000PC;
+ __cpu_name[cpu] = "R4000PC";
+ }
} else {
- if ((c->processor_id & 0xff) >= PRID_REV_R4400)
- c->cputype = CPU_R4400SC;
- else
- c->cputype = CPU_R4000SC;
+ int cca = read_c0_config() & CONF_CM_CMASK;
+ int mc;
+
+ /*
+ * SC and MC versions can't be reliably told apart,
+ * but only the latter support coherent caching
+ * modes so assume the firmware has set the KSEG0
+ * coherency attribute reasonably (if uncached, we
+ * assume SC).
+ */
+ switch (cca) {
+ case CONF_CM_CACHABLE_CE:
+ case CONF_CM_CACHABLE_COW:
+ case CONF_CM_CACHABLE_CUW:
+ mc = 1;
+ break;
+ default:
+ mc = 0;
+ break;
+ }
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
+ c->cputype = mc ? CPU_R4400MC : CPU_R4400SC;
+ __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
+ } else {
+ c->cputype = mc ? CPU_R4000MC : CPU_R4000SC;
+ __cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
+ }
}
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_VCE |
- MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_VCE |
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_VR41XX:
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->options = R4K_OPTS;
+ c->tlbsize = 32;
switch (c->processor_id & 0xf0) {
case PRID_REV_VR4111:
c->cputype = CPU_VR4111;
+ __cpu_name[cpu] = "NEC VR4111";
break;
case PRID_REV_VR4121:
c->cputype = CPU_VR4121;
+ __cpu_name[cpu] = "NEC VR4121";
break;
case PRID_REV_VR4122:
- if ((c->processor_id & 0xf) < 0x3)
+ if ((c->processor_id & 0xf) < 0x3) {
c->cputype = CPU_VR4122;
- else
+ __cpu_name[cpu] = "NEC VR4122";
+ } else {
c->cputype = CPU_VR4181A;
+ __cpu_name[cpu] = "NEC VR4181A";
+ }
break;
case PRID_REV_VR4130:
- if ((c->processor_id & 0xf) < 0x4)
+ if ((c->processor_id & 0xf) < 0x4) {
c->cputype = CPU_VR4131;
- else
+ __cpu_name[cpu] = "NEC VR4131";
+ } else {
c->cputype = CPU_VR4133;
+ c->options |= MIPS_CPU_LLSC;
+ __cpu_name[cpu] = "NEC VR4133";
+ }
break;
default:
printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
c->cputype = CPU_VR41XX;
+ __cpu_name[cpu] = "NEC Vr41xx";
break;
}
- c->isa_level = MIPS_CPU_ISA_III;
- c->options = R4K_OPTS;
- c->tlbsize = 32;
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R4300";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R4600:
c->cputype = CPU_R4600;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R4600";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#if 0
- case PRID_IMP_R4650:
+ case PRID_IMP_R4650:
/*
* This processor doesn't have an MMU, so it's not
* "real easy" to run Linux on it. It is left purely
@@ -320,44 +575,46 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
 * its c0_prid id number with the TX3900.
*/
c->cputype = CPU_R4650;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R4650";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
- c->tlbsize = 48;
+ c->tlbsize = 48;
break;
#endif
case PRID_IMP_TX39:
- c->isa_level = MIPS_CPU_ISA_I;
c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
c->cputype = CPU_TX3927;
+ __cpu_name[cpu] = "TX3927";
c->tlbsize = 64;
} else {
- switch (c->processor_id & 0xff) {
+ switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_TX3912:
c->cputype = CPU_TX3912;
+ __cpu_name[cpu] = "TX3912";
c->tlbsize = 32;
break;
case PRID_REV_TX3922:
c->cputype = CPU_TX3922;
+ __cpu_name[cpu] = "TX3922";
c->tlbsize = 64;
break;
- default:
- c->cputype = CPU_UNKNOWN;
- break;
}
}
break;
case PRID_IMP_R4700:
c->cputype = CPU_R4700;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R4700";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_TX49:
c->cputype = CPU_TX49XX;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R49XX";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_LLSC;
if (!(c->processor_id & 0x08))
c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
@@ -365,406 +622,611 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
break;
case PRID_IMP_R5000:
c->cputype = CPU_R5000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R5000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5432:
c->cputype = CPU_R5432;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R5432";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5500:
c->cputype = CPU_R5500;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R5500";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_NEVADA:
c->cputype = CPU_NEVADA;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "Nevada";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R6000:
c->cputype = CPU_R6000;
- c->isa_level = MIPS_CPU_ISA_II;
+ __cpu_name[cpu] = "R6000";
+ set_isa(c, MIPS_CPU_ISA_II);
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R6000A:
c->cputype = CPU_R6000A;
- c->isa_level = MIPS_CPU_ISA_II;
+ __cpu_name[cpu] = "R6000A";
+ set_isa(c, MIPS_CPU_ISA_II);
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_RM7000:
c->cputype = CPU_RM7000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "RM7000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
/*
- * Undocumented RM7000: Bit 29 in the info register of
+ * Undocumented RM7000: Bit 29 in the info register of
* the RM7000 v2.0 indicates if the TLB has 48 or 64
* entries.
*
- * 29 1 => 64 entry JTLB
- * 0 => 48 entry JTLB
- */
- c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
- break;
- case PRID_IMP_RM9000:
- c->cputype = CPU_RM9000;
- c->isa_level = MIPS_CPU_ISA_IV;
- c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
- /*
- * Bit 29 in the info register of the RM9000
- * indicates if the TLB has 48 or 64 entries.
- *
- * 29 1 => 64 entry JTLB
- * 0 => 48 entry JTLB
+ * 29 1 => 64 entry JTLB
+ * 0 => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_R8000:
c->cputype = CPU_R8000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "RM8000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
c->tlbsize = 384; /* has weird TLB: 3-way x 128 */
break;
case PRID_IMP_R10000:
c->cputype = CPU_R10000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R10000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R12000:
c->cputype = CPU_R12000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R12000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
c->cputype = CPU_R14000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R14000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
- }
-}
-
-static char unknown_isa[] __initdata = KERN_ERR \
- "Unsupported ISA type, c0.config0: %d.";
-
-static inline unsigned int decode_config0(struct cpuinfo_mips *c)
-{
- unsigned int config0;
- int isa;
-
- config0 = read_c0_config();
-
- if (((config0 & MIPS_CONF_MT) >> 7) == 1)
- c->options |= MIPS_CPU_TLB;
- isa = (config0 & MIPS_CONF_AT) >> 13;
- switch (isa) {
- case 0:
- switch ((config0 & MIPS_CONF_AR) >> 10) {
- case 0:
- c->isa_level = MIPS_CPU_ISA_M32R1;
+ case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON2E:
+ c->cputype = CPU_LOONGSON2;
+ __cpu_name[cpu] = "ICT Loongson-2";
+ set_elf_platform(cpu, "loongson2e");
break;
- case 1:
- c->isa_level = MIPS_CPU_ISA_M32R2;
+ case PRID_REV_LOONGSON2F:
+ c->cputype = CPU_LOONGSON2;
+ __cpu_name[cpu] = "ICT Loongson-2";
+ set_elf_platform(cpu, "loongson2f");
break;
- default:
- goto unknown;
- }
- break;
- case 2:
- switch ((config0 & MIPS_CONF_AR) >> 10) {
- case 0:
- c->isa_level = MIPS_CPU_ISA_M64R1;
+ case PRID_REV_LOONGSON3A:
+ c->cputype = CPU_LOONGSON3;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
break;
- case 1:
- c->isa_level = MIPS_CPU_ISA_M64R2;
- break;
- default:
- goto unknown;
}
- break;
- default:
- goto unknown;
- }
-
- return config0 & MIPS_CONF_M;
-unknown:
- panic(unknown_isa, config0);
-}
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->options = R4K_OPTS |
+ MIPS_CPU_FPU | MIPS_CPU_LLSC |
+ MIPS_CPU_32FPR;
+ c->tlbsize = 64;
+ break;
+ case PRID_IMP_LOONGSON_32: /* Loongson-1 */
+ decode_configs(c);
-static inline unsigned int decode_config1(struct cpuinfo_mips *c)
-{
- unsigned int config1;
+ c->cputype = CPU_LOONGSON1;
- config1 = read_c0_config1();
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON1B:
+ __cpu_name[cpu] = "Loongson 1B";
+ break;
+ }
- if (config1 & MIPS_CONF1_MD)
- c->ases |= MIPS_ASE_MDMX;
- if (config1 & MIPS_CONF1_WR)
- c->options |= MIPS_CPU_WATCH;
- if (config1 & MIPS_CONF1_CA)
- c->ases |= MIPS_ASE_MIPS16;
- if (config1 & MIPS_CONF1_EP)
- c->options |= MIPS_CPU_EJTAG;
- if (config1 & MIPS_CONF1_FP) {
- c->options |= MIPS_CPU_FPU;
- c->options |= MIPS_CPU_32FPR;
+ break;
}
- if (cpu_has_tlb)
- c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
-
- return config1 & MIPS_CONF_M;
}
-static inline unsigned int decode_config2(struct cpuinfo_mips *c)
+static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
{
- unsigned int config2;
-
- config2 = read_c0_config2();
-
- if (config2 & MIPS_CONF2_SL)
- c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
-
- return config2 & MIPS_CONF_M;
-}
-
-static inline unsigned int decode_config3(struct cpuinfo_mips *c)
-{
- unsigned int config3;
-
- config3 = read_c0_config3();
-
- if (config3 & MIPS_CONF3_SM)
- c->ases |= MIPS_ASE_SMARTMIPS;
- if (config3 & MIPS_CONF3_DSP)
- c->ases |= MIPS_ASE_DSP;
- if (config3 & MIPS_CONF3_VINT)
- c->options |= MIPS_CPU_VINT;
- if (config3 & MIPS_CONF3_VEIC)
- c->options |= MIPS_CPU_VEIC;
- if (config3 & MIPS_CONF3_MT)
- c->ases |= MIPS_ASE_MIPSMT;
-
- return config3 & MIPS_CONF_M;
-}
-
-static void __init decode_configs(struct cpuinfo_mips *c)
-{
- /* MIPS32 or MIPS64 compliant CPU. */
- c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
- MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
-
- c->scache.flags = MIPS_CACHE_NOT_PRESENT;
-
- /* Read Config registers. */
- if (!decode_config0(c))
- return; /* actually worth a panic() */
- if (!decode_config1(c))
- return;
- if (!decode_config2(c))
- return;
- if (!decode_config3(c))
- return;
-}
-
-static inline void cpu_probe_mips(struct cpuinfo_mips *c)
-{
- decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_4KC:
c->cputype = CPU_4KC;
+ __cpu_name[cpu] = "MIPS 4Kc";
break;
case PRID_IMP_4KEC:
- c->cputype = CPU_4KEC;
- break;
case PRID_IMP_4KECR2:
c->cputype = CPU_4KEC;
+ __cpu_name[cpu] = "MIPS 4KEc";
break;
case PRID_IMP_4KSC:
case PRID_IMP_4KSD:
c->cputype = CPU_4KSC;
+ __cpu_name[cpu] = "MIPS 4KSc";
break;
case PRID_IMP_5KC:
c->cputype = CPU_5KC;
+ __cpu_name[cpu] = "MIPS 5Kc";
+ break;
+ case PRID_IMP_5KE:
+ c->cputype = CPU_5KE;
+ __cpu_name[cpu] = "MIPS 5KE";
break;
case PRID_IMP_20KC:
c->cputype = CPU_20KC;
+ __cpu_name[cpu] = "MIPS 20Kc";
break;
case PRID_IMP_24K:
+ c->cputype = CPU_24K;
+ __cpu_name[cpu] = "MIPS 24Kc";
+ break;
case PRID_IMP_24KE:
c->cputype = CPU_24K;
+ __cpu_name[cpu] = "MIPS 24KEc";
break;
case PRID_IMP_25KF:
c->cputype = CPU_25KF;
+ __cpu_name[cpu] = "MIPS 25Kc";
break;
case PRID_IMP_34K:
c->cputype = CPU_34K;
+ __cpu_name[cpu] = "MIPS 34Kc";
break;
case PRID_IMP_74K:
c->cputype = CPU_74K;
+ __cpu_name[cpu] = "MIPS 74Kc";
+ break;
+ case PRID_IMP_M14KC:
+ c->cputype = CPU_M14KC;
+ __cpu_name[cpu] = "MIPS M14Kc";
+ break;
+ case PRID_IMP_M14KEC:
+ c->cputype = CPU_M14KEC;
+ __cpu_name[cpu] = "MIPS M14KEc";
+ break;
+ case PRID_IMP_1004K:
+ c->cputype = CPU_1004K;
+ __cpu_name[cpu] = "MIPS 1004Kc";
+ break;
+ case PRID_IMP_1074K:
+ c->cputype = CPU_1074K;
+ __cpu_name[cpu] = "MIPS 1074Kc";
+ break;
+ case PRID_IMP_INTERAPTIV_UP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv";
+ break;
+ case PRID_IMP_INTERAPTIV_MP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv (multi)";
+ break;
+ case PRID_IMP_PROAPTIV_UP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv";
+ break;
+ case PRID_IMP_PROAPTIV_MP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv (multi)";
+ break;
+ case PRID_IMP_P5600:
+ c->cputype = CPU_P5600;
+ __cpu_name[cpu] = "MIPS P5600";
+ break;
+ case PRID_IMP_M5150:
+ c->cputype = CPU_M5150;
+ __cpu_name[cpu] = "MIPS M5150";
break;
}
+
+ decode_configs(c);
+
+ spram_config();
}
-static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
+static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_AU1_REV1:
case PRID_IMP_AU1_REV2:
+ c->cputype = CPU_ALCHEMY;
switch ((c->processor_id >> 24) & 0xff) {
case 0:
- c->cputype = CPU_AU1000;
+ __cpu_name[cpu] = "Au1000";
break;
case 1:
- c->cputype = CPU_AU1500;
+ __cpu_name[cpu] = "Au1500";
break;
case 2:
- c->cputype = CPU_AU1100;
+ __cpu_name[cpu] = "Au1100";
break;
case 3:
- c->cputype = CPU_AU1550;
+ __cpu_name[cpu] = "Au1550";
break;
case 4:
- c->cputype = CPU_AU1200;
+ __cpu_name[cpu] = "Au1200";
+ if ((c->processor_id & PRID_REV_MASK) == 2)
+ __cpu_name[cpu] = "Au1250";
+ break;
+ case 5:
+ __cpu_name[cpu] = "Au1210";
break;
default:
- panic("Unknown Au Core!");
+ __cpu_name[cpu] = "Au1xxx";
break;
}
break;
}
}
-static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
+static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- /*
- * For historical reasons the SB1 comes with its own variant of
- * cache code which eventually will be folded into c-r4k.c. Until
- * then we pretend it's got its own cache architecture.
- */
- c->options &= ~MIPS_CPU_4K_CACHE;
- c->options |= MIPS_CPU_SB1_CACHE;
-
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
+ __cpu_name[cpu] = "SiByte SB1";
/* FPU in pass1 is known to have issues. */
- if ((c->processor_id & 0xff) < 0x02)
+ if ((c->processor_id & PRID_REV_MASK) < 0x02)
c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
break;
case PRID_IMP_SB1A:
c->cputype = CPU_SB1A;
+ __cpu_name[cpu] = "SiByte SB1A";
break;
}
}
-static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c)
+static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_SR71000:
c->cputype = CPU_SR71000;
+ __cpu_name[cpu] = "Sandcraft SR71000";
c->scache.ways = 8;
c->tlbsize = 64;
break;
}
}
-static inline void cpu_probe_philips(struct cpuinfo_mips *c)
+static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_PR4450:
c->cputype = CPU_PR4450;
- c->isa_level = MIPS_CPU_ISA_M32R1;
+ __cpu_name[cpu] = "Philips PR4450";
+ set_isa(c, MIPS_CPU_ISA_M32R1);
+ break;
+ }
+}
+
+static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_BMIPS32_REV4:
+ case PRID_IMP_BMIPS32_REV8:
+ c->cputype = CPU_BMIPS32;
+ __cpu_name[cpu] = "Broadcom BMIPS32";
+ set_elf_platform(cpu, "bmips32");
+ break;
+ case PRID_IMP_BMIPS3300:
+ case PRID_IMP_BMIPS3300_ALT:
+ case PRID_IMP_BMIPS3300_BUG:
+ c->cputype = CPU_BMIPS3300;
+ __cpu_name[cpu] = "Broadcom BMIPS3300";
+ set_elf_platform(cpu, "bmips3300");
+ break;
+ case PRID_IMP_BMIPS43XX: {
+ int rev = c->processor_id & PRID_REV_MASK;
+
+ if (rev >= PRID_REV_BMIPS4380_LO &&
+ rev <= PRID_REV_BMIPS4380_HI) {
+ c->cputype = CPU_BMIPS4380;
+ __cpu_name[cpu] = "Broadcom BMIPS4380";
+ set_elf_platform(cpu, "bmips4380");
+ } else {
+ c->cputype = CPU_BMIPS4350;
+ __cpu_name[cpu] = "Broadcom BMIPS4350";
+ set_elf_platform(cpu, "bmips4350");
+ }
+ break;
+ }
+ case PRID_IMP_BMIPS5000:
+ c->cputype = CPU_BMIPS5000;
+ __cpu_name[cpu] = "Broadcom BMIPS5000";
+ set_elf_platform(cpu, "bmips5000");
+ c->options |= MIPS_CPU_ULRI;
+ break;
+ }
+}
+
+static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_CAVIUM_CN38XX:
+ case PRID_IMP_CAVIUM_CN31XX:
+ case PRID_IMP_CAVIUM_CN30XX:
+ c->cputype = CPU_CAVIUM_OCTEON;
+ __cpu_name[cpu] = "Cavium Octeon";
+ goto platform;
+ case PRID_IMP_CAVIUM_CN58XX:
+ case PRID_IMP_CAVIUM_CN56XX:
+ case PRID_IMP_CAVIUM_CN50XX:
+ case PRID_IMP_CAVIUM_CN52XX:
+ c->cputype = CPU_CAVIUM_OCTEON_PLUS;
+ __cpu_name[cpu] = "Cavium Octeon+";
+platform:
+ set_elf_platform(cpu, "octeon");
+ break;
+ case PRID_IMP_CAVIUM_CN61XX:
+ case PRID_IMP_CAVIUM_CN63XX:
+ case PRID_IMP_CAVIUM_CN66XX:
+ case PRID_IMP_CAVIUM_CN68XX:
+ case PRID_IMP_CAVIUM_CNF71XX:
+ c->cputype = CPU_CAVIUM_OCTEON2;
+ __cpu_name[cpu] = "Cavium Octeon II";
+ set_elf_platform(cpu, "octeon2");
+ break;
+ case PRID_IMP_CAVIUM_CN70XX:
+ case PRID_IMP_CAVIUM_CN78XX:
+ c->cputype = CPU_CAVIUM_OCTEON3;
+ __cpu_name[cpu] = "Cavium Octeon III";
+ set_elf_platform(cpu, "octeon3");
break;
default:
- panic("Unknown Philips Core!"); /* REVISIT: die? */
+ printk(KERN_INFO "Unknown Octeon chip!\n");
+ c->cputype = CPU_UNKNOWN;
break;
}
}
+static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ /* JZRISC does not implement the CP0 counter. */
+ c->options &= ~MIPS_CPU_COUNTER;
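+	/* cpu_has_counter must be a compile-time constant 0 here, or it
+	 * would contradict the option bit we just cleared. */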
+ BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_JZRISC:
+ c->cputype = CPU_JZRISC;
+ __cpu_name[cpu] = "Ingenic JZRISC";
+ break;
+ default:
+ panic("Unknown Ingenic Processor ID!");
+ break;
+ }
+}
+
+static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
+{
+ decode_configs(c);
+
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_AU13XX) {
+ c->cputype = CPU_ALCHEMY;
+ __cpu_name[cpu] = "Au1300";
+		/* the rest of the setup below does not apply to Alchemy */
+ return;
+ }
+
+ c->options = (MIPS_CPU_TLB |
+ MIPS_CPU_4KEX |
+ MIPS_CPU_COUNTER |
+ MIPS_CPU_DIVEC |
+ MIPS_CPU_WATCH |
+ MIPS_CPU_EJTAG |
+ MIPS_CPU_LLSC);
+
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ case PRID_IMP_NETLOGIC_XLP9XX:
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ c->cputype = CPU_XLP;
+ __cpu_name[cpu] = "Broadcom XLPII";
+ break;
+
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ c->cputype = CPU_XLP;
+ __cpu_name[cpu] = "Netlogic XLP";
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR732:
+ case PRID_IMP_NETLOGIC_XLR716:
+ case PRID_IMP_NETLOGIC_XLR532:
+ case PRID_IMP_NETLOGIC_XLR308:
+ case PRID_IMP_NETLOGIC_XLR532C:
+ case PRID_IMP_NETLOGIC_XLR516C:
+ case PRID_IMP_NETLOGIC_XLR508C:
+ case PRID_IMP_NETLOGIC_XLR308C:
+ c->cputype = CPU_XLR;
+ __cpu_name[cpu] = "Netlogic XLR";
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS608:
+ case PRID_IMP_NETLOGIC_XLS408:
+ case PRID_IMP_NETLOGIC_XLS404:
+ case PRID_IMP_NETLOGIC_XLS208:
+ case PRID_IMP_NETLOGIC_XLS204:
+ case PRID_IMP_NETLOGIC_XLS108:
+ case PRID_IMP_NETLOGIC_XLS104:
+ case PRID_IMP_NETLOGIC_XLS616B:
+ case PRID_IMP_NETLOGIC_XLS608B:
+ case PRID_IMP_NETLOGIC_XLS416B:
+ case PRID_IMP_NETLOGIC_XLS412B:
+ case PRID_IMP_NETLOGIC_XLS408B:
+ case PRID_IMP_NETLOGIC_XLS404B:
+ c->cputype = CPU_XLR;
+ __cpu_name[cpu] = "Netlogic XLS";
+ break;
+
+ default:
+ pr_info("Unknown Netlogic chip id [%02x]!\n",
+ c->processor_id);
+ c->cputype = CPU_XLR;
+ break;
+ }
+ if (c->cputype == CPU_XLP) {
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK);
+ /* This will be updated again after all threads are woken up */
+ c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+ } else {
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
+ }
+ c->kscratch_mask = 0xf;
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
+const char *__cpu_name[NR_CPUS];
+const char *__elf_platform;
+
-__init void cpu_probe(void)
+void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int cpu = smp_processor_id();
- c->processor_id = PRID_IMP_UNKNOWN;
+ c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;
c->processor_id = read_c0_prid();
- switch (c->processor_id & 0xff0000) {
+ switch (c->processor_id & PRID_COMP_MASK) {
case PRID_COMP_LEGACY:
- cpu_probe_legacy(c);
+ cpu_probe_legacy(c, cpu);
break;
case PRID_COMP_MIPS:
- cpu_probe_mips(c);
+ cpu_probe_mips(c, cpu);
break;
case PRID_COMP_ALCHEMY:
- cpu_probe_alchemy(c);
+ cpu_probe_alchemy(c, cpu);
break;
case PRID_COMP_SIBYTE:
- cpu_probe_sibyte(c);
+ cpu_probe_sibyte(c, cpu);
+ break;
+ case PRID_COMP_BROADCOM:
+ cpu_probe_broadcom(c, cpu);
break;
case PRID_COMP_SANDCRAFT:
- cpu_probe_sandcraft(c);
+ cpu_probe_sandcraft(c, cpu);
break;
- case PRID_COMP_PHILIPS:
- cpu_probe_philips(c);
+ case PRID_COMP_NXP:
+ cpu_probe_nxp(c, cpu);
+ break;
+ case PRID_COMP_CAVIUM:
+ cpu_probe_cavium(c, cpu);
+ break;
+ case PRID_COMP_INGENIC:
+ cpu_probe_ingenic(c, cpu);
+ break;
+ case PRID_COMP_NETLOGIC:
+ cpu_probe_netlogic(c, cpu);
break;
- default:
- c->cputype = CPU_UNKNOWN;
}
+
+ BUG_ON(!__cpu_name[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
+ /*
+	 * Platform code can force the cpu type to optimize code
+	 * generation. In that case make sure the cpu type is correctly
+	 * set up manually, otherwise it could trigger some nasty bugs.
+ */
+ BUG_ON(current_cpu_type() != c->cputype);
+
+ if (mips_fpu_disabled)
+ c->options &= ~MIPS_CPU_FPU;
+
+ if (mips_dsp_disabled)
+ c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
- if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
- c->isa_level == MIPS_CPU_ISA_M32R2 ||
- c->isa_level == MIPS_CPU_ISA_M64R1 ||
- c->isa_level == MIPS_CPU_ISA_M64R2) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
}
}
+
+ if (cpu_has_mips_r2) {
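+		/* SRSCtl.HSS holds the highest shadow register set
+		 * number, so the number of sets is HSS + 1. */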
+ c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
+ /* R2 has Performance Counter Interrupt indicator */
+ c->options |= MIPS_CPU_PCI;
+	} else
+ c->srsets = 1;
+
+ if (cpu_has_msa) {
+ c->msa_id = cpu_get_msa_id();
+ WARN(c->msa_id & MSA_IR_WRPF,
+ "Vector register partitioning unimplemented!");
+ }
+
+ cpu_probe_vmbits(c);
+
+#ifdef CONFIG_64BIT
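+	/* User virtual addresses fit in cpu_vmbits bits, so any address
+	 * with a higher bit set is beyond the uaccess range. */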
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vmbits) - 1);
+#endif
}
-__init void cpu_report(void)
+void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- printk("CPU revision is: %08x\n", c->processor_id);
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
- printk("FPU revision is: %08x\n", c->fpu_id);
+ printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
+ if (cpu_has_msa)
+ pr_info("MSA revision is: %08x\n", c->msa_id);
}
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
new file mode 100644
index 00000000000..d21264681e9
--- /dev/null
+++ b/arch/mips/kernel/crash.c
@@ -0,0 +1,70 @@
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+
+/* This keeps track of which CPU is crashing. */
+static int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+static void crash_shutdown_secondary(void *ignore)
+{
+ struct pt_regs *regs;
+ int cpu = smp_processor_id();
+
+ regs = task_pt_regs(current);
+
+ if (!cpu_online(cpu))
+ return;
+
+ local_irq_disable();
+ if (!cpu_isset(cpu, cpus_in_crash))
+ crash_save_cpu(regs, cpu);
+ cpu_set(cpu, cpus_in_crash);
+
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+ relocated_kexec_smp_wait(NULL);
+ /* NOTREACHED */
+}
+
+static void crash_kexec_prepare_cpus(void)
+{
+ unsigned int msecs;
+
+	unsigned int ncpus = num_online_cpus() - 1; /* excluding the panic CPU */
+
+ dump_send_ipi(crash_shutdown_secondary);
+ smp_wmb();
+
+ /*
+	 * The crash CPU sends an IPI and waits for the other CPUs to
+	 * respond, allowing them at least 10 seconds.
+ */
+ pr_emerg("Sending IPI to other cpus...\n");
+ msecs = 10000;
+ while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ cpu_relax();
+ mdelay(1);
+ }
+}
+
+#else /* !defined(CONFIG_SMP) */
+static void crash_kexec_prepare_cpus(void) {}
+#endif /* !defined(CONFIG_SMP) */
+
+void default_machine_crash_shutdown(struct pt_regs *regs)
+{
+ local_irq_disable();
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+ crash_kexec_prepare_cpus();
+ cpu_set(crashing_cpu, cpus_in_crash);
+}
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
new file mode 100644
index 00000000000..f291cf99b03
--- /dev/null
+++ b/arch/mips/kernel/crash_dump.c
@@ -0,0 +1,66 @@
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+
+static void *kdump_buf_page;
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel.
+ *
+ * Calling copy_to_user() in atomic context is not desirable, hence we
+ * first copy the data to a pre-allocated kernel page and then copy it
+ * to user space from non-atomic context.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = kmap_atomic_pfn(pfn);
+
+ if (!userbuf) {
+ memcpy(buf, (vaddr + offset), csize);
+ kunmap_atomic(vaddr);
+ } else {
+ if (!kdump_buf_page) {
+ pr_warning("Kdump: Kdump buffer page not allocated\n");
+
+ return -EFAULT;
+ }
+ copy_page(kdump_buf_page, vaddr);
+ kunmap_atomic(vaddr);
+ if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+ return -EFAULT;
+ }
+
+ return csize;
+}
+
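+/*
+ * Note: copy_oldmem_page() is called from the generic /proc/vmcore
+ * read path to pull pages out of the crashed kernel's memory image.
+ */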
+static int __init kdump_buf_page_init(void)
+{
+ int ret = 0;
+
+ kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!kdump_buf_page) {
+ pr_warning("Kdump: Failed to allocate kdump buffer page\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+arch_initcall(kdump_buf_page_init);
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
new file mode 100644
index 00000000000..468f3eba413
--- /dev/null
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2000,2001,2004 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/clocksource.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#include <asm/sibyte/bcm1480_scd.h>
+
+#include <asm/sibyte/sb1250.h>
+
+static cycle_t bcm1480_hpt_read(struct clocksource *cs)
+{
+ return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+}
+
+struct clocksource bcm1480_clocksource = {
+ .name = "zbbus-cycles",
+ .rating = 200,
+ .read = bcm1480_hpt_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init sb1480_clocksource_init(void)
+{
+ struct clocksource *cs = &bcm1480_clocksource;
+ unsigned int plldiv;
+ unsigned long zbbus;
+
+ plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
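+	/*
+	 * plldiv counts the PLL divider in 25 MHz half-steps: each full
+	 * step is 50 MHz and an odd value adds another 25 MHz, i.e. the
+	 * ZBbus runs at plldiv * 25 MHz.
+	 */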
+ zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
+ clocksource_register_hz(cs, zbbus);
+}
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
new file mode 100644
index 00000000000..e0262090111
--- /dev/null
+++ b/arch/mips/kernel/csrc-gic.c
@@ -0,0 +1,40 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/time.h>
+
+#include <asm/gic.h>
+
+static cycle_t gic_hpt_read(struct clocksource *cs)
+{
+ return gic_read_count();
+}
+
+static struct clocksource gic_clocksource = {
+ .name = "GIC",
+ .read = gic_hpt_read,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init gic_clocksource_init(unsigned int frequency)
+{
+ unsigned int config, bits;
+
+ /* Calculate the clocksource mask. */
+ GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config);
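+	/*
+	 * The COUNTBITS field encodes (width - 32) / 4; shifting right
+	 * by (GIC_SH_CONFIG_COUNTBITS_SHF - 2) rather than by the full
+	 * shift amount leaves the field multiplied by 4.
+	 */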
+ bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
+ (GIC_SH_CONFIG_COUNTBITS_SHF - 2));
+
+ /* Set clocksource mask. */
+ gic_clocksource.mask = CLOCKSOURCE_MASK(bits);
+
+ /* Calculate a somewhat reasonable rating value. */
+ gic_clocksource.rating = 200 + frequency / 10000000;
+
+ clocksource_register_hz(&gic_clocksource, frequency);
+}
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
new file mode 100644
index 00000000000..6cbbf6e106b
--- /dev/null
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -0,0 +1,69 @@
+/*
+ * DEC I/O ASIC's counter clocksource
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/ds1287.h>
+#include <asm/time.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+
+static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
+{
+ return ioasic_read(IO_REG_FCTR);
+}
+
+static struct clocksource clocksource_dec = {
+ .name = "dec-ioasic",
+ .read = dec_ioasic_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init dec_ioasic_clocksource_init(void)
+{
+ unsigned int freq;
+ u32 start, end;
+ int i = HZ / 8;
+
+ ds1287_timer_state();
+ while (!ds1287_timer_state())
+ ;
+
+ start = dec_ioasic_hpt_read(&clocksource_dec);
+
+ while (i--)
+ while (!ds1287_timer_state())
+ ;
+
+ end = dec_ioasic_hpt_read(&clocksource_dec);
+
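+	/*
+	 * The DS1287 periodic timer ticks HZ times per second, so the
+	 * HZ/8 periods waited above span 1/8 of a second; scaling the
+	 * counter delta by 8 gives the counter frequency in Hz.
+	 */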
+ freq = (end - start) * 8;
+
+ /* An early revision of the I/O ASIC didn't have the counter. */
+ if (!freq)
+ return -ENXIO;
+
+ printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
+
+ clocksource_dec.rating = 200 + freq / 10000000;
+ clocksource_register_hz(&clocksource_dec, freq);
+ return 0;
+}
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
new file mode 100644
index 00000000000..decd1fa38d5
--- /dev/null
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/time.h>
+
+static cycle_t c0_hpt_read(struct clocksource *cs)
+{
+ return read_c0_count();
+}
+
+static struct clocksource clocksource_mips = {
+ .name = "MIPS",
+ .read = c0_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init init_r4k_clocksource(void)
+{
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+
+ /* Calculate a somewhat reasonable rating value */
+ clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
+
+ clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+
+ return 0;
+}
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
new file mode 100644
index 00000000000..6ecb77d8206
--- /dev/null
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/clocksource.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+#define SB1250_HPT_NUM 3
+#define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */
+
+/*
+ * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
+ * again.
+ */
+static cycle_t sb1250_hpt_read(struct clocksource *cs)
+{
+ unsigned int count;
+
+ count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT))));
+
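+	/* Invert the down-counter so the clocksource core sees a value
+	 * that increases monotonically. */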
+ return SB1250_HPT_VALUE - count;
+}
+
+struct clocksource bcm1250_clocksource = {
+ .name = "bcm1250-counter-3",
+ .rating = 200,
+ .read = sb1250_hpt_read,
+ .mask = CLOCKSOURCE_MASK(23),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init sb1250_clocksource_init(void)
+{
+ struct clocksource *cs = &bcm1250_clocksource;
+
+ /* Setup hpt using timer #3 but do not enable irq for it */
+ __raw_writeq(0,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_CFG)));
+ __raw_writeq(SB1250_HPT_VALUE,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_INIT)));
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_CFG)));
+
+ clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
+}
diff --git a/arch/mips/kernel/dma-no-isa.c b/arch/mips/kernel/dma-no-isa.c
deleted file mode 100644
index 6df8b07741e..00000000000
--- a/arch/mips/kernel/dma-no-isa.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- *
- * Dummy ISA DMA functions for systems that don't have ISA but share drivers
- * with ISA such as legacy free PCI.
- */
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-
-DEFINE_SPINLOCK(dma_spin_lock);
-
-int request_dma(unsigned int dmanr, const char * device_id)
-{
- return -EINVAL;
-}
-
-void free_dma(unsigned int dmanr)
-{
-}
-
-EXPORT_SYMBOL(dma_spin_lock);
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
new file mode 100644
index 00000000000..505cb77d128
--- /dev/null
+++ b/arch/mips/kernel/early_printk.c
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2003, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * written by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/printk.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+
+extern void prom_putchar(char);
+
+static void early_console_write(struct console *con, const char *s, unsigned n)
+{
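+	/* Serial consoles expect CRLF, so emit '\r' before every '\n'. */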
+ while (n-- && *s) {
+ if (*s == '\n')
+ prom_putchar('\r');
+ prom_putchar(*s);
+ s++;
+ }
+}
+
+static struct console early_console_prom = {
+ .name = "early",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1
+};
+
+void __init setup_early_printk(void)
+{
+ if (early_console)
+ return;
+ early_console = &early_console_prom;
+
+ register_console(&early_console_prom);
+}
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
new file mode 100644
index 00000000000..83cea376755
--- /dev/null
+++ b/arch/mips/kernel/early_printk_8250.c
@@ -0,0 +1,66 @@
+/*
+ * 8250/16550-type serial ports prom_putchar()
+ *
+ * Copyright (C) 2010 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+
+static void __iomem *serial8250_base;
+static unsigned int serial8250_reg_shift;
+static unsigned int serial8250_tx_timeout;
+
+void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift,
+ unsigned int timeout)
+{
+ serial8250_base = (void __iomem *)base;
+ serial8250_reg_shift = reg_shift;
+ serial8250_tx_timeout = timeout;
+}
+
+static inline u8 serial_in(int offset)
+{
+ return readb(serial8250_base + (offset << serial8250_reg_shift));
+}
+
+static inline void serial_out(int offset, char value)
+{
+ writeb(value, serial8250_base + (offset << serial8250_reg_shift));
+}
+
+void prom_putchar(char c)
+{
+ unsigned int timeout;
+ int status, bits;
+
+ if (!serial8250_base)
+ return;
+
+ timeout = serial8250_tx_timeout;
+ bits = UART_LSR_TEMT | UART_LSR_THRE;
+
+ do {
+ status = serial_in(UART_LSR);
+
+ if (--timeout == 0)
+ break;
+ } while ((status & bits) != bits);
+
+ if (timeout)
+ serial_out(UART_TX, c);
+}
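+
+/*
+ * A board would typically register its UART before the first
+ * prom_putchar() call. A minimal sketch; the base address, register
+ * shift and timeout below are made-up example values, not taken from
+ * any real platform:
+ *
+ *	setup_8250_early_printk_port(CKSEG1ADDR(0x1f000900), 2, 10000);
+ */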
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 766655f3525..4353d323f01 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -16,25 +16,28 @@
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif
-#ifdef CONFIG_PREEMPT
- .macro preempt_stop
- .endm
-#else
- .macro preempt_stop
- local_irq_disable
- .endm
+#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
+#else
+#define __ret_from_irq ret_from_exception
#endif
.text
.align 5
+#ifndef CONFIG_PREEMPT
FEXPORT(ret_from_exception)
- preempt_stop
+ local_irq_disable # preempt stop
+ b __ret_from_irq
+#endif
FEXPORT(ret_from_irq)
+ LONG_S s0, TI_REGS($28)
+FEXPORT(__ret_from_irq)
+/*
+ * We can arrive here from a syscall executed in kernel space,
+ * e.g. a failed kernel_execve().
+ */
+resume_userspace_check:
LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
andi t0, t0, KU_USER
beqz t0, resume_kernel
@@ -64,6 +67,12 @@ need_resched:
b need_resched
#endif
+FEXPORT(ret_from_kernel_thread)
+ jal schedule_tail # a0 = struct task_struct *prev
+ move a0, s1
+ jal s0
+ j syscall_exit
+
FEXPORT(ret_from_fork)
jal schedule_tail # a0 = struct task_struct *prev
@@ -76,49 +85,22 @@ FEXPORT(syscall_exit)
and t0, a2, t0
bnez t0, syscall_exit_work
-FEXPORT(restore_all) # restore full frame
-#ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
- move a0,sp
- jal deferred_smtc_ipi
-/* Re-arm any temporarily masked interrupts not explicitly "acked" */
- mfc0 v0, CP0_TCSTATUS
- ori v1, v0, TCSTATUS_IXMT
- mtc0 v1, CP0_TCSTATUS
- andi v0, TCSTATUS_IXMT
- _ehb
- mfc0 t0, CP0_TCCONTEXT
- DMT 9 # dmt t1
- jal mips_ihb
- mfc0 t2, CP0_STATUS
- andi t3, t0, 0xff00
- or t2, t2, t3
- mtc0 t2, CP0_STATUS
- _ehb
- andi t1, t1, VPECONTROL_TE
- beqz t1, 1f
- EMT
-1:
- mfc0 v1, CP0_TCSTATUS
- /* We set IXMT above, XOR should clear it here */
- xori v1, v1, TCSTATUS_IXMT
- or v1, v0, v1
- mtc0 v1, CP0_TCSTATUS
- _ehb
- xor t0, t0, t3
- mtc0 t0, CP0_TCCONTEXT
-#endif /* CONFIG_MIPS_MT_SMTC */
+restore_all: # restore full frame
.set noat
RESTORE_TEMP
RESTORE_AT
RESTORE_STATIC
-FEXPORT(restore_partial) # restore partial frame
+restore_partial: # restore partial frame
#ifdef CONFIG_TRACE_IRQFLAGS
SAVE_STATIC
SAVE_AT
SAVE_TEMP
LONG_L v0, PT_STATUS(sp)
- and v0, 1
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ and v0, ST0_IEP
+#else
+ and v0, ST0_IE
+#endif
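+		# On R3000-class CPUs the pre-exception interrupt enable
+		# is stacked into the "previous" bit (IEP), hence the
+		# different mask above.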
beqz v0, 1f
jal trace_hardirqs_on
b 2f
@@ -153,19 +135,27 @@ work_notifysig: # deal with pending signals and
move a0, sp
li a1, 0
jal do_notify_resume # a2 already loaded
- j resume_userspace
+ j resume_userspace_check
-FEXPORT(syscall_exit_work_partial)
+FEXPORT(syscall_exit_partial)
+ local_irq_disable # make sure need_resched doesn't
+					# change between here and the return
+ LONG_L a2, TI_FLAGS($28) # current->work
+ li t0, _TIF_ALLWORK_MASK
+ and t0, a2
+ beqz t0, restore_partial
SAVE_STATIC
syscall_exit_work:
- li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
+ andi t0, t0, KU_USER
+ beqz t0, resume_kernel
+ li t0, _TIF_WORK_SYSCALL_EXIT
and t0, a2 # a2 is preloaded with TI_FLAGS
beqz t0, work_pending # trace bit set?
- local_irq_enable # could let do_syscall_trace()
+ local_irq_enable # could let syscall_trace_leave()
# call schedule() instead
move a0, sp
- li a1, 1
- jal do_syscall_trace
+ jal syscall_trace_leave
b resume_userspace
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
new file mode 100644
index 00000000000..60e7e5e45af
--- /dev/null
+++ b/arch/mips/kernel/ftrace.c
@@ -0,0 +1,399 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ *
+ * Thanks goes to Steven Rostedt for writing the original x86 version.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/ftrace.h>
+#include <linux/syscalls.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/syscall.h>
+#include <asm/uasm.h>
+#include <asm/unistd.h>
+
+#include <asm-generic/sections.h>
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+ ftrace_modify_all_code(command);
+}
+
+#endif
+
+/*
+ * Check if the address is in kernel space
+ *
+ * Cloned from core_kernel_text() in kernel/extable.c, but without the
+ * init_kernel_text() check, since ftrace doesn't trace functions in
+ * init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+ if (ip >= (unsigned long)_stext &&
+ ip <= (unsigned long)_etext)
+ return 1;
+ return 0;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
+#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
+#define JUMP_RANGE_MASK ((1UL << 28) - 1)
+
+#define INSN_NOP 0x00000000 /* nop */
+#define INSN_JAL(addr) \
+ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
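+
+/*
+ * jal is a J-type instruction: its 26-bit target field holds a word
+ * address (addr >> 2), and the jump can only land within the current
+ * 256 MB (1 << 28) segment, which is what JUMP_RANGE_MASK expresses.
+ */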
+
+static unsigned int insn_jal_ftrace_caller __read_mostly;
+static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
+
+static inline void ftrace_dyn_arch_init_insns(void)
+{
+ u32 *buf;
+ unsigned int v1;
+
+ /* lui v1, hi16_mcount */
+ v1 = 3;
+ buf = (u32 *)&insn_lui_v1_hi16_mcount;
+ UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+
+	/* jal (ftrace_caller + 8), jump over the first two instructions */
+ buf = (u32 *)&insn_jal_ftrace_caller;
+ uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* j ftrace_graph_caller */
+ buf = (u32 *)&insn_j_ftrace_graph_caller;
+ uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
+#endif
+}
+
+static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
+{
+ int faulted;
+ mm_segment_t old_fs;
+
+ /* *(unsigned int *)ip = new_code; */
+ safe_store_code(new_code, ip, faulted);
+
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ flush_icache_range(ip, ip + 8);
+ set_fs(old_fs);
+
+ return 0;
+}
+
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+ unsigned int new_code2)
+{
+ int faulted;
+
+ safe_store_code(new_code1, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+ safe_store_code(new_code2, ip + 4, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+ flush_icache_range(ip, ip + 8);
+ return 0;
+}
+#endif
+
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount --> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ * sub sp, sp, 8
+ * 1: offset = 5 instructions
+ * 2.2 For the other situations
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ * nop | move $12, ra_address | sub sp, sp, 8
+ * 1: offset = 4 instructions
+ */
+
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ unsigned long ip = rec->ip;
+
+ /*
+	 * If ip is in kernel space, no long call is needed; otherwise a
+	 * long call is.
+ */
+ new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+#ifdef CONFIG_64BIT
+ return ftrace_modify_code(ip, new);
+#else
+ /*
+ * On 32 bit MIPS platforms, gcc adds a stack adjust
+ * instruction in the delay slot after the branch to
+ * mcount and expects mcount to restore the sp on return.
+ * This is based on a legacy API and does nothing but
+ * waste instructions so it's being removed at runtime.
+ */
+ return ftrace_modify_code_2(ip, new, INSN_NOP);
+#endif
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ unsigned long ip = rec->ip;
+
+ new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+ insn_lui_v1_hi16_mcount;
+
+ return ftrace_modify_code(ip, new);
+}
+
+#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned int new;
+
+ new = INSN_JAL((unsigned long)func);
+
+ return ftrace_modify_code(FTRACE_CALL_IP, new);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ /* Encode the instructions when booting */
+ ftrace_dyn_arch_init_insns();
+
+ /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
+ ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void ftrace_graph_call(void);
+#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
+ insn_j_ftrace_graph_caller);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+
+#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
+#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
+#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
+
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+ old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
+{
+ unsigned long sp, ip, tmp;
+ unsigned int code;
+ int faulted;
+
+ /*
+	 * For a module, move ip back from the return address to just after
+	 * the "lui v1, hi_16bit_of_mcount" instruction (offset 24); for the
+	 * kernel, move it to just after "move at, ra" (offset 16).
+ */
+ ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+
+ /*
+	 * scan the text backwards until we find either a non-store
+	 * instruction or the "s{d,w} ra, offset(sp)" instruction
+ */
+ do {
+ /* get the code at "ip": code = *(unsigned int *)ip; */
+ safe_load_code(code, ip, faulted);
+
+ if (unlikely(faulted))
+ return 0;
+ /*
+		 * If we hit a non-store instruction before finding where
+		 * ra is stored, then this is a leaf function and it does
+		 * not store ra on the stack
+ */
+ if ((code & S_R_SP) != S_R_SP)
+ return parent_ra_addr;
+
+		/* Move to the previous instruction */
+ ip -= 4;
+ } while ((code & S_RA_SP) != S_RA_SP);
+
+ sp = fp + (code & OFFSET_MASK);
+
+ /* tmp = *(unsigned long *)sp; */
+ safe_load_stack(tmp, sp, faulted);
+ if (unlikely(faulted))
+ return 0;
+
+ if (tmp == old_parent_ra)
+ return sp;
+ return 0;
+}
+
+#endif /* !KBUILD_MCOUNT_RA_ADDRESS */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
+ unsigned long fp)
+{
+ unsigned long old_parent_ra;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+ int faulted, insns;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * "parent_ra_addr" is the stack address saved the return address of
+ * the caller of _mcount.
+ *
+ * if the gcc < 4.5, a leaf function does not save the return address
+ * in the stack address, so, we "emulate" one in _mcount's stack space,
+ * and hijack it directly, but for a non-leaf function, it save the
+ * return address to the its own stack space, we can not hijack it
+ * directly, but need to find the real stack address,
+ * ftrace_get_parent_addr() does it!
+ *
+ * if gcc>= 4.5, with the new -mmcount-ra-address option, for a
+ * non-leaf function, the location of the return address will be saved
+ * to $12 for us, and for a leaf function, only put a zero into $12. we
+ * do it in ftrace_graph_caller of mcount.S.
+ */
+
+ /* old_parent_ra = *parent_ra_addr; */
+ safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
+ if (unlikely(faulted))
+ goto out;
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+ parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+ old_parent_ra, (unsigned long)parent_ra_addr, fp);
+ /*
+	 * If getting the stack address of the non-leaf function's ra
+	 * failed, stop the function graph tracer and return
+ */
+ if (parent_ra_addr == 0)
+ goto out;
+#endif
+ /* *parent_ra_addr = return_hooker; */
+ safe_store_stack(return_hooker, parent_ra_addr, faulted);
+ if (unlikely(faulted))
+ goto out;
+
+ if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+ == -EBUSY) {
+ *parent_ra_addr = old_parent_ra;
+ return;
+ }
+
+ /*
+ * Get the recorded ip of the current mcount calling site in the
+ * __mcount_loc section, which will be used to filter the function
+ * entries configured through the tracing/set_graph_function interface.
+ */
+
+ insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+ trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ *parent_ra_addr = old_parent_ra;
+ }
+ return;
+out:
+ ftrace_graph_stop();
+ WARN_ON(1);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+#ifdef CONFIG_32BIT
+unsigned long __init arch_syscall_addr(int nr)
+{
+ return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
+}
+#endif
+
+#ifdef CONFIG_64BIT
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+#ifdef CONFIG_MIPS32_N32
+ if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
+ return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
+#endif
+ if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
+ return (unsigned long)sys_call_table[nr - __NR_64_Linux];
+#ifdef CONFIG_MIPS32_O32
+ if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
+ return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
+#endif
+
+ return (unsigned long) &sys_ni_syscall;
+}
+#endif
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
deleted file mode 100644
index 2c446063636..00000000000
--- a/arch/mips/kernel/gdb-low.S
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * gdb-low.S contains the low-level trap handler for the GDB stub.
- *
- * Copyright (C) 1995 Andreas Busse
- */
-#include <linux/sys.h>
-
-#include <asm/asm.h>
-#include <asm/errno.h>
-#include <asm/irqflags.h>
-#include <asm/mipsregs.h>
-#include <asm/regdef.h>
-#include <asm/stackframe.h>
-#include <asm/gdb-stub.h>
-
-#ifdef CONFIG_32BIT
-#define DMFC0 mfc0
-#define DMTC0 mtc0
-#define LDC1 lwc1
-#define SDC1 lwc1
-#endif
-#ifdef CONFIG_64BIT
-#define DMFC0 dmfc0
-#define DMTC0 dmtc0
-#define LDC1 ldc1
-#define SDC1 ldc1
-#endif
-
-/*
- * [jsun] We reserve about 2x GDB_FR_SIZE on the stack. The lower (addressed)
- * part is used to store registers and is passed to the exception handler.
- * The upper part is reserved for the "call func" feature where the gdb client
- * saves some of the regs, sets up the call frame and passes args.
- *
- * A trace shows about 200 bytes are used to store about half of all regs.
- * The rest should be big enough for frame setup and passing args.
- */
-
-/*
- * The low level trap handler
- */
- .align 5
- NESTED(trap_low, GDB_FR_SIZE, sp)
- .set noat
- .set noreorder
-
- mfc0 k0, CP0_STATUS
- sll k0, 3 /* extract cu0 bit */
- bltz k0, 1f
- move k1, sp
-
- /*
- * Called from user mode, go somewhere else.
- */
- mfc0 k0, CP0_CAUSE
- andi k0, k0, 0x7c
-#ifdef CONFIG_64BIT
- dsll k0, k0, 1
-#endif
- PTR_L k1, saved_vectors(k0)
- jr k1
- nop
-1:
- move k0, sp
- PTR_SUBU sp, k1, GDB_FR_SIZE*2 # see comment above
- LONG_S k0, GDB_FR_REG29(sp)
- LONG_S $2, GDB_FR_REG2(sp)
-
-/*
- * First save the CP0 and special registers
- */
-
- mfc0 v0, CP0_STATUS
- LONG_S v0, GDB_FR_STATUS(sp)
- mfc0 v0, CP0_CAUSE
- LONG_S v0, GDB_FR_CAUSE(sp)
- DMFC0 v0, CP0_EPC
- LONG_S v0, GDB_FR_EPC(sp)
- DMFC0 v0, CP0_BADVADDR
- LONG_S v0, GDB_FR_BADVADDR(sp)
- mfhi v0
- LONG_S v0, GDB_FR_HI(sp)
- mflo v0
- LONG_S v0, GDB_FR_LO(sp)
-
-/*
- * Now the integer registers
- */
-
- LONG_S zero, GDB_FR_REG0(sp) /* I know... */
- LONG_S $1, GDB_FR_REG1(sp)
- /* v0 already saved */
- LONG_S $3, GDB_FR_REG3(sp)
- LONG_S $4, GDB_FR_REG4(sp)
- LONG_S $5, GDB_FR_REG5(sp)
- LONG_S $6, GDB_FR_REG6(sp)
- LONG_S $7, GDB_FR_REG7(sp)
- LONG_S $8, GDB_FR_REG8(sp)
- LONG_S $9, GDB_FR_REG9(sp)
- LONG_S $10, GDB_FR_REG10(sp)
- LONG_S $11, GDB_FR_REG11(sp)
- LONG_S $12, GDB_FR_REG12(sp)
- LONG_S $13, GDB_FR_REG13(sp)
- LONG_S $14, GDB_FR_REG14(sp)
- LONG_S $15, GDB_FR_REG15(sp)
- LONG_S $16, GDB_FR_REG16(sp)
- LONG_S $17, GDB_FR_REG17(sp)
- LONG_S $18, GDB_FR_REG18(sp)
- LONG_S $19, GDB_FR_REG19(sp)
- LONG_S $20, GDB_FR_REG20(sp)
- LONG_S $21, GDB_FR_REG21(sp)
- LONG_S $22, GDB_FR_REG22(sp)
- LONG_S $23, GDB_FR_REG23(sp)
- LONG_S $24, GDB_FR_REG24(sp)
- LONG_S $25, GDB_FR_REG25(sp)
- LONG_S $26, GDB_FR_REG26(sp)
- LONG_S $27, GDB_FR_REG27(sp)
- LONG_S $28, GDB_FR_REG28(sp)
- /* sp already saved */
- LONG_S $30, GDB_FR_REG30(sp)
- LONG_S $31, GDB_FR_REG31(sp)
-
- CLI /* disable interrupts */
- TRACE_IRQS_OFF
-
-/*
- * Followed by the floating point registers
- */
- mfc0 v0, CP0_STATUS /* FPU enabled? */
- srl v0, v0, 16
- andi v0, v0, (ST0_CU1 >> 16)
-
- beqz v0,2f /* disabled, skip */
- nop
-
- SDC1 $0, GDB_FR_FPR0(sp)
- SDC1 $1, GDB_FR_FPR1(sp)
- SDC1 $2, GDB_FR_FPR2(sp)
- SDC1 $3, GDB_FR_FPR3(sp)
- SDC1 $4, GDB_FR_FPR4(sp)
- SDC1 $5, GDB_FR_FPR5(sp)
- SDC1 $6, GDB_FR_FPR6(sp)
- SDC1 $7, GDB_FR_FPR7(sp)
- SDC1 $8, GDB_FR_FPR8(sp)
- SDC1 $9, GDB_FR_FPR9(sp)
- SDC1 $10, GDB_FR_FPR10(sp)
- SDC1 $11, GDB_FR_FPR11(sp)
- SDC1 $12, GDB_FR_FPR12(sp)
- SDC1 $13, GDB_FR_FPR13(sp)
- SDC1 $14, GDB_FR_FPR14(sp)
- SDC1 $15, GDB_FR_FPR15(sp)
- SDC1 $16, GDB_FR_FPR16(sp)
- SDC1 $17, GDB_FR_FPR17(sp)
- SDC1 $18, GDB_FR_FPR18(sp)
- SDC1 $19, GDB_FR_FPR19(sp)
- SDC1 $20, GDB_FR_FPR20(sp)
- SDC1 $21, GDB_FR_FPR21(sp)
- SDC1 $22, GDB_FR_FPR22(sp)
- SDC1 $23, GDB_FR_FPR23(sp)
- SDC1 $24, GDB_FR_FPR24(sp)
- SDC1 $25, GDB_FR_FPR25(sp)
- SDC1 $26, GDB_FR_FPR26(sp)
- SDC1 $27, GDB_FR_FPR27(sp)
- SDC1 $28, GDB_FR_FPR28(sp)
- SDC1 $29, GDB_FR_FPR29(sp)
- SDC1 $30, GDB_FR_FPR30(sp)
- SDC1 $31, GDB_FR_FPR31(sp)
-
-/*
- * FPU control registers
- */
-
- cfc1 v0, CP1_STATUS
- LONG_S v0, GDB_FR_FSR(sp)
- cfc1 v0, CP1_REVISION
- LONG_S v0, GDB_FR_FIR(sp)
-
-/*
- * Current stack frame ptr
- */
-
-2:
- LONG_S sp, GDB_FR_FRP(sp)
-
-/*
- * CP0 registers (R4000/R4400 unused registers skipped)
- */
-
- mfc0 v0, CP0_INDEX
- LONG_S v0, GDB_FR_CP0_INDEX(sp)
- mfc0 v0, CP0_RANDOM
- LONG_S v0, GDB_FR_CP0_RANDOM(sp)
- DMFC0 v0, CP0_ENTRYLO0
- LONG_S v0, GDB_FR_CP0_ENTRYLO0(sp)
- DMFC0 v0, CP0_ENTRYLO1
- LONG_S v0, GDB_FR_CP0_ENTRYLO1(sp)
- DMFC0 v0, CP0_CONTEXT
- LONG_S v0, GDB_FR_CP0_CONTEXT(sp)
- mfc0 v0, CP0_PAGEMASK
- LONG_S v0, GDB_FR_CP0_PAGEMASK(sp)
- mfc0 v0, CP0_WIRED
- LONG_S v0, GDB_FR_CP0_WIRED(sp)
- DMFC0 v0, CP0_ENTRYHI
- LONG_S v0, GDB_FR_CP0_ENTRYHI(sp)
- mfc0 v0, CP0_PRID
- LONG_S v0, GDB_FR_CP0_PRID(sp)
-
- .set at
-
-/*
- * Continue with the higher level handler
- */
-
- move a0,sp
-
- jal handle_exception
- nop
-
-/*
- * Restore all writable registers, in reverse order
- */
-
- .set noat
-
- LONG_L v0, GDB_FR_CP0_ENTRYHI(sp)
- LONG_L v1, GDB_FR_CP0_WIRED(sp)
- DMTC0 v0, CP0_ENTRYHI
- mtc0 v1, CP0_WIRED
- LONG_L v0, GDB_FR_CP0_PAGEMASK(sp)
- LONG_L v1, GDB_FR_CP0_ENTRYLO1(sp)
- mtc0 v0, CP0_PAGEMASK
- DMTC0 v1, CP0_ENTRYLO1
- LONG_L v0, GDB_FR_CP0_ENTRYLO0(sp)
- LONG_L v1, GDB_FR_CP0_INDEX(sp)
- DMTC0 v0, CP0_ENTRYLO0
- LONG_L v0, GDB_FR_CP0_CONTEXT(sp)
- mtc0 v1, CP0_INDEX
- DMTC0 v0, CP0_CONTEXT
-
-
-/*
- * Next, the floating point registers
- */
- mfc0 v0, CP0_STATUS /* check if the FPU is enabled */
- srl v0, v0, 16
- andi v0, v0, (ST0_CU1 >> 16)
-
- beqz v0, 3f /* disabled, skip */
- nop
-
- LDC1 $31, GDB_FR_FPR31(sp)
- LDC1 $30, GDB_FR_FPR30(sp)
- LDC1 $29, GDB_FR_FPR29(sp)
- LDC1 $28, GDB_FR_FPR28(sp)
- LDC1 $27, GDB_FR_FPR27(sp)
- LDC1 $26, GDB_FR_FPR26(sp)
- LDC1 $25, GDB_FR_FPR25(sp)
- LDC1 $24, GDB_FR_FPR24(sp)
- LDC1 $23, GDB_FR_FPR23(sp)
- LDC1 $22, GDB_FR_FPR22(sp)
- LDC1 $21, GDB_FR_FPR21(sp)
- LDC1 $20, GDB_FR_FPR20(sp)
- LDC1 $19, GDB_FR_FPR19(sp)
- LDC1 $18, GDB_FR_FPR18(sp)
- LDC1 $17, GDB_FR_FPR17(sp)
- LDC1 $16, GDB_FR_FPR16(sp)
- LDC1 $15, GDB_FR_FPR15(sp)
- LDC1 $14, GDB_FR_FPR14(sp)
- LDC1 $13, GDB_FR_FPR13(sp)
- LDC1 $12, GDB_FR_FPR12(sp)
- LDC1 $11, GDB_FR_FPR11(sp)
- LDC1 $10, GDB_FR_FPR10(sp)
- LDC1 $9, GDB_FR_FPR9(sp)
- LDC1 $8, GDB_FR_FPR8(sp)
- LDC1 $7, GDB_FR_FPR7(sp)
- LDC1 $6, GDB_FR_FPR6(sp)
- LDC1 $5, GDB_FR_FPR5(sp)
- LDC1 $4, GDB_FR_FPR4(sp)
- LDC1 $3, GDB_FR_FPR3(sp)
- LDC1 $2, GDB_FR_FPR2(sp)
- LDC1 $1, GDB_FR_FPR1(sp)
- LDC1 $0, GDB_FR_FPR0(sp)
-
-/*
- * Now the CP0 and integer registers
- */
-
-3:
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify write of Status must be atomic */
- mfc0 t2, CP0_TCSTATUS
- ori t1, t2, TCSTATUS_IXMT
- mtc0 t1, CP0_TCSTATUS
- andi t2, t2, TCSTATUS_IXMT
- _ehb
- DMT 9 # dmt t1
- jal mips_ihb
- nop
-#endif /* CONFIG_MIPS_MT_SMTC */
- mfc0 t0, CP0_STATUS
- ori t0, 0x1f
- xori t0, 0x1f
- mtc0 t0, CP0_STATUS
-#ifdef CONFIG_MIPS_MT_SMTC
- andi t1, t1, VPECONTROL_TE
- beqz t1, 9f
- nop
- EMT # emt
-9:
- mfc0 t1, CP0_TCSTATUS
- xori t1, t1, TCSTATUS_IXMT
- or t1, t1, t2
- mtc0 t1, CP0_TCSTATUS
- _ehb
-#endif /* CONFIG_MIPS_MT_SMTC */
- LONG_L v0, GDB_FR_STATUS(sp)
- LONG_L v1, GDB_FR_EPC(sp)
- mtc0 v0, CP0_STATUS
- DMTC0 v1, CP0_EPC
- LONG_L v0, GDB_FR_HI(sp)
- LONG_L v1, GDB_FR_LO(sp)
- mthi v0
- mtlo v1
- LONG_L $31, GDB_FR_REG31(sp)
- LONG_L $30, GDB_FR_REG30(sp)
- LONG_L $28, GDB_FR_REG28(sp)
- LONG_L $27, GDB_FR_REG27(sp)
- LONG_L $26, GDB_FR_REG26(sp)
- LONG_L $25, GDB_FR_REG25(sp)
- LONG_L $24, GDB_FR_REG24(sp)
- LONG_L $23, GDB_FR_REG23(sp)
- LONG_L $22, GDB_FR_REG22(sp)
- LONG_L $21, GDB_FR_REG21(sp)
- LONG_L $20, GDB_FR_REG20(sp)
- LONG_L $19, GDB_FR_REG19(sp)
- LONG_L $18, GDB_FR_REG18(sp)
- LONG_L $17, GDB_FR_REG17(sp)
- LONG_L $16, GDB_FR_REG16(sp)
- LONG_L $15, GDB_FR_REG15(sp)
- LONG_L $14, GDB_FR_REG14(sp)
- LONG_L $13, GDB_FR_REG13(sp)
- LONG_L $12, GDB_FR_REG12(sp)
- LONG_L $11, GDB_FR_REG11(sp)
- LONG_L $10, GDB_FR_REG10(sp)
- LONG_L $9, GDB_FR_REG9(sp)
- LONG_L $8, GDB_FR_REG8(sp)
- LONG_L $7, GDB_FR_REG7(sp)
- LONG_L $6, GDB_FR_REG6(sp)
- LONG_L $5, GDB_FR_REG5(sp)
- LONG_L $4, GDB_FR_REG4(sp)
- LONG_L $3, GDB_FR_REG3(sp)
- LONG_L $2, GDB_FR_REG2(sp)
- LONG_L $1, GDB_FR_REG1(sp)
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
- LONG_L k0, GDB_FR_EPC(sp)
- LONG_L $29, GDB_FR_REG29(sp) /* Deallocate stack */
- jr k0
- rfe
-#else
- LONG_L sp, GDB_FR_REG29(sp) /* Deallocate stack */
-
- .set mips3
- eret
- .set mips0
-#endif
- .set at
- .set reorder
- END(trap_low)
-
-LEAF(kgdb_read_byte)
-4: lb t0, (a0)
- sb t0, (a1)
- li v0, 0
- jr ra
- .section __ex_table,"a"
- PTR 4b, kgdbfault
- .previous
- END(kgdb_read_byte)
-
-LEAF(kgdb_write_byte)
-5: sb a0, (a1)
- li v0, 0
- jr ra
- .section __ex_table,"a"
- PTR 5b, kgdbfault
- .previous
- END(kgdb_write_byte)
-
- .type kgdbfault@function
- .ent kgdbfault
-
-kgdbfault: li v0, -EFAULT
- jr ra
- .end kgdbfault
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
deleted file mode 100644
index 719d26968cb..00000000000
--- a/arch/mips/kernel/gdb-stub.c
+++ /dev/null
@@ -1,1154 +0,0 @@
-/*
- * arch/mips/kernel/gdb-stub.c
- *
- * Originally written by Glenn Engel, Lake Stevens Instrument Division
- *
- * Contributed by HP Systems
- *
- * Modified for SPARC by Stu Grossman, Cygnus Support.
- *
- * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
- * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
- *
- * Copyright (C) 1995 Andreas Busse
- *
- * Copyright (C) 2003 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- */
-
-/*
- * To enable debugger support, two things need to happen. One, a
- * call to set_debug_traps() is necessary in order to allow any breakpoints
- * or error conditions to be properly intercepted and reported to gdb.
- * Two, a breakpoint needs to be generated to begin communication. This
- * is most easily accomplished by a call to breakpoint(). Breakpoint()
- * simulates a breakpoint by executing a BREAK instruction.
- *
- *
- * The following gdb commands are supported:
- *
- * command function Return value
- *
- * g return the value of the CPU registers hex data or ENN
- * G set the value of the CPU registers OK or ENN
- *
- * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
- * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
- *
- * c Resume at current address SNN ( signal NN)
- * cAA..AA Continue at address AA..AA SNN
- *
- * s Step one instruction SNN
- * sAA..AA Step one instruction from AA..AA SNN
- *
- * k kill
- *
- * ? What was the last sigval ? SNN (signal NN)
- *
- * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
- * baud rate
- *
- * All commands and responses are sent with a packet which includes a
- * checksum. A packet consists of
- *
- * $<packet info>#<checksum>.
- *
- * where
- * <packet info> :: <characters representing the command or response>
- * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
- *
- * When a packet is received, it is first acknowledged with either '+' or '-'.
- * '+' indicates a successful transfer. '-' indicates a failed transfer.
- *
- * Example:
- *
- * Host: Reply:
- * $m0,10#2a +$00010203040506070809101112131415#42
- *
- *
- * ==============
- * MORE EXAMPLES:
- * ==============
- *
- * For reference -- the following are the steps that one
- * company took (RidgeRun Inc) to get remote gdb debugging
- * going. In this scenario the host machine was a PC and the
- * target platform was a Galileo EVB64120A MIPS evaluation
- * board.
- *
- * Step 1:
- * First download gdb-5.0.tar.gz from the internet.
- * and then build/install the package.
- *
- * Example:
- * $ tar zxf gdb-5.0.tar.gz
- * $ cd gdb-5.0
- * $ ./configure --target=mips-linux-elf
- * $ make
- * $ install
- * $ which mips-linux-elf-gdb
- * /usr/local/bin/mips-linux-elf-gdb
- *
- * Step 2:
- * Configure linux for remote debugging and build it.
- *
- * Example:
- * $ cd ~/linux
- * $ make menuconfig <go to "Kernel Hacking" and turn on remote debugging>
- * $ make
- *
- * Step 3:
- * Download the kernel to the remote target and start
- * the kernel running. It will promptly halt and wait
- * for the host gdb session to connect. It does this
- * since the "Kernel Hacking" option has defined
- * CONFIG_KGDB which in turn enables your calls
- * to:
- * set_debug_traps();
- * breakpoint();
- *
- * Step 4:
- * Start the gdb session on the host.
- *
- * Example:
- * $ mips-linux-elf-gdb vmlinux
- * (gdb) set remotebaud 115200
- * (gdb) target remote /dev/ttyS1
- * ...at this point you are connected to
- * the remote target and can use gdb
- *			in the normal fashion. Setting
- * breakpoints, single stepping,
- * printing variables, etc.
- */
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/reboot.h>
-
-#include <asm/asm.h>
-#include <asm/cacheflush.h>
-#include <asm/mipsregs.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/gdb-stub.h>
-#include <asm/inst.h>
-#include <asm/smp.h>
-
-/*
- * external low-level support routines
- */
-
-extern int putDebugChar(char c); /* write a single character */
-extern char getDebugChar(void); /* read and return a single char */
-extern void trap_low(void);
-
-/*
- * breakpoint and test functions
- */
-extern void breakpoint(void);
-extern void breakinst(void);
-extern void async_breakpoint(void);
-extern void async_breakinst(void);
-extern void adel(void);
-
-/*
- * local prototypes
- */
-
-static void getpacket(char *buffer);
-static void putpacket(char *buffer);
-static int computeSignal(int tt);
-static int hex(unsigned char ch);
-static int hexToInt(char **ptr, int *intValue);
-static int hexToLong(char **ptr, long *longValue);
-static unsigned char *mem2hex(char *mem, char *buf, int count, int may_fault);
-void handle_exception(struct gdb_regs *regs);
-
-int kgdb_enabled;
-
-/*
- * spin locks for smp case
- */
-static DEFINE_SPINLOCK(kgdb_lock);
-static raw_spinlock_t kgdb_cpulock[NR_CPUS] = {
- [0 ... NR_CPUS-1] = __RAW_SPIN_LOCK_UNLOCKED,
-};
-
-/*
- * BUFMAX defines the maximum number of characters in inbound/outbound buffers
- * at least NUMREGBYTES*2 are needed for register packets
- */
-#define BUFMAX 2048
-
-static char input_buffer[BUFMAX];
-static char output_buffer[BUFMAX];
-static int initialized; /* !0 means we've been initialized */
-static int kgdb_started;
-static const char hexchars[]="0123456789abcdef";
-
-/* Used to prevent crashes in memory access. Note that they'll crash anyway if
- we haven't set up fault handlers yet... */
-int kgdb_read_byte(unsigned char *address, unsigned char *dest);
-int kgdb_write_byte(unsigned char val, unsigned char *dest);
-
-/*
- * Convert ch from a hex digit to an int
- */
-static int hex(unsigned char ch)
-{
- if (ch >= 'a' && ch <= 'f')
- return ch-'a'+10;
- if (ch >= '0' && ch <= '9')
- return ch-'0';
- if (ch >= 'A' && ch <= 'F')
- return ch-'A'+10;
- return -1;
-}
-
-/*
- * scan for the sequence $<data>#<checksum>
- */
-static void getpacket(char *buffer)
-{
- unsigned char checksum;
- unsigned char xmitcsum;
- int i;
- int count;
- unsigned char ch;
-
- do {
- /*
- * wait around for the start character,
- * ignore all other characters
- */
- while ((ch = (getDebugChar() & 0x7f)) != '$') ;
-
- checksum = 0;
- xmitcsum = -1;
- count = 0;
-
- /*
- * now, read until a # or end of buffer is found
- */
- while (count < BUFMAX) {
- ch = getDebugChar();
- if (ch == '#')
- break;
- checksum = checksum + ch;
- buffer[count] = ch;
- count = count + 1;
- }
-
- if (count >= BUFMAX)
- continue;
-
- buffer[count] = 0;
-
- if (ch == '#') {
- xmitcsum = hex(getDebugChar() & 0x7f) << 4;
- xmitcsum |= hex(getDebugChar() & 0x7f);
-
- if (checksum != xmitcsum)
- putDebugChar('-'); /* failed checksum */
- else {
- putDebugChar('+'); /* successful transfer */
-
- /*
- * if a sequence char is present,
- * reply the sequence ID
- */
- if (buffer[2] == ':') {
- putDebugChar(buffer[0]);
- putDebugChar(buffer[1]);
-
- /*
- * remove sequence chars from buffer
- */
- count = strlen(buffer);
- for (i=3; i <= count; i++)
- buffer[i-3] = buffer[i];
- }
- }
- }
- }
- while (checksum != xmitcsum);
-}
-
-/*
- * send the packet in buffer.
- */
-static void putpacket(char *buffer)
-{
- unsigned char checksum;
- int count;
- unsigned char ch;
-
- /*
- * $<packet info>#<checksum>.
- */
-
- do {
- putDebugChar('$');
- checksum = 0;
- count = 0;
-
- while ((ch = buffer[count]) != 0) {
- if (!(putDebugChar(ch)))
- return;
- checksum += ch;
- count += 1;
- }
-
- putDebugChar('#');
- putDebugChar(hexchars[checksum >> 4]);
- putDebugChar(hexchars[checksum & 0xf]);
-
- }
- while ((getDebugChar() & 0x7f) != '+');
-}
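
For reference, the $<payload>#<checksum> framing implemented by getpacket()/putpacket() above is easy to exercise on the host side. A minimal user-space sketch (the helper name is hypothetical, plain C) that frames a command the same way:

#include <stdio.h>

/* Frame a remote-protocol command as $<payload>#<checksum>, where the
 * checksum is the modulo-256 sum of the payload bytes, emitted as two
 * lowercase hex digits -- the same framing putpacket() sends. */
static void frame_packet(const char *payload, char *out, size_t outlen)
{
	unsigned char sum = 0;
	const char *p;

	for (p = payload; *p; p++)
		sum += (unsigned char)*p;
	snprintf(out, outlen, "$%s#%02x", payload, sum);
}

int main(void)
{
	char buf[64];

	frame_packet("m0,10", buf, sizeof(buf));
	printf("%s\n", buf);	/* "$m0,10#2a", matching the example above */
	return 0;
}
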
-
-
-/*
- * Convert the memory pointed to by mem into hex, placing result in buf.
- * Return a pointer to the last char put in buf (null); in case of a
- * memory fault, return 0.
- * may_fault is non-zero if we are reading from arbitrary memory, but is currently
- * not used.
- */
-static unsigned char *mem2hex(char *mem, char *buf, int count, int may_fault)
-{
- unsigned char ch;
-
- while (count-- > 0) {
- if (kgdb_read_byte(mem++, &ch) != 0)
- return 0;
- *buf++ = hexchars[ch >> 4];
- *buf++ = hexchars[ch & 0xf];
- }
-
- *buf = 0;
-
- return buf;
-}
-
-/*
- * Convert the hex array pointed to by buf into binary to be placed in mem;
- * return a pointer to the character AFTER the last byte written.
- * may_fault is non-zero if we are reading from arbitrary memory, but is currently
- * not used.
- */
-static char *hex2mem(char *buf, char *mem, int count, int binary, int may_fault)
-{
- int i;
- unsigned char ch;
-
- for (i=0; i<count; i++)
- {
- if (binary) {
- ch = *buf++;
- if (ch == 0x7d)
- ch = 0x20 ^ *buf++;
- }
- else {
- ch = hex(*buf++) << 4;
- ch |= hex(*buf++);
- }
- if (kgdb_write_byte(ch, mem++) != 0)
- return 0;
- }
-
- return mem;
-}
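
The 'binary' branch above undoes the escaping used by gdb's 'X' packet: a 0x7d marker followed by the original byte XORed with 0x20. A stand-alone sketch of the same decoding rule (user-space C; the test data is illustrative):

#include <stdio.h>
#include <stddef.h>

/* Undo the 'X' packet escaping that hex2mem() handles when 'binary'
 * is set: 0x7d marks an escape, and the following byte carries the
 * original value XORed with 0x20. */
static size_t unescape_binary(const unsigned char *in, size_t inlen,
			      unsigned char *out)
{
	size_t i, n = 0;

	for (i = 0; i < inlen; i++) {
		unsigned char ch = in[i];

		if (ch == 0x7d && i + 1 < inlen)
			ch = in[++i] ^ 0x20;
		out[n++] = ch;
	}
	return n;
}

int main(void)
{
	const unsigned char in[] = { 0x01, 0x7d, 0x5d, 0x02 }; /* 7d 5d -> 7d */
	unsigned char out[4];
	size_t n = unescape_binary(in, sizeof(in), out);

	printf("%zu bytes, second is 0x%02x\n", n, out[1]); /* 3 bytes, 0x7d */
	return 0;
}
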
-
-/*
- * This table contains the mapping between MIPS hardware trap types and
- * signals, which are primarily what GDB understands. It also indicates
- * which hardware traps we need to commandeer when initializing the stub.
- */
-static struct hard_trap_info {
- unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
- unsigned char signo; /* Signal that we map this trap into */
-} hard_trap_info[] = {
- { 6, SIGBUS }, /* instruction bus error */
- { 7, SIGBUS }, /* data bus error */
- { 9, SIGTRAP }, /* break */
- { 10, SIGILL }, /* reserved instruction */
-/* { 11, SIGILL }, */ /* coprocessor unusable */
- { 12, SIGFPE }, /* overflow */
- { 13, SIGTRAP }, /* trap */
- { 14, SIGSEGV }, /* virtual instruction cache coherency */
- { 15, SIGFPE }, /* floating point exception */
- { 23, SIGSEGV }, /* watch */
- { 31, SIGSEGV }, /* virtual data cache coherency */
- { 0, 0} /* Must be last */
-};
-
-/* Save the normal trap handlers for user-mode traps. */
-void *saved_vectors[32];
-
-/*
- * Set up exception handlers for tracing and breakpoints
- */
-void set_debug_traps(void)
-{
- struct hard_trap_info *ht;
- unsigned long flags;
- unsigned char c;
-
- local_irq_save(flags);
- for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
- saved_vectors[ht->tt] = set_except_vector(ht->tt, trap_low);
-
- putDebugChar('+'); /* 'hello world' */
- /*
- * In case GDB is started before us, ack any packets
- * (presumably "$?#xx") sitting there.
- */
- while((c = getDebugChar()) != '$');
- while((c = getDebugChar()) != '#');
- c = getDebugChar(); /* eat first csum byte */
- c = getDebugChar(); /* eat second csum byte */
- putDebugChar('+'); /* ack it */
-
- initialized = 1;
- local_irq_restore(flags);
-}
-
-void restore_debug_traps(void)
-{
- struct hard_trap_info *ht;
- unsigned long flags;
-
- local_irq_save(flags);
- for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
- set_except_vector(ht->tt, saved_vectors[ht->tt]);
- local_irq_restore(flags);
-}
-
-/*
- * Convert the MIPS hardware trap type code to a Unix signal number.
- */
-static int computeSignal(int tt)
-{
- struct hard_trap_info *ht;
-
- for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
- if (ht->tt == tt)
- return ht->signo;
-
- return SIGHUP; /* default for things we don't know about */
-}
-
-/*
- * While we find nice hex chars, build an int.
- * Return number of chars processed.
- */
-static int hexToInt(char **ptr, int *intValue)
-{
- int numChars = 0;
- int hexValue;
-
- *intValue = 0;
-
- while (**ptr) {
- hexValue = hex(**ptr);
- if (hexValue < 0)
- break;
-
- *intValue = (*intValue << 4) | hexValue;
- numChars ++;
-
- (*ptr)++;
- }
-
- return (numChars);
-}
-
-static int hexToLong(char **ptr, long *longValue)
-{
- int numChars = 0;
- int hexValue;
-
- *longValue = 0;
-
- while (**ptr) {
- hexValue = hex(**ptr);
- if (hexValue < 0)
- break;
-
- *longValue = (*longValue << 4) | hexValue;
- numChars ++;
-
- (*ptr)++;
- }
-
- return numChars;
-}
-
-
-#if 0
-/*
- * Print registers (on target console)
- * Used only to debug the stub...
- */
-void show_gdbregs(struct gdb_regs * regs)
-{
- /*
- * Saved main processor registers
- */
- printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- regs->reg0, regs->reg1, regs->reg2, regs->reg3,
- regs->reg4, regs->reg5, regs->reg6, regs->reg7);
- printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- regs->reg8, regs->reg9, regs->reg10, regs->reg11,
- regs->reg12, regs->reg13, regs->reg14, regs->reg15);
- printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- regs->reg16, regs->reg17, regs->reg18, regs->reg19,
- regs->reg20, regs->reg21, regs->reg22, regs->reg23);
- printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- regs->reg24, regs->reg25, regs->reg26, regs->reg27,
- regs->reg28, regs->reg29, regs->reg30, regs->reg31);
-
- /*
- * Saved cp0 registers
- */
- printk("epc : %08lx\nStatus: %08lx\nCause : %08lx\n",
- regs->cp0_epc, regs->cp0_status, regs->cp0_cause);
-}
-#endif /* dead code */
-
-/*
- * We single-step by setting breakpoints. When an exception
- * is handled, we need to restore the instructions hoisted
- * when the breakpoints were set.
- *
- * This is where we save the original instructions.
- */
-static struct gdb_bp_save {
- unsigned long addr;
- unsigned int val;
-} step_bp[2];
-
-#define BP 0x0000000d /* break opcode */
-
-/*
- * Set breakpoint instructions for single stepping.
- */
-static void single_step(struct gdb_regs *regs)
-{
- union mips_instruction insn;
- unsigned long targ;
- int is_branch, is_cond, i;
-
- targ = regs->cp0_epc;
- insn.word = *(unsigned int *)targ;
- is_branch = is_cond = 0;
-
- switch (insn.i_format.opcode) {
- /*
- * jr and jalr are in r_format format.
- */
- case spec_op:
- switch (insn.r_format.func) {
- case jalr_op:
- case jr_op:
- targ = *(&regs->reg0 + insn.r_format.rs);
- is_branch = 1;
- break;
- }
- break;
-
- /*
- * This group contains:
- * bltz_op, bgez_op, bltzl_op, bgezl_op,
- * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
- */
- case bcond_op:
- is_branch = is_cond = 1;
- targ += 4 + (insn.i_format.simmediate << 2);
- break;
-
- /*
- * These are unconditional and in j_format.
- */
- case jal_op:
- case j_op:
- is_branch = 1;
- targ += 4;
- targ >>= 28;
- targ <<= 28;
- targ |= (insn.j_format.target << 2);
- break;
-
- /*
- * These are conditional.
- */
- case beq_op:
- case beql_op:
- case bne_op:
- case bnel_op:
- case blez_op:
- case blezl_op:
- case bgtz_op:
- case bgtzl_op:
- case cop0_op:
- case cop1_op:
- case cop2_op:
- case cop1x_op:
- is_branch = is_cond = 1;
- targ += 4 + (insn.i_format.simmediate << 2);
- break;
- }
-
- if (is_branch) {
- i = 0;
- if (is_cond && targ != (regs->cp0_epc + 8)) {
- step_bp[i].addr = regs->cp0_epc + 8;
- step_bp[i++].val = *(unsigned *)(regs->cp0_epc + 8);
- *(unsigned *)(regs->cp0_epc + 8) = BP;
- }
- step_bp[i].addr = targ;
- step_bp[i].val = *(unsigned *)targ;
- *(unsigned *)targ = BP;
- } else {
- step_bp[0].addr = regs->cp0_epc + 4;
- step_bp[0].val = *(unsigned *)(regs->cp0_epc + 4);
- *(unsigned *)(regs->cp0_epc + 4) = BP;
- }
-}
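
Since MIPS has no hardware single-step, single_step() computes the possible next PCs and plants BREAK opcodes there. A small sketch of the branch-target arithmetic it uses for I-format conditional branches (sign-extended 16-bit immediate, scaled by 4, relative to the delay slot at EPC + 4); the addresses below are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Target of a taken I-format conditional branch: sign-extend the
 * 16-bit immediate, scale by 4 and add to the delay-slot address
 * (EPC + 4) -- the "targ += 4 + (simmediate << 2)" step above. */
static uint32_t branch_target(uint32_t epc, uint32_t insn)
{
	int32_t simm = (int16_t)(insn & 0xffff);

	return epc + 4 + simm * 4;
}

int main(void)
{
	/* 0x1000fffe: beq $zero, $zero, -2 => target is EPC - 4 */
	printf("0x%08x\n", (unsigned)branch_target(0x80001000u, 0x1000fffeu));
	return 0;
}
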
-
-/*
- * If asynchronously interrupted by gdb, then we need to set a breakpoint
- * at the interrupted instruction so that we wind up stopped with a
- * reasonable stack frame.
- */
-static struct gdb_bp_save async_bp;
-
-/*
- * Swap the interrupted EPC with our asynchronous breakpoint routine.
- * This is safer than stuffing the breakpoint in-place, since no cache
- * flushes (or resulting smp_call_functions) are required. The
- * assumption is that only one CPU will be handling asynchronous bp's,
- * and only one can be active at a time.
- */
-extern spinlock_t smp_call_lock;
-
-void set_async_breakpoint(unsigned long *epc)
-{
- /* skip breaking into userland */
- if ((*epc & 0x80000000) == 0)
- return;
-
-#ifdef CONFIG_SMP
- /* avoid deadlock if someone is making an IPI call */
- if (spin_is_locked(&smp_call_lock))
- return;
-#endif
-
- async_bp.addr = *epc;
- *epc = (unsigned long)async_breakpoint;
-}
-
-static void kgdb_wait(void *arg)
-{
- unsigned long flags;
- int cpu = smp_processor_id();
-
- local_irq_save(flags);
-
- __raw_spin_lock(&kgdb_cpulock[cpu]);
- __raw_spin_unlock(&kgdb_cpulock[cpu]);
-
- local_irq_restore(flags);
-}
-
-/*
- * GDB stub needs to call kgdb_wait on all processors with interrupts
- * disabled, so it uses its own special variant.
- */
-static int kgdb_smp_call_kgdb_wait(void)
-{
-#ifdef CONFIG_SMP
- struct call_data_struct data;
- int i, cpus = num_online_cpus() - 1;
- int cpu = smp_processor_id();
-
- /*
- * Can die spectacularly if this CPU isn't yet marked online
- */
- BUG_ON(!cpu_online(cpu));
-
- if (!cpus)
- return 0;
-
- if (spin_is_locked(&smp_call_lock)) {
- /*
- * Some other processor is trying to make us do something
- * but we're not going to respond... give up
- */
- return -1;
- }
-
- /*
- * We will continue here, accepting the fact that
- * the kernel may deadlock if another CPU attempts
- * to call smp_call_function now...
- */
-
- data.func = kgdb_wait;
- data.info = NULL;
- atomic_set(&data.started, 0);
- data.wait = 0;
-
- spin_lock(&smp_call_lock);
- call_data = &data;
- mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i) && i != cpu)
- core_send_ipi(i, SMP_CALL_FUNCTION);
-
- /* Wait for response */
- /* FIXME: lock-up detection, backtrace on lock-up */
- while (atomic_read(&data.started) != cpus)
- barrier();
-
- call_data = NULL;
- spin_unlock(&smp_call_lock);
-#endif
-
- return 0;
-}
-
-/*
- * This function does all command processing for interfacing to gdb. It
- * returns 1 if you should skip the instruction at the trap address, 0
- * otherwise.
- */
-void handle_exception (struct gdb_regs *regs)
-{
- int trap; /* Trap type */
- int sigval;
- long addr;
- int length;
- char *ptr;
- unsigned long *stack;
- int i;
- int bflag = 0;
-
- kgdb_started = 1;
-
- /*
- * acquire the big kgdb spinlock
- */
- if (!spin_trylock(&kgdb_lock)) {
- /*
- * some other CPU has the lock; we should go back to
- * receive the kgdb_wait IPI
- */
- return;
- }
-
- /*
- * If we're in async_breakpoint(), restore the real EPC from
- * the breakpoint.
- */
- if (regs->cp0_epc == (unsigned long)async_breakinst) {
- regs->cp0_epc = async_bp.addr;
- async_bp.addr = 0;
- }
-
- /*
- * acquire the CPU spinlocks
- */
- for (i = num_online_cpus()-1; i >= 0; i--)
- if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
- panic("kgdb: couldn't get cpulock %d\n", i);
-
- /*
- * force other cpus to enter kgdb
- */
- kgdb_smp_call_kgdb_wait();
-
- /*
- * If we're in breakpoint() increment the PC
- */
- trap = (regs->cp0_cause & 0x7c) >> 2;
- if (trap == 9 && regs->cp0_epc == (unsigned long)breakinst)
- regs->cp0_epc += 4;
-
- /*
- * If we were single_stepping, restore the opcodes hoisted
- * for the breakpoint[s].
- */
- if (step_bp[0].addr) {
- *(unsigned *)step_bp[0].addr = step_bp[0].val;
- step_bp[0].addr = 0;
-
- if (step_bp[1].addr) {
- *(unsigned *)step_bp[1].addr = step_bp[1].val;
- step_bp[1].addr = 0;
- }
- }
-
- stack = (long *)regs->reg29; /* stack ptr */
- sigval = computeSignal(trap);
-
- /*
- * reply to host that an exception has occurred
- */
- ptr = output_buffer;
-
- /*
- * Send trap type (converted to signal)
- */
- *ptr++ = 'T';
- *ptr++ = hexchars[sigval >> 4];
- *ptr++ = hexchars[sigval & 0xf];
-
- /*
- * Send Error PC
- */
- *ptr++ = hexchars[REG_EPC >> 4];
- *ptr++ = hexchars[REG_EPC & 0xf];
- *ptr++ = ':';
- ptr = mem2hex((char *)&regs->cp0_epc, ptr, sizeof(long), 0);
- *ptr++ = ';';
-
- /*
- * Send frame pointer
- */
- *ptr++ = hexchars[REG_FP >> 4];
- *ptr++ = hexchars[REG_FP & 0xf];
- *ptr++ = ':';
- ptr = mem2hex((char *)&regs->reg30, ptr, sizeof(long), 0);
- *ptr++ = ';';
-
- /*
- * Send stack pointer
- */
- *ptr++ = hexchars[REG_SP >> 4];
- *ptr++ = hexchars[REG_SP & 0xf];
- *ptr++ = ':';
- ptr = mem2hex((char *)&regs->reg29, ptr, sizeof(long), 0);
- *ptr++ = ';';
-
- *ptr++ = 0;
- putpacket(output_buffer); /* send it off... */
-
- /*
- * Wait for input from remote GDB
- */
- while (1) {
- output_buffer[0] = 0;
- getpacket(input_buffer);
-
- switch (input_buffer[0])
- {
- case '?':
- output_buffer[0] = 'S';
- output_buffer[1] = hexchars[sigval >> 4];
- output_buffer[2] = hexchars[sigval & 0xf];
- output_buffer[3] = 0;
- break;
-
- /*
- * Detach debugger; let CPU run
- */
- case 'D':
- putpacket(output_buffer);
- goto finish_kgdb;
- break;
-
- case 'd':
- /* toggle debug flag */
- break;
-
- /*
- * Return the value of the CPU registers
- */
- case 'g':
- ptr = output_buffer;
- ptr = mem2hex((char *)&regs->reg0, ptr, 32*sizeof(long), 0); /* r0...r31 */
- ptr = mem2hex((char *)&regs->cp0_status, ptr, 6*sizeof(long), 0); /* cp0 */
- ptr = mem2hex((char *)&regs->fpr0, ptr, 32*sizeof(long), 0); /* f0...31 */
- ptr = mem2hex((char *)&regs->cp1_fsr, ptr, 2*sizeof(long), 0); /* cp1 */
- ptr = mem2hex((char *)&regs->frame_ptr, ptr, 2*sizeof(long), 0); /* frp */
- ptr = mem2hex((char *)&regs->cp0_index, ptr, 16*sizeof(long), 0); /* cp0 */
- break;
-
- /*
- * set the value of the CPU registers - return OK
- */
- case 'G':
- {
- ptr = &input_buffer[1];
- hex2mem(ptr, (char *)&regs->reg0, 32*sizeof(long), 0, 0);
- ptr += 32*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->cp0_status, 6*sizeof(long), 0, 0);
- ptr += 6*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->fpr0, 32*sizeof(long), 0, 0);
- ptr += 32*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->cp1_fsr, 2*sizeof(long), 0, 0);
- ptr += 2*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->frame_ptr, 2*sizeof(long), 0, 0);
- ptr += 2*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->cp0_index, 16*sizeof(long), 0, 0);
- strcpy(output_buffer,"OK");
- }
- break;
-
- /*
- * mAA..AA,LLLL Read LLLL bytes at address AA..AA
- */
- case 'm':
- ptr = &input_buffer[1];
-
- if (hexToLong(&ptr, &addr)
- && *ptr++ == ','
- && hexToInt(&ptr, &length)) {
- if (mem2hex((char *)addr, output_buffer, length, 1))
- break;
- strcpy (output_buffer, "E03");
- } else
- strcpy(output_buffer,"E01");
- break;
-
- /*
- * XAA..AA,LLLL: Write LLLL escaped binary bytes at address AA..AA
- */
- case 'X':
- bflag = 1;
- /* fall through */
-
- /*
- * MAA..AA,LLLL: Write LLLL bytes at address AA..AA, return OK
- */
- case 'M':
- ptr = &input_buffer[1];
-
- if (hexToLong(&ptr, &addr)
- && *ptr++ == ','
- && hexToInt(&ptr, &length)
- && *ptr++ == ':') {
- if (hex2mem(ptr, (char *)addr, length, bflag, 1))
- strcpy(output_buffer, "OK");
- else
- strcpy(output_buffer, "E03");
- }
- else
- strcpy(output_buffer, "E02");
- break;
-
- /*
- * cAA..AA Continue at address AA..AA(optional)
- */
- case 'c':
- /* try to read optional parameter, pc unchanged if no parm */
-
- ptr = &input_buffer[1];
- if (hexToLong(&ptr, &addr))
- regs->cp0_epc = addr;
-
- goto exit_kgdb_exception;
- break;
-
- /*
- * kill the program: we restart (reset) the whole machine instead.
- */
- case 'k':
- case 'r':
- machine_restart("kgdb restarts machine");
- break;
-
- /*
- * Step to next instruction
- */
- case 's':
- /*
- * There is no single step insn in the MIPS ISA, so we
- * use breakpoints and continue, instead.
- */
- single_step(regs);
- goto exit_kgdb_exception;
- /* NOTREACHED */
- break;
-
- /*
- * Set baud rate (bBB)
- * FIXME: Needs to be written
- */
- case 'b':
- {
-#if 0
- int baudrate;
- extern void set_timer_3();
-
- ptr = &input_buffer[1];
- if (!hexToInt(&ptr, &baudrate))
- {
- strcpy(output_buffer,"B01");
- break;
- }
-
- /* Convert baud rate to uart clock divider */
-
- switch (baudrate)
- {
- case 38400:
- baudrate = 16;
- break;
- case 19200:
- baudrate = 33;
- break;
- case 9600:
- baudrate = 65;
- break;
- default:
- baudrate = 0;
- strcpy(output_buffer,"B02");
- goto x1;
- }
-
- if (baudrate) {
- putpacket("OK"); /* Ack before changing speed */
- set_timer_3(baudrate); /* Set it */
- }
-#endif
- }
- break;
-
- } /* switch */
-
- /*
- * reply to the request
- */
-
- putpacket(output_buffer);
-
- } /* while */
-
- return;
-
-finish_kgdb:
- restore_debug_traps();
-
-exit_kgdb_exception:
- /* release locks so other CPUs can go */
- for (i = num_online_cpus()-1; i >= 0; i--)
- __raw_spin_unlock(&kgdb_cpulock[i]);
- spin_unlock(&kgdb_lock);
-
- __flush_cache_all();
- return;
-}
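
The stop reply assembled above has the shape T<signal><regno>:<value>;... with everything in two-digit hex. A host-side sketch of that framing (the register number and value are illustrative; the real stub dumps register bytes in target byte order via mem2hex(), which this glosses over):

#include <stdio.h>

int main(void)
{
	unsigned sig = 5;		/* SIGTRAP, as computeSignal() maps a break */
	unsigned regno = 37;		/* stands in for REG_EPC -- an assumption */
	unsigned long value = 0x80001234UL;

	/* 'T', two hex digits of signal, then regno:value; pairs */
	printf("T%02x%02x:%08lx;\n", sig, regno, value);
	return 0;
}
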
-
-/*
- * This function will generate a breakpoint exception. It is used at the
- * beginning of a program to sync up with a debugger and can be used
- * otherwise as a quick means to stop program execution and "break" into
- * the debugger.
- */
-void breakpoint(void)
-{
- if (!initialized)
- return;
-
- __asm__ __volatile__(
- ".globl breakinst\n\t"
- ".set\tnoreorder\n\t"
- "nop\n"
- "breakinst:\tbreak\n\t"
- "nop\n\t"
- ".set\treorder"
- );
-}
-
-/* Nothing but the break; don't pollute any registers */
-void async_breakpoint(void)
-{
- __asm__ __volatile__(
- ".globl async_breakinst\n\t"
- ".set\tnoreorder\n\t"
- "nop\n"
- "async_breakinst:\tbreak\n\t"
- "nop\n\t"
- ".set\treorder"
- );
-}
-
-void adel(void)
-{
- __asm__ __volatile__(
- ".globl\tadel\n\t"
- "lui\t$8,0x8000\n\t"
- "lw\t$9,1($8)\n\t"
- );
-}
-
-/*
- * malloc is needed by the gdb client for "call func()"; even a private
- * one will make gdb happy
- */
-static void * __attribute_used__ malloc(size_t size)
-{
- return kmalloc(size, GFP_ATOMIC);
-}
-
-static void __attribute_used__ free (void *where)
-{
- kfree(where);
-}
-
-#ifdef CONFIG_GDB_CONSOLE
-
-void gdb_putsn(const char *str, int l)
-{
- char outbuf[18];
-
- if (!kgdb_started)
- return;
-
- outbuf[0]='O';
-
- while(l) {
- int i = (l>8)?8:l;
- mem2hex((char *)str, &outbuf[1], i, 0);
- outbuf[(i*2)+1]=0;
- putpacket(outbuf);
- str += i;
- l -= i;
- }
-}
-
-static void gdb_console_write(struct console *con, const char *s, unsigned n)
-{
- gdb_putsn(s, n);
-}
-
-static struct console gdb_console = {
- .name = "gdb",
- .write = gdb_console_write,
- .flags = CON_PRINTBUFFER,
- .index = -1
-};
-
-static int __init register_gdb_console(void)
-{
- register_console(&gdb_console);
-
- return 0;
-}
-
-console_initcall(register_gdb_console);
-
-#endif
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 37fda3dcdfc..ac35e12cb1f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -5,8 +5,8 @@
*
* Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
- * Copyright (C) 2002 Maciej W. Rozycki
+ * Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
@@ -19,28 +19,10 @@
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
-
-#define PANIC_PIC(msg) \
- .set push; \
- .set reorder; \
- PTR_LA a0,8f; \
- .set noat; \
- PTR_LA AT, panic; \
- jr AT; \
-9: b 9b; \
- .set pop; \
- TEXT(msg)
+#include <asm/thread_info.h>
__INIT
-NESTED(except_vec0_generic, 0, sp)
- PANIC_PIC("Exception vector 0 called")
- END(except_vec0_generic)
-
-NESTED(except_vec1_generic, 0, sp)
- PANIC_PIC("Exception vector 1 called")
- END(except_vec1_generic)
-
/*
* General exception vector for all other CPUs.
*
@@ -71,7 +53,7 @@ NESTED(except_vec3_generic, 0, sp)
*/
NESTED(except_vec3_r4000, 0, sp)
.set push
- .set mips3
+ .set arch=r4000
.set noat
mfc0 k1, CP0_CAUSE
li k0, 31<<2
@@ -125,15 +107,93 @@ handle_vcei:
__FINIT
- .align 5
+ .align 5 /* 32 byte rollback region */
+LEAF(__r4k_wait)
+ .set push
+ .set noreorder
+ /* start of rollback region */
+ LONG_L t0, TI_FLAGS($28)
+ nop
+ andi t0, _TIF_NEED_RESCHED
+ bnez t0, 1f
+ nop
+ nop
+ nop
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+ nop
+ nop
+ nop
+#endif
+ .set arch=r4000
+ wait
+ /* end of rollback region (the region size must be a power of two) */
+1:
+ jr ra
+ nop
+ .set pop
+ END(__r4k_wait)
+
+ .macro BUILD_ROLLBACK_PROLOGUE handler
+ FEXPORT(rollback_\handler)
+ .set push
+ .set noat
+ MFC0 k0, CP0_EPC
+ PTR_LA k1, __r4k_wait
+ ori k0, 0x1f /* 32 byte rollback region */
+ xori k0, 0x1f
+ bne k0, k1, 9f
+ MTC0 k0, CP0_EPC
+9:
+ .set pop
+ .endm
+
+ .align 5
+BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * Check to see if the interrupted code has just disabled
+ * interrupts and ignore this interrupt for now if so.
+ *
+ * local_irq_disable() disables interrupts and then calls
+ * trace_hardirqs_off() to track the state. If an interrupt is taken
+ * after interrupts are disabled but before the state is updated
+ * it will appear to restore_all that it is incorrectly returning with
+ * interrupts disabled
+ */
+ .set push
+ .set noat
+ mfc0 k0, CP0_STATUS
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ and k0, ST0_IEP
+ bnez k0, 1f
+
+ mfc0 k0, CP0_EPC
+ .set noreorder
+ j k0
+ rfe
+#else
+ and k0, ST0_IE
+ bnez k0, 1f
+
+ eret
+#endif
+1:
+ .set pop
+#endif
SAVE_ALL
CLI
TRACE_IRQS_OFF
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
- move a0, sp
- j plat_irq_dispatch
+ PTR_LA v0, plat_irq_dispatch
+ jr v0
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
END(handle_int)
__INIT
@@ -154,11 +214,14 @@ NESTED(except_vec4, 0, sp)
/*
* EJTAG debug exception handler.
* The EJTAG debug exception entry point is 0xbfc00480, which
- * normally is in the boot PROM, so the boot PROM must do a
+ * normally is in the boot PROM, so the boot PROM must do an
* unconditional jump to this vector.
*/
NESTED(except_vec_ejtag_debug, 0, sp)
j ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
END(except_vec_ejtag_debug)
__FINIT
@@ -168,24 +231,17 @@ NESTED(except_vec_ejtag_debug, 0, sp)
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
+BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
SAVE_SOME
SAVE_AT
.set push
.set noreorder
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * To keep from blindly blocking *all* interrupts
- * during service by SMTC kernel, we also want to
- * pass the IM value to be cleared.
- */
-EXPORT(except_vec_vi_mori)
- ori a0, $0, 0
-#endif /* CONFIG_MIPS_MT_SMTC */
-EXPORT(except_vec_vi_lui)
+ PTR_LA v1, except_vec_vi_handler
+FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
- j except_vec_vi_handler
-EXPORT(except_vec_vi_ori)
+ jr v1
+FEXPORT(except_vec_vi_ori)
ori v0, 0 /* Patched */
.set pop
END(except_vec_vi)
@@ -198,30 +254,17 @@ EXPORT(except_vec_vi_end)
NESTED(except_vec_vi_handler, 0, sp)
SAVE_TEMP
SAVE_STATIC
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC has an interesting problem that interrupts are level-triggered,
- * and the CLI macro will clear EXL, potentially causing a duplicate
- * interrupt service invocation. So we need to clear the associated
- * IM bit of Status prior to doing CLI, and restore it after the
- * service routine has been invoked - we must assume that the
- * service routine will have cleared the state, and any active
- * level represents a new or otherwised unserviced event...
- */
- mfc0 t1, CP0_STATUS
- and t0, a0, t1
- mfc0 t2, CP0_TCCONTEXT
- or t0, t0, t2
- mtc0 t0, CP0_TCCONTEXT
- xor t1, t1, t0
- mtc0 t1, CP0_STATUS
- _ehb
-#endif /* CONFIG_MIPS_MT_SMTC */
CLI
+#ifdef CONFIG_TRACE_IRQFLAGS
+ move s0, v0
TRACE_IRQS_OFF
- move a0, sp
- jalr v0
- j ret_from_irq
+ move v0, s0
+#endif
+
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+ PTR_LA ra, ret_from_irq
+ jr v0
END(except_vec_vi_handler)
/*
@@ -271,6 +314,9 @@ EXPORT(ejtag_debug_buffer)
*/
NESTED(except_vec_nmi, 0, sp)
j nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
END(except_vec_nmi)
__FINIT
@@ -278,12 +324,20 @@ NESTED(except_vec_nmi, 0, sp)
NESTED(nmi_handler, PT_SIZE, sp)
.set push
.set noat
+ /*
+ * Clear ERL - restore segment mapping
+ * Clear BEV - required for page fault exception handler to work
+ */
+ mfc0 k0, CP0_STATUS
+ ori k0, k0, ST0_EXL
+ li k1, ~(ST0_BEV | ST0_ERL)
+ and k0, k0, k1
+ mtc0 k0, CP0_STATUS
+ _ehb
SAVE_ALL
- move a0, sp
+ move a0, sp
jal nmi_exception_handler
- RESTORE_ALL
- .set mips3
- eret
+ /* nmi_exception_handler never returns */
.set pop
END(nmi_handler)
@@ -301,10 +355,14 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro __build_clear_fpe
+ .set push
+ /* gas fails to assemble cfc1 for some archs (octeon).*/ \
+ .set mips1
cfc1 a1, fcr31
li a2, ~(0x3f << 12)
and a2, a1
ctc1 a2, fcr31
+ .set pop
TRACE_IRQS_ON
STI
.endm
@@ -322,7 +380,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
string escapes and emits bogus warnings if it believes to
recognize an unknown escape code. So make the arguments
start with an n and gas will believe \n is ok ... */
- .macro __BUILD_verbose nexception
+ .macro __BUILD_verbose nexception
LONG_L a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
PRINT("Got \nexception at %08lx\012")
@@ -349,13 +407,13 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set at
__BUILD_\verbose \exception
move a0, sp
- jal do_\handler
- j ret_from_exception
+ PTR_LA ra, ret_from_exception
+ j do_\handler
END(handle_\exception)
.endm
.macro BUILD_HANDLER exception handler clear verbose
- __BUILD_HANDLER \exception \handler \clear \verbose _int
+ __BUILD_HANDLER \exception \handler \clear \verbose _int
.endm
BUILD_HANDLER adel ade ade silent /* #4 */
@@ -367,14 +425,111 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
+ BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
+ BUILD_HANDLER ftlb ftlb none silent /* #16 */
+ BUILD_HANDLER msa msa sti silent /* #21 */
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
+#ifdef CONFIG_HARDWARE_WATCHPOINTS
+ /*
+ * For watch, interrupts will be enabled after the watch
+ * registers are read.
+ */
+ BUILD_HANDLER watch watch cli silent /* #23 */
+#else
BUILD_HANDLER watch watch sti verbose /* #23 */
+#endif
BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
BUILD_HANDLER mt mt sti silent /* #25 */
BUILD_HANDLER dsp dsp sti silent /* #26 */
BUILD_HANDLER reserved reserved sti verbose /* others */
+ .align 5
+ LEAF(handle_ri_rdhwr_vivt)
+ .set push
+ .set noat
+ .set noreorder
+ /* check if TLB contains an entry for EPC */
+ MFC0 k1, CP0_ENTRYHI
+ andi k1, 0xff /* ASID_MASK */
+ MFC0 k0, CP0_EPC
+ PTR_SRL k0, _PAGE_SHIFT + 1
+ PTR_SLL k0, _PAGE_SHIFT + 1
+ or k1, k0
+ MTC0 k1, CP0_ENTRYHI
+ mtc0_tlbw_hazard
+ tlbp
+ tlb_probe_hazard
+ mfc0 k1, CP0_INDEX
+ .set pop
+ bltz k1, handle_ri /* slow path */
+ /* fall thru */
+ END(handle_ri_rdhwr_vivt)
+
+ LEAF(handle_ri_rdhwr)
+ .set push
+ .set noat
+ .set noreorder
+ /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
+ /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
+ MFC0 k1, CP0_EPC
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+ and k0, k1, 1
+ beqz k0, 1f
+ xor k1, k0
+ lhu k0, (k1)
+ lhu k1, 2(k1)
+ ins k1, k0, 16, 16
+ lui k0, 0x007d
+ b docheck
+ ori k0, 0x6b3c
+1:
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#else
+ andi k0, k1, 1
+ bnez k0, handle_ri
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#endif
+ .set reorder
+docheck:
+ bne k0, k1, handle_ri /* if not ours */
+
+isrdhwr:
+ /* The insn is rdhwr. No need to check CAUSE.BD here. */
+ get_saved_sp /* k1 := current_thread_info */
+ .set noreorder
+ MFC0 k0, CP0_EPC
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ LONG_ADDIU k0, 4
+ jr k0
+ rfe
+#else
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ LONG_ADDIU k0, 4 /* stall on $k0 */
+#else
+ .set at=v1
+ LONG_ADDIU k0, 4
+ .set noat
+#endif
+ MTC0 k0, CP0_EPC
+ /* I hope three instructions between MTC0 and ERET are enough... */
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ .set arch=r4000
+ eret
+ .set mips0
+#endif
+ .set pop
+ END(handle_ri_rdhwr)
+
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
new file mode 100644
index 00000000000..c6854d9df92
--- /dev/null
+++ b/arch/mips/kernel/gpio_txx9.c
@@ -0,0 +1,89 @@
+/*
+ * A gpio chip driver for TXx9 SoCs
+ *
+ * Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/txx9pio.h>
+
+static DEFINE_SPINLOCK(txx9_gpio_lock);
+
+static struct txx9_pio_reg __iomem *txx9_pioptr;
+
+static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return __raw_readl(&txx9_pioptr->din) & (1 << offset);
+}
+
+static void txx9_gpio_set_raw(unsigned int offset, int value)
+{
+ u32 val;
+ val = __raw_readl(&txx9_pioptr->dout);
+ if (value)
+ val |= 1 << offset;
+ else
+ val &= ~(1 << offset);
+ __raw_writel(val, &txx9_pioptr->dout);
+}
+
+static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ txx9_gpio_set_raw(offset, value);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+}
+
+static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+ return 0;
+}
+
+static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ txx9_gpio_set_raw(offset, value);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+ return 0;
+}
+
+static struct gpio_chip txx9_gpio_chip = {
+ .get = txx9_gpio_get,
+ .set = txx9_gpio_set,
+ .direction_input = txx9_gpio_dir_in,
+ .direction_output = txx9_gpio_dir_out,
+ .label = "TXx9",
+};
+
+int __init txx9_gpio_init(unsigned long baseaddr,
+ unsigned int base, unsigned int num)
+{
+ txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
+ if (!txx9_pioptr)
+ return -ENODEV;
+ txx9_gpio_chip.base = base;
+ txx9_gpio_chip.ngpio = num;
+ return gpiochip_add(&txx9_gpio_chip);
+}
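
Once txx9_gpio_init() has registered the chip, pins are reachable through the generic gpiolib calls of this era. A consumer-side sketch, assuming pin 5 is wired to an LED (the pin number and label are assumptions):

#include <linux/gpio.h>
#include <linux/init.h>

static int __init txx9_led_demo_init(void)
{
	int err;

	err = gpio_request(5, "demo-led");	/* pin 5 is an assumption */
	if (err)
		return err;

	return gpio_direction_output(5, 1);	/* drive the line high */
}
late_initcall(txx9_led_demo_init);
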
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 8c6db0fc72f..95afd663cd4 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -16,65 +16,17 @@
#include <linux/init.h>
#include <linux/threads.h>
+#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
-#include <asm/page.h>
+#include <asm/pgtable-bits.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <kernel-entry-init.h>
- .macro ARC64_TWIDDLE_PC
-#if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL)
- /* We get launched at a XKPHYS address but the kernel is linked to
- run at a KSEG0 address, so jump there. */
- PTR_LA t0, \@f
- jr t0
-\@:
-#endif
- .endm
-
- /*
- * inputs are the text nasid in t1, data nasid in t2.
- */
- .macro MAPPED_KERNEL_SETUP_TLB
-#ifdef CONFIG_MAPPED_KERNEL
- /*
- * This needs to read the nasid - assume 0 for now.
- * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0,
- * 0+DVG in tlblo_1.
- */
- dli t0, 0xffffffffc0000000
- dmtc0 t0, CP0_ENTRYHI
- li t0, 0x1c000 # Offset of text into node memory
- dsll t1, NASID_SHFT # Shift text nasid into place
- dsll t2, NASID_SHFT # Same for data nasid
- or t1, t1, t0 # Physical load address of kernel text
- or t2, t2, t0 # Physical load address of kernel data
- dsrl t1, 12 # 4K pfn
- dsrl t2, 12 # 4K pfn
- dsll t1, 6 # Get pfn into place
- dsll t2, 6 # Get pfn into place
- li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _CACHE_CACHABLE_COW) >> 6)
- or t0, t0, t1
- mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr
- li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _PAGE_DIRTY|_CACHE_CACHABLE_COW) >> 6)
- or t0, t0, t2
- mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr
- li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M
- mtc0 t0, CP0_PAGEMASK
- li t0, 0 # KMAP_INX
- mtc0 t0, CP0_INDEX
- li t0, 1
- mtc0 t0, CP0_WIRED
- tlbwi
-#else
- mtc0 zero, CP0_WIRED
-#endif
- .endm
-
/*
* For the moment disable interrupts, mark the kernel mode and
* set ST0_KX so that the CPU does not spit fire when using
@@ -83,33 +35,12 @@
*/
.macro setup_c0_status set clr
.set push
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * For SMTC, we need to set privilege and disable interrupts only for
- * the current TC, using the TCStatus register.
- */
- mfc0 t0, CP0_TCSTATUS
- /* Fortunately CU 0 is in the same place in both registers */
- /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
- li t1, ST0_CU0 | 0x08001c00
- or t0, t1
- /* Clear TKSU, leave IXMT */
- xori t0, 0x00001800
- mtc0 t0, CP0_TCSTATUS
- _ehb
- /* We need to leave the global IE bit set, but clear EXL...*/
- mfc0 t0, CP0_STATUS
- or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
- xor t0, ST0_EXL | ST0_ERL | \clr
- mtc0 t0, CP0_STATUS
-#else
mfc0 t0, CP0_STATUS
or t0, ST0_CU0|\set|0x1f|\clr
xor t0, 0x1f|\clr
mtc0 t0, CP0_STATUS
.set noreorder
sll zero,3 # ehb
-#endif
.set pop
.endm
@@ -129,24 +60,27 @@
#endif
.endm
+#ifndef CONFIG_NO_EXCEPT_FILL
/*
* Reserved space for exception handlers.
* Necessary for machines which link their kernels at KSEG0.
*/
.fill 0x400
+#endif
-EXPORT(stext) # used for profiling
EXPORT(_stext)
-#if defined(CONFIG_QEMU) || defined(CONFIG_MIPS_SIM)
+#ifdef CONFIG_BOOT_RAW
/*
* Give us a fighting chance of running if execution begins at the
- * kernel load address. This is needed because this platform does
+ * kernel load address. This is needed because this platform does
* not have an ELF loader yet.
*/
+FEXPORT(__kernel_entry)
j kernel_entry
#endif
- __INIT
+
+ __REF
NESTED(kernel_entry, 16, sp) # kernel entry point
@@ -154,25 +88,11 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
setup_c0_status_pri
- ARC64_TWIDDLE_PC
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
- * We still need to enable interrupts globally in Status,
- * and clear EXL/ERL.
- *
- * TCContext is used to track interrupt levels under
- * service in SMTC kernel. Clear for boot TC before
- * allowing any interrupts.
- */
- mtc0 zero, CP0_TCCONTEXT
-
- mfc0 t0, CP0_STATUS
- ori t0, t0, 0xff1f
- xori t0, t0, 0x001e
- mtc0 t0, CP0_STATUS
-#endif /* CONFIG_MIPS_MT_SMTC */
+ /* We might not get launched at the address the kernel is linked to,
+ so we jump there. */
+ PTR_LA t0, 0f
+ jr t0
+0:
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
@@ -189,66 +109,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
MTC0 zero, CP0_CONTEXT # clear context register
PTR_LA $28, init_thread_union
- PTR_ADDIU sp, $28, _THREAD_SIZE - 32
+ /* Set the SP after an empty pt_regs. */
+ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE
+ PTR_ADDU sp, $28
+ back_to_back_c0_hazard
set_saved_sp sp, t0, t1
PTR_SUBU sp, 4 * SZREG # init stack pointer
j start_kernel
END(kernel_entry)
-#ifdef CONFIG_QEMU
- __INIT
-#endif
-
#ifdef CONFIG_SMP
/*
- * SMP slave cpus entry point. Board specific code for bootstrap calls this
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * Read-modify-writes of Status must be atomic, and this
- * is one case where CLI is invoked without EXL being
- * necessarily set. The CLI and setup_c0_status will
- * in fact be redundant for all but the first TC of
- * each VPE being booted.
- */
- DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
- jal mips_ihb
-#endif /* CONFIG_MIPS_MT_SMTC */
- setup_c0_status_sec
smp_slave_setup
-#ifdef CONFIG_MIPS_MT_SMTC
- andi t2, t2, VPECONTROL_TE
- beqz t2, 2f
- EMT # emt
-2:
-#endif /* CONFIG_MIPS_MT_SMTC */
+ setup_c0_status_sec
j start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */
-
- __FINIT
-
- .comm kernelsp, NR_CPUS * 8, 8
- .comm pgd_current, NR_CPUS * 8, 8
-
- .comm fw_arg0, SZREG, SZREG # firmware arguments
- .comm fw_arg1, SZREG, SZREG
- .comm fw_arg2, SZREG, SZREG
- .comm fw_arg3, SZREG, SZREG
-
- .macro page name, order
- .comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order)
- .endm
-
- /*
- * On 64-bit we've got three-level pagetables with a slightly
- * different layout ...
- */
- page swapper_pg_dir, _PGD_ORDER
-#ifdef CONFIG_64BIT
- page invalid_pmd_table, _PMD_ORDER
-#endif
- page invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 475df690421..c5bc344fc74 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -1,28 +1,39 @@
/*
- * Copyright (C) 2006 IBM Corporation
+ * i8253.c 8253/PIT functions
*
- * Implements device information for i8253 timer chip
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation
*/
+#include <linux/clockchips.h>
+#include <linux/i8253.h>
+#include <linux/export.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
-#include <linux/platform_device.h>
+#include <asm/time.h>
-static __init int add_pcspkr(void)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
- struct platform_device *pd;
- int ret;
+ i8253_clockevent.event_handler(&i8253_clockevent);
+
+ return IRQ_HANDLED;
+}
- pd = platform_device_alloc("pcspkr", -1);
- if (!pd)
- return -ENOMEM;
+static struct irqaction irq0 = {
+ .handler = timer_interrupt,
+ .flags = IRQF_NOBALANCING | IRQF_TIMER,
+ .name = "timer"
+};
- ret = platform_device_add(pd);
- if (ret)
- platform_device_put(pd);
+void __init setup_pit_timer(void)
+{
+ clockevent_i8253_init(true);
+ setup_irq(0, &irq0);
+}
+
+static int __init init_pit_clocksource(void)
+{
+ if (num_possible_cpus() > 1) /* PIT does not scale! */
+ return 0;
- return ret;
+ return clocksource_i8253_init();
}
-device_initcall(add_pcspkr);
+arch_initcall(init_pit_clocksource);
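
clockevent_i8253_init()/clocksource_i8253_init() program channel 0 from the fixed PIT_TICK_RATE input clock. A sketch of the divisor arithmetic involved, rounded to nearest (the tick rate chosen below is an assumption):

#include <stdio.h>

#define PIT_TICK_RATE 1193182UL	/* i8253 channel 0 input clock, in Hz */

int main(void)
{
	unsigned long hz = 100;	/* kernel tick rate; 100 is an assumption */
	unsigned long latch = (PIT_TICK_RATE + hz / 2) / hz;

	printf("PIT divisor for %lu Hz: %lu\n", hz, latch);	/* 11932 */
	return 0;
}
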
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index ea36c8e8852..50b364897dd 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -14,14 +14,12 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
+#include <linux/irq.h>
#include <asm/i8259.h>
#include <asm/io.h>
-void enable_8259A_irq(unsigned int irq);
-void disable_8259A_irq(unsigned int irq);
-
/*
* This is the 'legacy' 8259A Programmable Interrupt Controller,
* present in the majority of PC/AT boxes.
@@ -31,34 +29,19 @@ void disable_8259A_irq(unsigned int irq);
* moves to arch independent land
*/
-DEFINE_SPINLOCK(i8259A_lock);
-
-static void end_8259A_irq (unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
- irq_desc[irq].action)
- enable_8259A_irq(irq);
-}
-
-#define shutdown_8259A_irq disable_8259A_irq
-
-void mask_and_ack_8259A(unsigned int);
-
-static unsigned int startup_8259A_irq(unsigned int irq)
-{
- enable_8259A_irq(irq);
-
- return 0; /* never anything pending */
-}
-
-static struct irq_chip i8259A_irq_type = {
- .typename = "XT-PIC",
- .startup = startup_8259A_irq,
- .shutdown = shutdown_8259A_irq,
- .enable = enable_8259A_irq,
- .disable = disable_8259A_irq,
- .ack = mask_and_ack_8259A,
- .end = end_8259A_irq,
+static int i8259A_auto_eoi = -1;
+DEFINE_RAW_SPINLOCK(i8259A_lock);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
+static void init_8259A(int auto_eoi);
+
+static struct irq_chip i8259A_chip = {
+ .name = "XT-PIC",
+ .irq_mask = disable_8259A_irq,
+ .irq_disable = disable_8259A_irq,
+ .irq_unmask = enable_8259A_irq,
+ .irq_mask_ack = mask_and_ack_8259A,
};
/*
@@ -70,49 +53,53 @@ static struct irq_chip i8259A_irq_type = {
*/
static unsigned int cached_irq_mask = 0xffff;
-#define cached_21 (cached_irq_mask)
-#define cached_A1 (cached_irq_mask >> 8)
+#define cached_master_mask (cached_irq_mask)
+#define cached_slave_mask (cached_irq_mask >> 8)
-void disable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(struct irq_data *d)
{
- unsigned int mask = 1 << irq;
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ mask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
- outb(cached_A1,0xA1);
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
else
- outb(cached_21,0x21);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
-void enable_8259A_irq(unsigned int irq)
+static void enable_8259A_irq(struct irq_data *d)
{
- unsigned int mask = ~(1 << irq);
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ mask = ~(1 << irq);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
- outb(cached_A1,0xA1);
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
else
- outb(cached_21,0x21);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
int i8259A_irq_pending(unsigned int irq)
{
- unsigned int mask = 1 << irq;
+ unsigned int mask;
unsigned long flags;
int ret;
- spin_lock_irqsave(&i8259A_lock, flags);
+ irq -= I8259A_IRQ_BASE;
+ mask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
- ret = inb(0x20) & mask;
+ ret = inb(PIC_MASTER_CMD) & mask;
else
- ret = inb(0xA0) & (mask >> 8);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return ret;
}
@@ -120,7 +107,7 @@ int i8259A_irq_pending(unsigned int irq)
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].chip = &i8259A_irq_type;
+ irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
enable_irq(irq);
}
@@ -136,14 +123,14 @@ static inline int i8259A_irq_real(unsigned int irq)
int irqmask = 1 << irq;
if (irq < 8) {
- outb(0x0B,0x20); /* ISR register */
- value = inb(0x20) & irqmask;
- outb(0x0A,0x20); /* back to the IRR register */
+ outb(0x0B, PIC_MASTER_CMD); /* ISR register */
+ value = inb(PIC_MASTER_CMD) & irqmask;
+ outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */
return value;
}
- outb(0x0B,0xA0); /* ISR register */
- value = inb(0xA0) & (irqmask >> 8);
- outb(0x0A,0xA0); /* back to the IRR register */
+ outb(0x0B, PIC_SLAVE_CMD); /* ISR register */
+ value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+ outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */
return value;
}
@@ -153,24 +140,27 @@ static inline int i8259A_irq_real(unsigned int irq)
* first, _then_ send the EOI, and the order of EOI
* to the two 8259s is important!
*/
-void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(struct irq_data *d)
{
- unsigned int irqmask = 1 << irq;
+ unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ irqmask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
/*
- * Lightweight spurious IRQ detection. We do not want to overdo
- * spurious IRQ handling - it's usually a sign of hardware problems, so
- * we only do the checks we can do without slowing down good hardware
- * nnecesserily.
+ * Lightweight spurious IRQ detection. We do not want
+ * to overdo spurious IRQ handling - it's usually a sign
+ * of hardware problems, so we only do the checks we can
+ * do without slowing down good hardware unnecessarily.
*
- * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting
- * rom the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A.
- * Thus we can check spurious 8259A IRQs without doing the quite slow
- * i8259A_irq_real() call for every IRQ. This does not cover 100% of
- * spurious interrupts, but should be enough to warn the user that
- * there is something bad going on ...
+ * Note that IRQ7 and IRQ15 (the two spurious IRQs
+ * usually resulting from the 8259A-1|2 PICs) occur
+ * even if the IRQ is masked in the 8259A. Thus we
+ * can check spurious 8259A IRQs without doing the
+ * quite slow i8259A_irq_real() call for every IRQ.
+ * This does not cover 100% of spurious interrupts,
+ * but should be enough to warn the user that there
+ * is something bad going on ...
*/
if (cached_irq_mask & irqmask)
goto spurious_8259A_irq;
@@ -178,20 +168,16 @@ void mask_and_ack_8259A(unsigned int irq)
handle_real_irq:
if (irq & 8) {
- inb(0xA1); /* DUMMY - (do we need this?) */
- outb(cached_A1,0xA1);
- outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
- outb(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */
+ inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+ outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
} else {
- inb(0x21); /* DUMMY - (do we need this?) */
- outb(cached_21,0x21);
- outb(0x60+irq,0x20); /* 'Specific EOI' to master */
+ inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI' to master */
}
-#ifdef CONFIG_MIPS_MT_SMTC
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return;
spurious_8259A_irq:
@@ -206,7 +192,7 @@ spurious_8259A_irq:
goto handle_real_irq;
{
- static int spurious_irq_mask = 0;
+ static int spurious_irq_mask;
/*
* At this point we can be sure the IRQ is spurious,
* lets ACK and report it. [once per IRQ]
@@ -225,88 +211,101 @@ spurious_8259A_irq:
}
}
-static int i8259A_resume(struct sys_device *dev)
+static void i8259A_resume(void)
{
- init_8259A(0);
- return 0;
+ if (i8259A_auto_eoi >= 0)
+ init_8259A(i8259A_auto_eoi);
}
-static struct sysdev_class i8259_sysdev_class = {
- set_kset_name("i8259"),
- .resume = i8259A_resume,
-};
+static void i8259A_shutdown(void)
+{
+ /* Put the i8259A into a quiescent state that
+ * the kernel initialization code can get it
+ * out of.
+ */
+ if (i8259A_auto_eoi >= 0) {
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
+ }
+}
-static struct sys_device device_i8259A = {
- .id = 0,
- .cls = &i8259_sysdev_class,
+static struct syscore_ops i8259_syscore_ops = {
+ .resume = i8259A_resume,
+ .shutdown = i8259A_shutdown,
};
static int __init i8259A_init_sysfs(void)
{
- int error = sysdev_class_register(&i8259_sysdev_class);
- if (!error)
- error = sysdev_register(&device_i8259A);
- return error;
+ register_syscore_ops(&i8259_syscore_ops);
+ return 0;
}
device_initcall(i8259A_init_sysfs);
-void __init init_8259A(int auto_eoi)
+static void init_8259A(int auto_eoi)
{
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ i8259A_auto_eoi = auto_eoi;
+
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
- outb(0xff, 0x21); /* mask all of 8259A-1 */
- outb(0xff, 0xA1); /* mask all of 8259A-2 */
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
/*
* outb_p - this has to work on a wide range of PC hardware.
*/
- outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */
- outb_p(0x00, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */
- outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */
- if (auto_eoi)
- outb_p(0x03, 0x21); /* master does Auto EOI */
- else
- outb_p(0x01, 0x21); /* master expects normal EOI */
-
- outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */
- outb_p(0x08, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */
- outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */
- outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode
- is to be investigated) */
-
+ outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
+ outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+ outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */
+ if (auto_eoi) /* master does Auto EOI */
+ outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+ else /* master expects normal EOI */
+ outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+
+ outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */
+ outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+ outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */
+ outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
if (auto_eoi)
/*
- * in AEOI mode we just have to mask the interrupt
+ * In AEOI mode we just have to mask the interrupt
* when acking.
*/
- i8259A_irq_type.ack = disable_8259A_irq;
+ i8259A_chip.irq_mask_ack = disable_8259A_irq;
else
- i8259A_irq_type.ack = mask_and_ack_8259A;
+ i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
udelay(100); /* wait for 8259A to initialize */
- outb(cached_21, 0x21); /* restore master IRQ mask */
- outb(cached_A1, 0xA1); /* restore slave IRQ mask */
+ outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
- no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL
+ .handler = no_action,
+ .name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct resource pic1_io_resource = {
- .name = "pic1", .start = 0x20, .end = 0x3f, .flags = IORESOURCE_BUSY
+ .name = "pic1",
+ .start = PIC_MASTER_CMD,
+ .end = PIC_MASTER_IMR,
+ .flags = IORESOURCE_BUSY
};
static struct resource pic2_io_resource = {
- .name = "pic2", .start = 0xa0, .end = 0xbf, .flags = IORESOURCE_BUSY
+ .name = "pic2",
+ .start = PIC_SLAVE_CMD,
+ .end = PIC_SLAVE_IMR,
+ .flags = IORESOURCE_BUSY
};
/*
@@ -314,21 +313,19 @@ static struct resource pic2_io_resource = {
* driver compatibility reasons interrupts 0 - 15 to be the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
-void __init init_i8259_irqs (void)
+void __init init_i8259_irqs(void)
{
int i;
- request_resource(&ioport_resource, &pic1_io_resource);
- request_resource(&ioport_resource, &pic2_io_resource);
+ insert_resource(&ioport_resource, &pic1_io_resource);
+ insert_resource(&ioport_resource, &pic2_io_resource);
init_8259A(0);
- for (i = 0; i < 16; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &i8259A_irq_type;
+ for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
+ irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
+ irq_set_probe(i);
}
- setup_irq(2, &irq2);
+ setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
}
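
mask_and_ack_8259A() above ends the interrupt with an OCW2 "specific EOI", whose low three bits select the level within the issuing PIC; slave IRQs additionally need a second EOI to the master for the cascade line (IRQ2). A stand-alone sketch of that byte computation:

#include <stdio.h>

/* OCW2 "specific EOI": command 0x60 plus the IRQ level (0..7) within
 * the PIC that raised it; IRQs 8..15 use irq & 7 on the slave. */
static unsigned char specific_eoi(unsigned int irq)
{
	return 0x60 + (irq & 7);
}

int main(void)
{
	printf("EOI byte for IRQ3:  0x%02x\n", specific_eoi(3));	/* 0x63 */
	printf("EOI byte for IRQ12: 0x%02x\n", specific_eoi(12));	/* 0x64 */
	return 0;
}
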
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
new file mode 100644
index 00000000000..09ce4598075
--- /dev/null
+++ b/arch/mips/kernel/idle.c
@@ -0,0 +1,249 @@
+/*
+ * MIPS idle loop and WAIT instruction support.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Not all MIPS CPUs have the "wait" instruction available, and the
+ * implementation of the "wait" feature differs between CPU families.
+ * This pointer selects the function that implements the CPU-specific
+ * wait. The wait instruction stops the pipeline and significantly
+ * reduces the power consumption of the CPU.
+ */
+void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
+
+static void r3081_wait(void)
+{
+ unsigned long cfg = read_c0_conf();
+ write_c0_conf(cfg | R30XX_CONF_HALT);
+ local_irq_enable();
+}
+
+static void r39xx_wait(void)
+{
+ if (!need_resched())
+ write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+ local_irq_enable();
+}
+
+void r4k_wait(void)
+{
+ local_irq_enable();
+ __r4k_wait();
+}
+
+/*
+ * This variant is preferable as it allows the need_resched test and the
+ * decision to go to sleep to be made atomically. Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+void r4k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " wait \n"
+ " .set pop \n");
+ local_irq_enable();
+ __asm__(
+ " .globl __pastwait \n"
+ "__pastwait: \n");
+}
+
+/*
+ * The RM7000 variant has to handle erratum 38. The workaround is to not
+ * have any pending stores when the WAIT instruction is executed.
+ */
+static void rm7k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " .set noat \n"
+ " mfc0 $1, $12 \n"
+ " sync \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " wait \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " .set pop \n");
+ local_irq_enable();
+}
+
+/*
+ * Au1 'wait' is only useful when the 32kHz counter is used as the timer,
+ * since the core clock (and the cp0 counter) stops upon executing it. Only
+ * an interrupt can wake the CPU, so interrupts must be enabled before
+ * entering idle modes.
+ */
+static void au1k_wait(void)
+{
+ unsigned long c0status = read_c0_status() | 1; /* irqs on */
+
+ __asm__(
+ " .set arch=r4000 \n"
+ " cache 0x14, 0(%0) \n"
+ " cache 0x14, 32(%0) \n"
+ " sync \n"
+ " mtc0 %1, $12 \n" /* wr c0status */
+ " wait \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " .set mips0 \n"
+ : : "r" (au1k_wait), "r" (c0status));
+}
+
+static int __initdata nowait;
+
+static int __init wait_disable(char *s)
+{
+ nowait = 1;
+
+ return 1;
+}
+
+__setup("nowait", wait_disable);
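The __setup() hook above registers "nowait" as a kernel command-line flag:
when it is present, wait_disable() sets nowait, and check_wait() below bails
out before assigning cpu_wait, so arch_cpu_idle() falls back to a plain
local_irq_enable(). An illustrative command line:

	console=ttyS0,115200 nowait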
+
+void __init check_wait(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ if (nowait) {
+ printk("Wait instruction disabled.\n");
+ return;
+ }
+
+ switch (current_cpu_type()) {
+ case CPU_R3081:
+ case CPU_R3081E:
+ cpu_wait = r3081_wait;
+ break;
+ case CPU_TX3927:
+ cpu_wait = r39xx_wait;
+ break;
+ case CPU_R4200:
+/* case CPU_R4300: */
+ case CPU_R4600:
+ case CPU_R4640:
+ case CPU_R4650:
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_R5500:
+ case CPU_NEVADA:
+ case CPU_4KC:
+ case CPU_4KEC:
+ case CPU_4KSC:
+ case CPU_5KC:
+ case CPU_25KF:
+ case CPU_PR4450:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_BMIPS5000:
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ case CPU_JZRISC:
+ case CPU_LOONGSON1:
+ case CPU_XLR:
+ case CPU_XLP:
+ cpu_wait = r4k_wait;
+ break;
+
+ case CPU_RM7000:
+ cpu_wait = rm7k_wait_irqoff;
+ break;
+
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_M5150:
+ cpu_wait = r4k_wait;
+ if (read_c0_config7() & MIPS_CONF7_WII)
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_74K:
+ cpu_wait = r4k_wait;
+ if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_TX49XX:
+ cpu_wait = r4k_wait_irqoff;
+ break;
+ case CPU_ALCHEMY:
+ cpu_wait = au1k_wait;
+ break;
+ case CPU_20KC:
+ /*
+ * WAIT on Rev1.0 has E1, E2, E3 and E16.
+ * WAIT on Rev2.0 and Rev3.0 has E16.
+ * Rev3.1 WAIT is a nop, so don't bother.
+ */
+ if ((c->processor_id & 0xff) <= 0x64)
+ break;
+
+ /*
+ * Another rev is incrementing c0_count at a reduced clock
+ * rate while in WAIT mode. So we basically have the choice
+ * between using the cp0 timer as clocksource or avoiding
+ * the WAIT instruction. Until more details are known,
+ * disable the use of WAIT for 20Kc entirely.
+ cpu_wait = r4k_wait;
+ */
+ break;
+ default:
+ break;
+ }
+}
+
+void arch_cpu_idle(void)
+{
+ if (cpu_wait)
+ cpu_wait();
+ else
+ local_irq_enable();
+}
+
+#ifdef CONFIG_CPU_IDLE
+
+int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ arch_cpu_idle();
+ return index;
+}
+
+#endif
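mips_cpuidle_wait_enter() has the standard cpuidle ->enter() signature, so a
platform driver can plug it in directly as the enter hook of its shallowest
idle state. A minimal sketch, assuming the usual cpuidle driver registration
API of this era; the driver name, latency and residency figures are
placeholders rather than values from this patch:

	#include <linux/cpuidle.h>
	#include <linux/module.h>
	#include <asm/idle.h>

	static struct cpuidle_driver example_cpuidle_driver = {
		.name		= "example_idle",	/* hypothetical name */
		.owner		= THIS_MODULE,
		.states = {
			{
				.enter			= mips_cpuidle_wait_enter,
				.exit_latency		= 1,	/* placeholder */
				.target_residency	= 1,	/* placeholder */
				.name			= "wait",
				.desc			= "MIPS wait instruction",
			},
		},
		.state_count = 1,
	};

	/* e.g. from an init hook: cpuidle_register(&example_cpuidle_driver, NULL); */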
diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c
deleted file mode 100644
index aeda7f58391..00000000000
--- a/arch/mips/kernel/init_task.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/thread_info.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by making sure
- * the linker maps this in the .text segment right after head.S,
- * and making head.S ensure the proper alignment.
- *
- * The things we do for performance..
- */
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"),
- __aligned__(THREAD_SIZE))) =
- { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
diff --git a/arch/mips/kernel/irix5sys.S b/arch/mips/kernel/irix5sys.S
deleted file mode 100644
index eeef891093e..00000000000
--- a/arch/mips/kernel/irix5sys.S
+++ /dev/null
@@ -1,1041 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * 32-bit IRIX5 ABI system call table derived from original file 'irix5sys.h'
- * created by David S. Miller.
- *
- * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
- * Copyright (C) 2004 Steven J. Hill <sjhill@realitydiluted.com>
- */
-#include <asm/asm.h>
-
- /*
- * Key:
- * V == Valid and should work as expected for most cases.
- * HV == Half Valid, some things will work, some likely will not
- * IV == InValid, certainly will not work at all yet
- * ?V == ?'ably Valid, I have not done enough looking into it
- * DC == Don't Care, a rats ass we couldn't give
- */
-
- .macro irix5syscalltable
-
- sys sys_syscall 0 /* 1000 sysindir() V*/
- sys sys_exit 1 /* 1001 exit() V*/
- sys sys_fork 0 /* 1002 fork() V*/
- sys sys_read 3 /* 1003 read() V*/
- sys sys_write 3 /* 1004 write() V*/
- sys sys_open 3 /* 1005 open() V*/
- sys sys_close 1 /* 1006 close() V*/
- sys irix_unimp 0 /* 1007 (XXX IRIX 4 wait) V*/
- sys sys_creat 2 /* 1008 creat() V*/
- sys sys_link 2 /* 1009 link() V*/
- sys sys_unlink 1 /* 1010 unlink() V*/
- sys irix_exec 0 /* 1011 exec() V*/
- sys sys_chdir 1 /* 1012 chdir() V*/
- sys irix_gtime 0 /* 1013 time() V*/
- sys irix_unimp 0 /* 1014 (XXX IRIX 4 mknod) V*/
- sys sys_chmod 2 /* 1015 chmod() V*/
- sys sys_chown 3 /* 1016 chown() V*/
- sys irix_brk 1 /* 1017 break() V*/
- sys irix_unimp 0 /* 1018 (XXX IRIX 4 stat) V*/
- sys sys_lseek 3 /* 1019 lseek() XXX64bit HV*/
- sys irix_getpid 0 /* 1020 getpid() V*/
- sys irix_mount 6 /* 1021 mount() IV*/
- sys sys_umount 1 /* 1022 umount() V*/
- sys sys_setuid 1 /* 1023 setuid() V*/
- sys irix_getuid 0 /* 1024 getuid() V*/
- sys irix_stime 1 /* 1025 stime() V*/
- sys irix_unimp 4 /* 1026 XXX ptrace() IV*/
- sys irix_alarm 1 /* 1027 alarm() V*/
- sys irix_unimp 0 /* 1028 (XXX IRIX 4 fstat) V*/
- sys irix_pause 0 /* 1029 pause() V*/
- sys sys_utime 2 /* 1030 utime() V*/
- sys irix_unimp 0 /* 1031 nuthin' V*/
- sys irix_unimp 0 /* 1032 nobody home man... V*/
- sys sys_access 2 /* 1033 access() V*/
- sys sys_nice 1 /* 1034 nice() V*/
- sys irix_statfs 2 /* 1035 statfs() V*/
- sys sys_sync 0 /* 1036 sync() V*/
- sys sys_kill 2 /* 1037 kill() V*/
- sys irix_fstatfs 2 /* 1038 fstatfs() V*/
- sys irix_setpgrp 1 /* 1039 setpgrp() V*/
- sys irix_syssgi 0 /* 1040 syssgi() HV*/
- sys sys_dup 1 /* 1041 dup() V*/
- sys sys_pipe 0 /* 1042 pipe() V*/
- sys irix_times 1 /* 1043 times() V*/
- sys irix_unimp 0 /* 1044 XXX profil() IV*/
- sys irix_unimp 0 /* 1045 XXX lock() IV*/
- sys sys_setgid 1 /* 1046 setgid() V*/
- sys irix_getgid 0 /* 1047 getgid() V*/
- sys irix_unimp 0 /* 1048 (XXX IRIX 4 ssig) V*/
- sys irix_msgsys 6 /* 1049 sys_msgsys V*/
- sys sys_sysmips 4 /* 1050 sysmips() HV*/
- sys irix_unimp 0 /* 1051 XXX sysacct() IV*/
- sys irix_shmsys 5 /* 1052 sys_shmsys V*/
- sys irix_semsys 0 /* 1053 sys_semsys V*/
- sys irix_ioctl 3 /* 1054 ioctl() HV*/
- sys irix_uadmin 0 /* 1055 XXX sys_uadmin() HC*/
- sys irix_sysmp 0 /* 1056 sysmp() HV*/
- sys irix_utssys 4 /* 1057 sys_utssys() HV*/
- sys irix_unimp 0 /* 1058 nada enchilada V*/
- sys irix_exece 0 /* 1059 exece() V*/
- sys sys_umask 1 /* 1060 umask() V*/
- sys sys_chroot 1 /* 1061 chroot() V*/
- sys irix_fcntl 3 /* 1062 fcntl() ?V*/
- sys irix_ulimit 2 /* 1063 ulimit() HV*/
- sys irix_unimp 0 /* 1064 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1065 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1066 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1067 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1068 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1069 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1070 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1071 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1072 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1073 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1074 nuttin' V*/
- sys irix_unimp 0 /* 1075 XXX sys_getrlimit64()IV*/
- sys irix_unimp 0 /* 1076 XXX sys_setrlimit64()IV*/
- sys sys_nanosleep 2 /* 1077 nanosleep() V*/
- sys irix_lseek64 5 /* 1078 lseek64() ?V*/
- sys sys_rmdir 1 /* 1079 rmdir() V*/
- sys sys_mkdir 2 /* 1080 mkdir() V*/
- sys sys_getdents 3 /* 1081 getdents() V*/
- sys irix_sginap 1 /* 1082 sys_sginap() V*/
- sys irix_sgikopt 3 /* 1083 sys_sgikopt() DC*/
- sys sys_sysfs 3 /* 1084 sysfs() ?V*/
- sys irix_unimp 0 /* 1085 XXX sys_getmsg() DC*/
- sys irix_unimp 0 /* 1086 XXX sys_putmsg() DC*/
- sys sys_poll 3 /* 1087 poll() V*/
- sys irix_sigreturn 0 /* 1088 sigreturn() ?V*/
- sys sys_accept 3 /* 1089 accept() V*/
- sys sys_bind 3 /* 1090 bind() V*/
- sys sys_connect 3 /* 1091 connect() V*/
- sys irix_gethostid 0 /* 1092 sys_gethostid() ?V*/
- sys sys_getpeername 3 /* 1093 getpeername() V*/
- sys sys_getsockname 3 /* 1094 getsockname() V*/
- sys sys_getsockopt 5 /* 1095 getsockopt() V*/
- sys sys_listen 2 /* 1096 listen() V*/
- sys sys_recv 4 /* 1097 recv() V*/
- sys sys_recvfrom 6 /* 1098 recvfrom() V*/
- sys sys_recvmsg 3 /* 1099 recvmsg() V*/
- sys sys_select 5 /* 1100 select() V*/
- sys sys_send 4 /* 1101 send() V*/
- sys sys_sendmsg 3 /* 1102 sendmsg() V*/
- sys sys_sendto 6 /* 1103 sendto() V*/
- sys irix_sethostid 1 /* 1104 sys_sethostid() ?V*/
- sys sys_setsockopt 5 /* 1105 setsockopt() V*/
- sys sys_shutdown 2 /* 1106 shutdown() ?V*/
- sys irix_socket 3 /* 1107 socket() V*/
- sys sys_gethostname 2 /* 1108 sys_gethostname() ?V*/
- sys sys_sethostname 2 /* 1109 sethostname() ?V*/
- sys irix_getdomainname 2 /* 1110 sys_getdomainname() ?V*/
- sys sys_setdomainname 2 /* 1111 setdomainname() ?V*/
- sys sys_truncate 2 /* 1112 truncate() V*/
- sys sys_ftruncate 2 /* 1113 ftruncate() V*/
- sys sys_rename 2 /* 1114 rename() V*/
- sys sys_symlink 2 /* 1115 symlink() V*/
- sys sys_readlink 3 /* 1116 readlink() V*/
- sys irix_unimp 0 /* 1117 XXX IRIX 4 lstat() DC*/
- sys irix_unimp 0 /* 1118 nothin' V*/
- sys irix_unimp 0 /* 1119 XXX nfs_svc() DC*/
- sys irix_unimp 0 /* 1120 XXX nfs_getfh() DC*/
- sys irix_unimp 0 /* 1121 XXX async_daemon() DC*/
- sys irix_unimp 0 /* 1122 XXX exportfs() DC*/
- sys sys_setregid 2 /* 1123 setregid() V*/
- sys sys_setreuid 2 /* 1124 setreuid() V*/
- sys sys_getitimer 2 /* 1125 getitimer() V*/
- sys sys_setitimer 3 /* 1126 setitimer() V*/
- sys irix_unimp 1 /* 1127 XXX adjtime() IV*/
- sys irix_gettimeofday 1 /* 1128 gettimeofday() V*/
- sys irix_unimp 0 /* 1129 XXX sproc() IV*/
- sys irix_prctl 0 /* 1130 prctl() HV*/
- sys irix_unimp 0 /* 1131 XXX procblk() IV*/
- sys irix_unimp 0 /* 1132 XXX sprocsp() IV*/
- sys irix_unimp 0 /* 1133 XXX sgigsc() IV*/
- sys irix_mmap32 6 /* 1134 mmap() XXXflags? ?V*/
- sys sys_munmap 2 /* 1135 munmap() V*/
- sys sys_mprotect 3 /* 1136 mprotect() V*/
- sys sys_msync 4 /* 1137 msync() V*/
- sys irix_madvise 3 /* 1138 madvise() DC*/
- sys irix_pagelock 3 /* 1139 pagelock() IV*/
- sys irix_getpagesize 0 /* 1140 getpagesize() V*/
- sys irix_quotactl 0 /* 1141 quotactl() V*/
- sys irix_unimp 0 /* 1142 nobody home man V*/
- sys sys_getpgid 1 /* 1143 BSD getpgrp() V*/
- sys irix_BSDsetpgrp 2 /* 1143 BSD setpgrp() V*/
- sys sys_vhangup 0 /* 1144 vhangup() V*/
- sys sys_fsync 1 /* 1145 fsync() V*/
- sys sys_fchdir 1 /* 1146 fchdir() V*/
- sys sys_getrlimit 2 /* 1147 getrlimit() ?V*/
- sys sys_setrlimit 2 /* 1148 setrlimit() ?V*/
- sys sys_cacheflush 3 /* 1150 cacheflush() HV*/
- sys sys_cachectl 3 /* 1151 cachectl() HV*/
- sys sys_fchown 3 /* 1152 fchown() ?V*/
- sys sys_fchmod 2 /* 1153 fchmod() ?V*/
- sys irix_unimp 0 /* 1154 XXX IRIX 4 wait3() V*/
- sys sys_socketpair 4 /* 1155 socketpair() V*/
- sys irix_systeminfo 3 /* 1156 systeminfo() IV*/
- sys irix_uname 1 /* 1157 uname() IV*/
- sys irix_xstat 3 /* 1158 xstat() V*/
- sys irix_lxstat 3 /* 1159 lxstat() V*/
- sys irix_fxstat 3 /* 1160 fxstat() V*/
- sys irix_xmknod 0 /* 1161 xmknod() ?V*/
- sys irix_sigaction 4 /* 1162 sigaction() ?V*/
- sys irix_sigpending 1 /* 1163 sigpending() ?V*/
- sys irix_sigprocmask 3 /* 1164 sigprocmask() ?V*/
- sys irix_sigsuspend 0 /* 1165 sigsuspend() ?V*/
- sys irix_sigpoll_sys 3 /* 1166 sigpoll_sys() IV*/
- sys irix_swapctl 2 /* 1167 swapctl() IV*/
- sys irix_getcontext 0 /* 1168 getcontext() HV*/
- sys irix_setcontext 0 /* 1169 setcontext() HV*/
- sys irix_waitsys 5 /* 1170 waitsys() IV*/
- sys irix_sigstack 2 /* 1171 sigstack() HV*/
- sys irix_sigaltstack 2 /* 1172 sigaltstack() HV*/
- sys irix_sigsendset 2 /* 1173 sigsendset() IV*/
- sys irix_statvfs 2 /* 1174 statvfs() V*/
- sys irix_fstatvfs 2 /* 1175 fstatvfs() V*/
- sys irix_unimp 0 /* 1176 XXX getpmsg() DC*/
- sys irix_unimp 0 /* 1177 XXX putpmsg() DC*/
- sys sys_lchown 3 /* 1178 lchown() V*/
- sys irix_priocntl 0 /* 1179 priocntl() DC*/
- sys irix_sigqueue 4 /* 1180 sigqueue() IV*/
- sys sys_readv 3 /* 1181 readv() V*/
- sys sys_writev 3 /* 1182 writev() V*/
- sys irix_truncate64 4 /* 1183 truncate64() XX32bit HV*/
- sys irix_ftruncate64 4 /* 1184 ftruncate64()XX32bit HV*/
- sys irix_mmap64 0 /* 1185 mmap64() XX32bit HV*/
- sys irix_dmi 0 /* 1186 dmi() DC*/
- sys irix_pread 6 /* 1187 pread() IV*/
- sys irix_pwrite 6 /* 1188 pwrite() IV*/
- sys sys_fsync 1 /* 1189 fdatasync() XXPOSIX HV*/
- sys irix_sgifastpath 7 /* 1190 sgifastpath() WHEEE IV*/
- sys irix_unimp 0 /* 1191 XXX attr_get() DC*/
- sys irix_unimp 0 /* 1192 XXX attr_getf() DC*/
- sys irix_unimp 0 /* 1193 XXX attr_set() DC*/
- sys irix_unimp 0 /* 1194 XXX attr_setf() DC*/
- sys irix_unimp 0 /* 1195 XXX attr_remove() DC*/
- sys irix_unimp 0 /* 1196 XXX attr_removef() DC*/
- sys irix_unimp 0 /* 1197 XXX attr_list() DC*/
- sys irix_unimp 0 /* 1198 XXX attr_listf() DC*/
- sys irix_unimp 0 /* 1199 XXX attr_multi() DC*/
- sys irix_unimp 0 /* 1200 XXX attr_multif() DC*/
- sys irix_statvfs64 2 /* 1201 statvfs64() V*/
- sys irix_fstatvfs64 2 /* 1202 fstatvfs64() V*/
- sys irix_getmountid 2 /* 1203 getmountid()XXXfsids HV*/
- sys irix_nsproc 5 /* 1204 nsproc() IV*/
- sys irix_getdents64 3 /* 1205 getdents64() HV*/
- sys irix_unimp 0 /* 1206 XXX DFS garbage DC*/
- sys irix_ngetdents 4 /* 1207 ngetdents() XXXeop HV*/
- sys irix_ngetdents64 4 /* 1208 ngetdents64() XXXeop HV*/
- sys irix_unimp 0 /* 1209 nothin' V*/
- sys irix_unimp 0 /* 1210 XXX pidsprocsp() */
- sys irix_unimp 0 /* 1211 XXX rexec() */
- sys irix_unimp 0 /* 1212 XXX timer_create() */
- sys irix_unimp 0 /* 1213 XXX timer_delete() */
- sys irix_unimp 0 /* 1214 XXX timer_settime() */
- sys irix_unimp 0 /* 1215 XXX timer_gettime() */
- sys irix_unimp 0 /* 1216 XXX timer_setoverrun() */
- sys sys_sched_rr_get_interval 2 /* 1217 sched_rr_get_interval()V*/
- sys sys_sched_yield 0 /* 1218 sched_yield() V*/
- sys sys_sched_getscheduler 1 /* 1219 sched_getscheduler() V*/
- sys sys_sched_setscheduler 3 /* 1220 sched_setscheduler() V*/
- sys sys_sched_getparam 2 /* 1221 sched_getparam() V*/
- sys sys_sched_setparam 2 /* 1222 sched_setparam() V*/
- sys irix_unimp 0 /* 1223 XXX usync_cntl() */
- sys irix_unimp 0 /* 1224 XXX psema_cntl() */
- sys irix_unimp 0 /* 1225 XXX restartreturn() */
-
- /* Just to pad things out nicely. */
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
-
- .endm
-
- /*
- * Pre-compute the number of _instruction_ bytes needed to load
- * or store the arguments 6-8. Negative values are ignored.
- */
- .macro sys function, nargs
- PTR \function
- LONG (\nargs << 2) - (5 << 2)
- .endm
-
- .align 4
-EXPORT(sys_call_table_irix5)
- irix5syscalltable
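Each entry emitted by the sys macro above is a function pointer (PTR)
followed by a LONG holding (\nargs << 2) - (5 << 2), i.e. 4 * (nargs - 5):
the number of bytes of load/store instructions needed for the stack-passed
arguments beyond the first five. Worked examples: nargs = 7 gives
28 - 20 = 8 (two 32-bit accesses), while nargs = 3 gives 12 - 20 = -8, a
negative value the dispatcher ignores, as the deleted comment notes.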
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
deleted file mode 100644
index ab12c8f0151..00000000000
--- a/arch/mips/kernel/irixelf.c
+++ /dev/null
@@ -1,1336 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * irixelf.c: Code to load IRIX ELF executables conforming to the MIPS ABI.
- * Based off of work by Eric Youngdale.
- *
- * Copyright (C) 1993 - 1994 Eric Youngdale <ericy@cais.com>
- * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
- * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
- */
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/stat.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/a.out.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/binfmts.h>
-#include <linux/string.h>
-#include <linux/file.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/shm.h>
-#include <linux/personality.h>
-#include <linux/elfcore.h>
-#include <linux/smp_lock.h>
-
-#include <asm/mipsregs.h>
-#include <asm/namei.h>
-#include <asm/prctl.h>
-#include <asm/uaccess.h>
-
-#define DLINFO_ITEMS 12
-
-#include <linux/elf.h>
-
-#undef DEBUG
-
-static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
-static int load_irix_library(struct file *);
-static int irix_core_dump(long signr, struct pt_regs * regs,
- struct file *file);
-
-static struct linux_binfmt irix_format = {
- NULL, THIS_MODULE, load_irix_binary, load_irix_library,
- irix_core_dump, PAGE_SIZE
-};
-
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
-#ifdef DEBUG
-/* Debugging routines. */
-static char *get_elf_p_type(Elf32_Word p_type)
-{
- int i = (int) p_type;
-
- switch(i) {
- case PT_NULL: return("PT_NULL"); break;
- case PT_LOAD: return("PT_LOAD"); break;
- case PT_DYNAMIC: return("PT_DYNAMIC"); break;
- case PT_INTERP: return("PT_INTERP"); break;
- case PT_NOTE: return("PT_NOTE"); break;
- case PT_SHLIB: return("PT_SHLIB"); break;
- case PT_PHDR: return("PT_PHDR"); break;
- case PT_LOPROC: return("PT_LOPROC/REGINFO"); break;
- case PT_HIPROC: return("PT_HIPROC"); break;
- default: return("PT_BOGUS"); break;
- }
-}
-
-static void print_elfhdr(struct elfhdr *ehp)
-{
- int i;
-
- printk("ELFHDR: e_ident<");
- for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]);
- printk("%x>\n", ehp->e_ident[i]);
- printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
- (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
- (unsigned long) ehp->e_version);
- printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
- "e_flags[%08lx]\n",
- (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
- (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
- printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
- (unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize,
- (unsigned short) ehp->e_phnum);
- printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
- (unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum,
- (unsigned short) ehp->e_shstrndx);
-}
-
-static void print_phdr(int i, struct elf_phdr *ep)
-{
- printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
- "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
- (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
- (unsigned long) ep->p_paddr);
- printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
- "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
- (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
- (unsigned long) ep->p_align);
-}
-
-static void dump_phdrs(struct elf_phdr *ep, int pnum)
-{
- int i;
-
- for(i = 0; i < pnum; i++, ep++) {
- if((ep->p_type == PT_LOAD) ||
- (ep->p_type == PT_INTERP) ||
- (ep->p_type == PT_PHDR))
- print_phdr(i, ep);
- }
-}
-#endif /* DEBUG */
-
-static void set_brk(unsigned long start, unsigned long end)
-{
- start = PAGE_ALIGN(start);
- end = PAGE_ALIGN(end);
- if (end <= start)
- return;
- down_write(&current->mm->mmap_sem);
- do_brk(start, end - start);
- up_write(&current->mm->mmap_sem);
-}
-
-
-/* We need to explicitly zero any fractional pages
- * after the data section (i.e. bss). This would
- * contain the junk from the file that should not
- * be in memory.
- */
-static void padzero(unsigned long elf_bss)
-{
- unsigned long nbyte;
-
- nbyte = elf_bss & (PAGE_SIZE-1);
- if (nbyte) {
- nbyte = PAGE_SIZE - nbyte;
- clear_user((void __user *) elf_bss, nbyte);
- }
-}
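As a concrete reading of padzero() above: assuming 4 KB pages, an elf_bss of
0x0040a345 leaves nbyte = 0x345, so clear_user() zeroes 0x1000 - 0x345 =
0xcbb bytes, wiping the file junk between the end of bss and the next page
boundary.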
-
-static unsigned long * create_irix_tables(char * p, int argc, int envc,
- struct elfhdr * exec, unsigned int load_addr,
- unsigned int interp_load_addr, struct pt_regs *regs,
- struct elf_phdr *ephdr)
-{
- elf_addr_t *argv;
- elf_addr_t *envp;
- elf_addr_t *sp, *csp;
-
-#ifdef DEBUG
- printk("create_irix_tables: p[%p] argc[%d] envc[%d] "
- "load_addr[%08x] interp_load_addr[%08x]\n",
- p, argc, envc, load_addr, interp_load_addr);
-#endif
- sp = (elf_addr_t *) (~15UL & (unsigned long) p);
- csp = sp;
- csp -= exec ? DLINFO_ITEMS*2 : 2;
- csp -= envc+1;
- csp -= argc+1;
- csp -= 1; /* argc itself */
- if ((unsigned long)csp & 15UL) {
- sp -= (16UL - ((unsigned long)csp & 15UL)) / sizeof(*sp);
- }
-
- /*
- * Put the ELF interpreter info on the stack
- */
-#define NEW_AUX_ENT(nr, id, val) \
- __put_user ((id), sp+(nr*2)); \
- __put_user ((val), sp+(nr*2+1)); \
-
- sp -= 2;
- NEW_AUX_ENT(0, AT_NULL, 0);
-
- if(exec) {
- sp -= 11*2;
-
- NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
- NEW_AUX_ENT (1, AT_PHENT, sizeof (struct elf_phdr));
- NEW_AUX_ENT (2, AT_PHNUM, exec->e_phnum);
- NEW_AUX_ENT (3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
- NEW_AUX_ENT (4, AT_BASE, interp_load_addr);
- NEW_AUX_ENT (5, AT_FLAGS, 0);
- NEW_AUX_ENT (6, AT_ENTRY, (elf_addr_t) exec->e_entry);
- NEW_AUX_ENT (7, AT_UID, (elf_addr_t) current->uid);
- NEW_AUX_ENT (8, AT_EUID, (elf_addr_t) current->euid);
- NEW_AUX_ENT (9, AT_GID, (elf_addr_t) current->gid);
- NEW_AUX_ENT (10, AT_EGID, (elf_addr_t) current->egid);
- }
-#undef NEW_AUX_ENT
-
- sp -= envc+1;
- envp = sp;
- sp -= argc+1;
- argv = sp;
-
- __put_user((elf_addr_t)argc,--sp);
- current->mm->arg_start = (unsigned long) p;
- while (argc-->0) {
- __put_user((unsigned long)p,argv++);
- p += strlen_user(p);
- }
- __put_user((unsigned long) NULL, argv);
- current->mm->arg_end = current->mm->env_start = (unsigned long) p;
- while (envc-->0) {
- __put_user((unsigned long)p,envp++);
- p += strlen_user(p);
- }
- __put_user((unsigned long) NULL, envp);
- current->mm->env_end = (unsigned long) p;
- return sp;
-}
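Reading create_irix_tables() bottom-up, the stack image it hands to the new
process looks like this, from the final sp towards higher addresses (a
sketch derived from the code above, matching the usual ELF startup layout):

	sp ->	argc
		argv[0] .. argv[argc-1], NULL
		envp[0] .. envp[envc-1], NULL
		auxv: AT_PHDR .. AT_EGID (only when exec != NULL), then AT_NULL
		argument/environment strings (starting at p)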
-
-
-/* This is much more generalized than the library routine read function,
- * so we keep this separate. Technically the library read function
- * is only provided so that we can read a.out libraries that have
- * an ELF header.
- */
-static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
- struct file * interpreter,
- unsigned int *interp_load_addr)
-{
- struct elf_phdr *elf_phdata = NULL;
- struct elf_phdr *eppnt;
- unsigned int len;
- unsigned int load_addr;
- int elf_bss;
- int retval;
- unsigned int last_bss;
- int error;
- int i;
- unsigned int k;
-
- elf_bss = 0;
- last_bss = 0;
- error = load_addr = 0;
-
-#ifdef DEBUG
- print_elfhdr(interp_elf_ex);
-#endif
-
- /* First of all, some simple consistency checks */
- if ((interp_elf_ex->e_type != ET_EXEC &&
- interp_elf_ex->e_type != ET_DYN) ||
- !interpreter->f_op->mmap) {
- printk("IRIX interp has bad e_type %d\n", interp_elf_ex->e_type);
- return 0xffffffff;
- }
-
- /* Now read in all of the header information */
- if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
- printk("IRIX interp header bigger than a page (%d)\n",
- (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
- return 0xffffffff;
- }
-
- elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
- GFP_KERNEL);
-
- if(!elf_phdata) {
- printk("Cannot kmalloc phdata for IRIX interp.\n");
- return 0xffffffff;
- }
-
- /* If the size of this structure has changed, then punt, since
- * we will be doing the wrong thing.
- */
- if(interp_elf_ex->e_phentsize != 32) {
- printk("IRIX interp e_phentsize == %d != 32 ",
- interp_elf_ex->e_phentsize);
- kfree(elf_phdata);
- return 0xffffffff;
- }
-
- retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
- (char *) elf_phdata,
- sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
-
-#ifdef DEBUG
- dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
-#endif
-
- eppnt = elf_phdata;
- for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
- if(eppnt->p_type == PT_LOAD) {
- int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
- int elf_prot = 0;
- unsigned long vaddr = 0;
- if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
- if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
- if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
- elf_type |= MAP_FIXED;
- vaddr = eppnt->p_vaddr;
-
- pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
- interpreter, vaddr,
- (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
- (unsigned long) elf_prot, (unsigned long) elf_type,
- (unsigned long) (eppnt->p_offset & 0xfffff000));
- down_write(&current->mm->mmap_sem);
- error = do_mmap(interpreter, vaddr,
- eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
- elf_prot, elf_type,
- eppnt->p_offset & 0xfffff000);
- up_write(&current->mm->mmap_sem);
-
- if(error < 0 && error > -1024) {
- printk("Aieee IRIX interp mmap error=%d\n", error);
- break; /* Real error */
- }
- pr_debug("error=%08lx ", (unsigned long) error);
- if(!load_addr && interp_elf_ex->e_type == ET_DYN) {
- load_addr = error;
- pr_debug("load_addr = error ");
- }
-
- /* Find the end of the file mapping for this phdr, and keep
- * track of the largest address we see for this.
- */
- k = eppnt->p_vaddr + eppnt->p_filesz;
- if(k > elf_bss) elf_bss = k;
-
- /* Do the same thing for the memory mapping - between
- * elf_bss and last_bss is the bss section.
- */
- k = eppnt->p_memsz + eppnt->p_vaddr;
- if(k > last_bss) last_bss = k;
- pr_debug("\n");
- }
- }
-
- /* Now use mmap to map the library into memory. */
- if(error < 0 && error > -1024) {
- pr_debug("got error %d\n", error);
- kfree(elf_phdata);
- return 0xffffffff;
- }
-
- /* Now fill out the bss section. First pad the last page up
- * to the page boundary, and then perform a mmap to make sure
- * that there are zero-mapped pages up to and including the
- * last bss page.
- */
- pr_debug("padzero(%08lx) ", (unsigned long) (elf_bss));
- padzero(elf_bss);
- len = (elf_bss + 0xfff) & 0xfffff000; /* What we have mapped so far */
-
- pr_debug("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss,
- (unsigned long) len);
-
- /* Map the last of the bss segment */
- if (last_bss > len) {
- down_write(&current->mm->mmap_sem);
- do_brk(len, (last_bss - len));
- up_write(&current->mm->mmap_sem);
- }
- kfree(elf_phdata);
-
- *interp_load_addr = load_addr;
- return ((unsigned int) interp_elf_ex->e_entry);
-}
-
-/* Check sanity of IRIX elf executable header. */
-static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
-{
- if (memcmp(ehp->e_ident, ELFMAG, SELFMAG) != 0)
- return -ENOEXEC;
-
- /* First of all, some simple consistency checks */
- if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
- !bprm->file->f_op->mmap) {
- return -ENOEXEC;
- }
-
- /* XXX Don't support N32 or 64bit binaries yet because they can
- * XXX and do execute 64 bit instructions and expect all registers
- * XXX to be 64 bit as well. We need to make the kernel save
- * XXX all registers as 64bits on cpu's capable of this at
- * XXX exception time plus frob the XTLB exception vector.
- */
- if((ehp->e_flags & EF_MIPS_ABI2))
- return -ENOEXEC;
-
- return 0;
-}
-
-/*
- * This is where the detailed check is performed. Irix binaries
- * use interpreters with 'libc.so' in the name, so this function
- * can differentiate between Linux and Irix binaries.
- */
-static inline int look_for_irix_interpreter(char **name,
- struct file **interpreter,
- struct elfhdr *interp_elf_ex,
- struct elf_phdr *epp,
- struct linux_binprm *bprm, int pnum)
-{
- int i;
- int retval = -EINVAL;
- struct file *file = NULL;
-
- *name = NULL;
- for(i = 0; i < pnum; i++, epp++) {
- if (epp->p_type != PT_INTERP)
- continue;
-
- /* It is illegal to have two interpreters for one executable. */
- if (*name != NULL)
- goto out;
-
- *name = kmalloc(epp->p_filesz + strlen(IRIX_EMUL), GFP_KERNEL);
- if (!*name)
- return -ENOMEM;
-
- strcpy(*name, IRIX_EMUL);
- retval = kernel_read(bprm->file, epp->p_offset, (*name + 16),
- epp->p_filesz);
- if (retval < 0)
- goto out;
-
- file = open_exec(*name);
- if (IS_ERR(file)) {
- retval = PTR_ERR(file);
- goto out;
- }
- retval = kernel_read(file, 0, bprm->buf, 128);
- if (retval < 0)
- goto dput_and_out;
-
- *interp_elf_ex = *(struct elfhdr *) bprm->buf;
- }
- *interpreter = file;
- return 0;
-
-dput_and_out:
- fput(file);
-out:
- kfree(*name);
- return retval;
-}
-
-static inline int verify_irix_interpreter(struct elfhdr *ihp)
-{
- if (memcmp(ihp->e_ident, ELFMAG, SELFMAG) != 0)
- return -ELIBBAD;
- return 0;
-}
-
-#define EXEC_MAP_FLAGS (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE)
-
-static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnum,
- unsigned int *estack, unsigned int *laddr,
- unsigned int *scode, unsigned int *ebss,
- unsigned int *ecode, unsigned int *edata,
- unsigned int *ebrk)
-{
- unsigned int tmp;
- int i, prot;
-
- for(i = 0; i < pnum; i++, epp++) {
- if(epp->p_type != PT_LOAD)
- continue;
-
- /* Map it. */
- prot = (epp->p_flags & PF_R) ? PROT_READ : 0;
- prot |= (epp->p_flags & PF_W) ? PROT_WRITE : 0;
- prot |= (epp->p_flags & PF_X) ? PROT_EXEC : 0;
- down_write(&current->mm->mmap_sem);
- (void) do_mmap(fp, (epp->p_vaddr & 0xfffff000),
- (epp->p_filesz + (epp->p_vaddr & 0xfff)),
- prot, EXEC_MAP_FLAGS,
- (epp->p_offset & 0xfffff000));
- up_write(&current->mm->mmap_sem);
-
- /* Fixup location tracking vars. */
- if((epp->p_vaddr & 0xfffff000) < *estack)
- *estack = (epp->p_vaddr & 0xfffff000);
- if(!*laddr)
- *laddr = epp->p_vaddr - epp->p_offset;
- if(epp->p_vaddr < *scode)
- *scode = epp->p_vaddr;
-
- tmp = epp->p_vaddr + epp->p_filesz;
- if(tmp > *ebss)
- *ebss = tmp;
- if((epp->p_flags & PF_X) && *ecode < tmp)
- *ecode = tmp;
- if(*edata < tmp)
- *edata = tmp;
-
- tmp = epp->p_vaddr + epp->p_memsz;
- if(tmp > *ebrk)
- *ebrk = tmp;
- }
-
-}
-
-static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
- struct file *interp, unsigned int *iladdr,
- int pnum, mm_segment_t old_fs,
- unsigned int *eentry)
-{
- int i;
-
- *eentry = 0xffffffff;
- for(i = 0; i < pnum; i++, epp++) {
- if(epp->p_type != PT_INTERP)
- continue;
-
- /* We should have fielded this error elsewhere... */
- if(*eentry != 0xffffffff)
- return -1;
-
- set_fs(old_fs);
- *eentry = load_irix_interp(ihp, interp, iladdr);
- old_fs = get_fs();
- set_fs(get_ds());
-
- fput(interp);
-
- if (*eentry == 0xffffffff)
- return -1;
- }
- return 0;
-}
-
-/*
- * IRIX maps a page at 0x200000 that holds information about the
- * process and the system, here we map the page and fill the
- * structure
- */
-static void irix_map_prda_page(void)
-{
- unsigned long v;
- struct prda *pp;
-
- down_write(&current->mm->mmap_sem);
- v = do_brk (PRDA_ADDRESS, PAGE_SIZE);
- up_write(&current->mm->mmap_sem);
-
- if (v < 0)
- return;
-
- pp = (struct prda *) v;
- pp->prda_sys.t_pid = current->pid;
- pp->prda_sys.t_prid = read_c0_prid();
- pp->prda_sys.t_rpid = current->pid;
-
- /* We leave the rest set to zero */
-}
-
-
-
-/* These are the functions used to load ELF style executables and shared
- * libraries. There is no binary dependent code anywhere else.
- */
-static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
-{
- struct elfhdr elf_ex, interp_elf_ex;
- struct file *interpreter;
- struct elf_phdr *elf_phdata, *elf_ihdr, *elf_ephdr;
- unsigned int load_addr, elf_bss, elf_brk;
- unsigned int elf_entry, interp_load_addr = 0;
- unsigned int start_code, end_code, end_data, elf_stack;
- int retval, has_interp, has_ephdr, size, i;
- char *elf_interpreter;
- mm_segment_t old_fs;
-
- load_addr = 0;
- has_interp = has_ephdr = 0;
- elf_ihdr = elf_ephdr = NULL;
- elf_ex = *((struct elfhdr *) bprm->buf);
- retval = -ENOEXEC;
-
- if (verify_binary(&elf_ex, bprm))
- goto out;
-
- /*
- * Telling -o32 static binaries from Linux and Irix apart from each
- * other is difficult. There are 2 differences to be noted for static
- * binaries from the 2 operating systems:
- *
- * 1) Irix binaries have their .text section before their .init
- * section. Linux binaries are just the opposite.
- *
- * 2) Irix binaries usually have <= 12 sections and Linux
- * binaries have > 20.
- *
- * We will use Method #2 since Method #1 would require us to read in
- * the section headers which is way too much overhead. This appears
- * to work for everything we have ran into so far. If anyone has a
- * better method to tell the binaries apart, I'm listening.
- */
- if (elf_ex.e_shnum > 20)
- goto out;
-
-#ifdef DEBUG
- print_elfhdr(&elf_ex);
-#endif
-
- /* Now read in all of the header information */
- size = elf_ex.e_phentsize * elf_ex.e_phnum;
- if (size > 65536)
- goto out;
- elf_phdata = kmalloc(size, GFP_KERNEL);
- if (elf_phdata == NULL) {
- retval = -ENOMEM;
- goto out;
- }
-
- retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size);
- if (retval < 0)
- goto out_free_ph;
-
-#ifdef DEBUG
- dump_phdrs(elf_phdata, elf_ex.e_phnum);
-#endif
-
- /* Set some things for later. */
- for(i = 0; i < elf_ex.e_phnum; i++) {
- switch(elf_phdata[i].p_type) {
- case PT_INTERP:
- has_interp = 1;
- elf_ihdr = &elf_phdata[i];
- break;
- case PT_PHDR:
- has_ephdr = 1;
- elf_ephdr = &elf_phdata[i];
- break;
- };
- }
-
- pr_debug("\n");
-
- elf_bss = 0;
- elf_brk = 0;
-
- elf_stack = 0xffffffff;
- elf_interpreter = NULL;
- start_code = 0xffffffff;
- end_code = 0;
- end_data = 0;
-
- /*
- * If we get a return value, we change the value to be ENOEXEC
- * so that we can exit gracefully and the main binary format
- * search loop in 'fs/exec.c' will move onto the next handler
- * which should be the normal ELF binary handler.
- */
- retval = look_for_irix_interpreter(&elf_interpreter, &interpreter,
- &interp_elf_ex, elf_phdata, bprm,
- elf_ex.e_phnum);
- if (retval) {
- retval = -ENOEXEC;
- goto out_free_file;
- }
-
- if (elf_interpreter) {
- retval = verify_irix_interpreter(&interp_elf_ex);
- if(retval)
- goto out_free_interp;
- }
-
- /* OK, we are done with that, now set up the arg stuff,
- * and then start this sucker up.
- */
- retval = -E2BIG;
- if (!bprm->sh_bang && !bprm->p)
- goto out_free_interp;
-
- /* Flush all traces of the currently running executable */
- retval = flush_old_exec(bprm);
- if (retval)
- goto out_free_dentry;
-
- /* OK, This is the point of no return */
- current->mm->end_data = 0;
- current->mm->end_code = 0;
- current->mm->mmap = NULL;
- current->flags &= ~PF_FORKNOEXEC;
- elf_entry = (unsigned int) elf_ex.e_entry;
-
- /* Do this so that we can load the interpreter, if need be. We will
- * change some of these later.
- */
- setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
- current->mm->start_stack = bprm->p;
-
- /* At this point, we assume that the image should be loaded at
- * fixed address, not at a variable address.
- */
- old_fs = get_fs();
- set_fs(get_ds());
-
- map_executable(bprm->file, elf_phdata, elf_ex.e_phnum, &elf_stack,
- &load_addr, &start_code, &elf_bss, &end_code,
- &end_data, &elf_brk);
-
- if(elf_interpreter) {
- retval = map_interpreter(elf_phdata, &interp_elf_ex,
- interpreter, &interp_load_addr,
- elf_ex.e_phnum, old_fs, &elf_entry);
- kfree(elf_interpreter);
- if(retval) {
- set_fs(old_fs);
- printk("Unable to load IRIX ELF interpreter\n");
- send_sig(SIGSEGV, current, 0);
- retval = 0;
- goto out_free_file;
- }
- }
-
- set_fs(old_fs);
-
- kfree(elf_phdata);
- set_personality(PER_IRIX32);
- set_binfmt(&irix_format);
- compute_creds(bprm);
- current->flags &= ~PF_FORKNOEXEC;
- bprm->p = (unsigned long)
- create_irix_tables((char *)bprm->p, bprm->argc, bprm->envc,
- (elf_interpreter ? &elf_ex : NULL),
- load_addr, interp_load_addr, regs, elf_ephdr);
- current->mm->start_brk = current->mm->brk = elf_brk;
- current->mm->end_code = end_code;
- current->mm->start_code = start_code;
- current->mm->end_data = end_data;
- current->mm->start_stack = bprm->p;
-
- /* Calling set_brk effectively mmaps the pages that we need for the
- * bss and break sections.
- */
- set_brk(elf_bss, elf_brk);
-
- /*
- * IRIX maps a page at 0x200000 which holds some system
- * information. Programs depend on this.
- */
- irix_map_prda_page();
-
- padzero(elf_bss);
-
- pr_debug("(start_brk) %lx\n" , (long) current->mm->start_brk);
- pr_debug("(end_code) %lx\n" , (long) current->mm->end_code);
- pr_debug("(start_code) %lx\n" , (long) current->mm->start_code);
- pr_debug("(end_data) %lx\n" , (long) current->mm->end_data);
- pr_debug("(start_stack) %lx\n" , (long) current->mm->start_stack);
- pr_debug("(brk) %lx\n" , (long) current->mm->brk);
-
-#if 0 /* XXX No fucking way dude... */
- /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
- * and some applications "depend" upon this behavior.
- * Since we do not have the power to recompile these, we
- * emulate the SVr4 behavior. Sigh.
- */
- down_write(&current->mm->mmap_sem);
- (void) do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE, 0);
- up_write(&current->mm->mmap_sem);
-#endif
-
- start_thread(regs, elf_entry, bprm->p);
- if (current->ptrace & PT_PTRACED)
- send_sig(SIGTRAP, current, 0);
- return 0;
-out:
- return retval;
-
-out_free_dentry:
- allow_write_access(interpreter);
- fput(interpreter);
-out_free_interp:
- kfree(elf_interpreter);
-out_free_file:
-out_free_ph:
- kfree (elf_phdata);
- goto out;
-}
-
-/* This is really simpleminded and specialized - we are loading an
- * a.out library that is given an ELF header.
- */
-static int load_irix_library(struct file *file)
-{
- struct elfhdr elf_ex;
- struct elf_phdr *elf_phdata = NULL;
- unsigned int len = 0;
- int elf_bss = 0;
- int retval;
- unsigned int bss;
- int error;
- int i,j, k;
-
- error = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
- if (error != sizeof(elf_ex))
- return -ENOEXEC;
-
- if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
- return -ENOEXEC;
-
- /* First of all, some simple consistency checks. */
- if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
- !file->f_op->mmap)
- return -ENOEXEC;
-
- /* Now read in all of the header information. */
- if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
- return -ENOEXEC;
-
- elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
- if (elf_phdata == NULL)
- return -ENOMEM;
-
- retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata,
- sizeof(struct elf_phdr) * elf_ex.e_phnum);
-
- j = 0;
- for(i=0; i<elf_ex.e_phnum; i++)
- if((elf_phdata + i)->p_type == PT_LOAD) j++;
-
- if(j != 1) {
- kfree(elf_phdata);
- return -ENOEXEC;
- }
-
- while(elf_phdata->p_type != PT_LOAD) elf_phdata++;
-
- /* Now use mmap to map the library into memory. */
- down_write(&current->mm->mmap_sem);
- error = do_mmap(file,
- elf_phdata->p_vaddr & 0xfffff000,
- elf_phdata->p_filesz + (elf_phdata->p_vaddr & 0xfff),
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
- elf_phdata->p_offset & 0xfffff000);
- up_write(&current->mm->mmap_sem);
-
- k = elf_phdata->p_vaddr + elf_phdata->p_filesz;
- if (k > elf_bss) elf_bss = k;
-
- if (error != (elf_phdata->p_vaddr & 0xfffff000)) {
- kfree(elf_phdata);
- return error;
- }
-
- padzero(elf_bss);
-
- len = (elf_phdata->p_filesz + elf_phdata->p_vaddr+ 0xfff) & 0xfffff000;
- bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
- if (bss > len) {
- down_write(&current->mm->mmap_sem);
- do_brk(len, bss-len);
- up_write(&current->mm->mmap_sem);
- }
- kfree(elf_phdata);
- return 0;
-}
-
-/* Called through irix_syssgi() to map an elf image given an FD,
- * a phdr ptr USER_PHDRP in userspace, and a count CNT telling how many
- * phdrs there are in the USER_PHDRP array. We return the vaddr the
- * first phdr was successfully mapped to.
- */
-unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
-{
- unsigned long type, vaddr, filesz, offset, flags;
- struct elf_phdr __user *hp;
- struct file *filp;
- int i, retval;
-
- pr_debug("irix_mapelf: fd[%d] user_phdrp[%p] cnt[%d]\n",
- fd, user_phdrp, cnt);
-
- /* First get the verification out of the way. */
- hp = user_phdrp;
- if (!access_ok(VERIFY_READ, hp, (sizeof(struct elf_phdr) * cnt))) {
- pr_debug("irix_mapelf: bad pointer to ELF PHDR!\n");
-
- return -EFAULT;
- }
-
-#ifdef DEBUG
- dump_phdrs(user_phdrp, cnt);
-#endif
-
- for (i = 0; i < cnt; i++, hp++) {
- if (__get_user(type, &hp->p_type))
- return -EFAULT;
- if (type != PT_LOAD) {
- printk("irix_mapelf: One section is not PT_LOAD!\n");
- return -ENOEXEC;
- }
- }
-
- filp = fget(fd);
- if (!filp)
- return -EACCES;
- if(!filp->f_op) {
- printk("irix_mapelf: Bogon filp!\n");
- fput(filp);
- return -EACCES;
- }
-
- hp = user_phdrp;
- for(i = 0; i < cnt; i++, hp++) {
- int prot;
-
- retval = __get_user(vaddr, &hp->p_vaddr);
- retval |= __get_user(filesz, &hp->p_filesz);
- retval |= __get_user(offset, &hp->p_offset);
- retval |= __get_user(flags, &hp->p_flags);
- if (retval)
- return retval;
-
- prot = (flags & PF_R) ? PROT_READ : 0;
- prot |= (flags & PF_W) ? PROT_WRITE : 0;
- prot |= (flags & PF_X) ? PROT_EXEC : 0;
-
- down_write(&current->mm->mmap_sem);
- retval = do_mmap(filp, (vaddr & 0xfffff000),
- (filesz + (vaddr & 0xfff)),
- prot, (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
- (offset & 0xfffff000));
- up_write(&current->mm->mmap_sem);
-
- if (retval != (vaddr & 0xfffff000)) {
- printk("irix_mapelf: do_mmap fails with %d!\n", retval);
- fput(filp);
- return retval;
- }
- }
-
- pr_debug("irix_mapelf: Success, returning %08lx\n",
- (unsigned long) user_phdrp->p_vaddr);
-
- fput(filp);
-
- if (__get_user(vaddr, &user_phdrp->p_vaddr))
- return -EFAULT;
-
- return vaddr;
-}
-
-/*
- * ELF core dumper
- *
- * Modelled on fs/exec.c:aout_core_dump()
- * Jeremy Fitzhardinge <jeremy@sw.oz.au>
- */
-
-/* These are the only things you should do on a core-file: use only these
- * functions to write out all the necessary info.
- */
-static int dump_write(struct file *file, const void __user *addr, int nr)
-{
- return file->f_op->write(file, (const char __user *) addr, nr, &file->f_pos) == nr;
-}
-
-static int dump_seek(struct file *file, off_t off)
-{
- if (file->f_op->llseek) {
- if (file->f_op->llseek(file, off, 0) != off)
- return 0;
- } else
- file->f_pos = off;
- return 1;
-}
-
-/* Decide whether a segment is worth dumping; default is yes to be
- * sure (missing info is worse than too much; etc).
- * Personally I'd include everything, and use the coredump limit...
- *
- * I think we should skip something. But I am not sure how. H.J.
- */
-static inline int maydump(struct vm_area_struct *vma)
-{
- if (!(vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)))
- return 0;
-#if 1
- if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
- return 1;
- if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
- return 0;
-#endif
- return 1;
-}
-
-/* An ELF note in memory. */
-struct memelfnote
-{
- const char *name;
- int type;
- unsigned int datasz;
- void *data;
-};
-
-static int notesize(struct memelfnote *en)
-{
- int sz;
-
- sz = sizeof(struct elf_note);
- sz += roundup(strlen(en->name), 4);
- sz += roundup(en->datasz, 4);
-
- return sz;
-}
-
-/* #define DEBUG */
-
-#define DUMP_WRITE(addr, nr) \
-	do { \
-		if (!dump_write(file, (addr), (nr))) \
-			goto end_coredump; \
-	} while (0)
-#define DUMP_SEEK(off) \
-	do { \
-		if (!dump_seek(file, (off))) \
-			goto end_coredump; \
-	} while (0)
-
-static int writenote(struct memelfnote *men, struct file *file)
-{
- struct elf_note en;
-
- en.n_namesz = strlen(men->name);
- en.n_descsz = men->datasz;
- en.n_type = men->type;
-
- DUMP_WRITE(&en, sizeof(en));
- DUMP_WRITE(men->name, en.n_namesz);
- /* XXX - cast from long long to long to avoid need for libgcc.a */
- DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
- DUMP_WRITE(men->data, men->datasz);
- DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
-
- return 1;
-
-end_coredump:
- return 0;
-}
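For a concrete picture of what writenote() produces, here is a hedged userspace sketch that lays out one note the same way: header, then name, then descriptor, each piece padded to a 4-byte boundary. Elf32_Nhdr comes from <elf.h>; the no-trailing-NUL n_namesz mirrors the code above rather than asserting it is the canonical choice, and the caller is assumed to supply a zeroed buffer so the pad bytes stay zero.

/* Sketch: serializing one note the way writenote() does.
 * Returns the byte count, which matches notesize() above.
 */
#include <elf.h>
#include <string.h>

static size_t emit_note(unsigned char *buf, const char *name,
			int type, const void *desc, size_t descsz)
{
	Elf32_Nhdr hdr = {
		.n_namesz = (Elf32_Word) strlen(name),	/* no trailing NUL, as above */
		.n_descsz = descsz,
		.n_type   = type,
	};
	size_t off = 0;

	memcpy(buf + off, &hdr, sizeof(hdr));	off += sizeof(hdr);
	memcpy(buf + off, name, hdr.n_namesz);	off += hdr.n_namesz;
	off = (off + 3) & ~(size_t) 3;		/* pad name to 4 bytes */
	memcpy(buf + off, desc, descsz);	off += descsz;
	off = (off + 3) & ~(size_t) 3;		/* pad descriptor to 4 bytes */
	return off;
}

Reading the layout back is symmetric: step over the header, skip roundup(n_namesz, 4) bytes of name, and the descriptor starts there.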
-#undef DUMP_WRITE
-#undef DUMP_SEEK
-
-#define DUMP_WRITE(addr, nr) \
-	do { \
-		if (!dump_write(file, (addr), (nr))) \
-			goto end_coredump; \
-	} while (0)
-#define DUMP_SEEK(off) \
-	do { \
-		if (!dump_seek(file, (off))) \
-			goto end_coredump; \
-	} while (0)
-
-/* Actual dumper.
- *
- * This is a two-pass process; first we find the offsets of the bits,
- * and then they are actually written out. If we run out of core limit
- * we just truncate.
- */
-static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
-{
- int has_dumped = 0;
- mm_segment_t fs;
- int segs;
- int i;
- size_t size;
- struct vm_area_struct *vma;
- struct elfhdr elf;
- off_t offset = 0, dataoff;
- int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
- int numnote = 3;
- struct memelfnote notes[3];
- struct elf_prstatus prstatus; /* NT_PRSTATUS */
- elf_fpregset_t fpu; /* NT_PRFPREG */
- struct elf_prpsinfo psinfo; /* NT_PRPSINFO */
-
- /* Count what's needed to dump, up to the limit of coredump size. */
- segs = 0;
- size = 0;
- for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
- if (maydump(vma))
- {
- int sz = vma->vm_end-vma->vm_start;
-
- if (size+sz >= limit)
- break;
- else
- size += sz;
- }
-
- segs++;
- }
-#ifdef DEBUG
- printk("irix_core_dump: %d segs taking %d bytes\n", segs, size);
-#endif
-
- /* Set up header. */
- memcpy(elf.e_ident, ELFMAG, SELFMAG);
- elf.e_ident[EI_CLASS] = ELFCLASS32;
- elf.e_ident[EI_DATA] = ELFDATA2LSB;
- elf.e_ident[EI_VERSION] = EV_CURRENT;
- elf.e_ident[EI_OSABI] = ELF_OSABI;
- memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
-
- elf.e_type = ET_CORE;
- elf.e_machine = ELF_ARCH;
- elf.e_version = EV_CURRENT;
- elf.e_entry = 0;
- elf.e_phoff = sizeof(elf);
- elf.e_shoff = 0;
- elf.e_flags = 0;
- elf.e_ehsize = sizeof(elf);
- elf.e_phentsize = sizeof(struct elf_phdr);
- elf.e_phnum = segs+1; /* Include notes. */
- elf.e_shentsize = 0;
- elf.e_shnum = 0;
- elf.e_shstrndx = 0;
-
- fs = get_fs();
- set_fs(KERNEL_DS);
-
- has_dumped = 1;
- current->flags |= PF_DUMPCORE;
-
- DUMP_WRITE(&elf, sizeof(elf));
- offset += sizeof(elf); /* Elf header. */
- offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers. */
-
- /* Set up the notes in similar form to SVR4 core dumps made
- * with info from their /proc.
- */
- memset(&psinfo, 0, sizeof(psinfo));
- memset(&prstatus, 0, sizeof(prstatus));
-
- notes[0].name = "CORE";
- notes[0].type = NT_PRSTATUS;
- notes[0].datasz = sizeof(prstatus);
- notes[0].data = &prstatus;
- prstatus.pr_info.si_signo = prstatus.pr_cursig = signr;
- prstatus.pr_sigpend = current->pending.signal.sig[0];
- prstatus.pr_sighold = current->blocked.sig[0];
- psinfo.pr_pid = prstatus.pr_pid = current->pid;
- psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid;
- psinfo.pr_pgrp = prstatus.pr_pgrp = process_group(current);
- psinfo.pr_sid = prstatus.pr_sid = current->signal->session;
- if (current->pid == current->tgid) {
- /*
- * This is the record for the group leader. Add in the
- * cumulative times of previous dead threads. This total
- * won't include the time of each live thread whose state
- * is included in the core dump. The final total reported
- * to our parent process when it calls wait4 will include
- * those sums as well as the little bit more time it takes
- * this and each other thread to finish dying after the
- * core dump synchronization phase.
- */
- jiffies_to_timeval(current->utime + current->signal->utime,
- &prstatus.pr_utime);
- jiffies_to_timeval(current->stime + current->signal->stime,
- &prstatus.pr_stime);
- } else {
- jiffies_to_timeval(current->utime, &prstatus.pr_utime);
- jiffies_to_timeval(current->stime, &prstatus.pr_stime);
- }
- jiffies_to_timeval(current->signal->cutime, &prstatus.pr_cutime);
- jiffies_to_timeval(current->signal->cstime, &prstatus.pr_cstime);
-
-	if (sizeof(elf_gregset_t) != sizeof(struct pt_regs)) {
-		printk("sizeof(elf_gregset_t) (%zu) != sizeof(struct pt_regs) "
-		       "(%zu)\n", sizeof(elf_gregset_t),
-		       sizeof(struct pt_regs));
- } else {
- *(struct pt_regs *)&prstatus.pr_reg = *regs;
- }
-
- notes[1].name = "CORE";
- notes[1].type = NT_PRPSINFO;
- notes[1].datasz = sizeof(psinfo);
- notes[1].data = &psinfo;
- i = current->state ? ffz(~current->state) + 1 : 0;
- psinfo.pr_state = i;
- psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
- psinfo.pr_zomb = psinfo.pr_sname == 'Z';
- psinfo.pr_nice = task_nice(current);
- psinfo.pr_flag = current->flags;
- psinfo.pr_uid = current->uid;
- psinfo.pr_gid = current->gid;
- {
- int i, len;
-
- set_fs(fs);
-
- len = current->mm->arg_end - current->mm->arg_start;
-		len = len >= ELF_PRARGSZ ? ELF_PRARGSZ - 1 : len;
-		(void) copy_from_user(&psinfo.pr_psargs,
-		       (const char __user *)current->mm->arg_start, len);
- for (i = 0; i < len; i++)
- if (psinfo.pr_psargs[i] == 0)
- psinfo.pr_psargs[i] = ' ';
- psinfo.pr_psargs[len] = 0;
-
- set_fs(KERNEL_DS);
- }
- strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
-
- /* Try to dump the FPU. */
- prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
- if (!prstatus.pr_fpvalid) {
- numnote--;
- } else {
- notes[2].name = "CORE";
- notes[2].type = NT_PRFPREG;
- notes[2].datasz = sizeof(fpu);
- notes[2].data = &fpu;
- }
-
- /* Write notes phdr entry. */
- {
- struct elf_phdr phdr;
- int sz = 0;
-
- for(i = 0; i < numnote; i++)
- sz += notesize(&notes[i]);
-
- phdr.p_type = PT_NOTE;
- phdr.p_offset = offset;
- phdr.p_vaddr = 0;
- phdr.p_paddr = 0;
- phdr.p_filesz = sz;
- phdr.p_memsz = 0;
- phdr.p_flags = 0;
- phdr.p_align = 0;
-
- offset += phdr.p_filesz;
- DUMP_WRITE(&phdr, sizeof(phdr));
- }
-
- /* Page-align dumped data. */
- dataoff = offset = roundup(offset, PAGE_SIZE);
-
- /* Write program headers for segments dump. */
- for(vma = current->mm->mmap, i = 0;
- i < segs && vma != NULL; vma = vma->vm_next) {
- struct elf_phdr phdr;
- size_t sz;
-
- i++;
-
- sz = vma->vm_end - vma->vm_start;
-
- phdr.p_type = PT_LOAD;
- phdr.p_offset = offset;
- phdr.p_vaddr = vma->vm_start;
- phdr.p_paddr = 0;
- phdr.p_filesz = maydump(vma) ? sz : 0;
- phdr.p_memsz = sz;
- offset += phdr.p_filesz;
- phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
- if (vma->vm_flags & VM_WRITE)
- phdr.p_flags |= PF_W;
- if (vma->vm_flags & VM_EXEC)
- phdr.p_flags |= PF_X;
- phdr.p_align = PAGE_SIZE;
-
- DUMP_WRITE(&phdr, sizeof(phdr));
- }
-
- for(i = 0; i < numnote; i++)
- if (!writenote(&notes[i], file))
- goto end_coredump;
-
- set_fs(fs);
-
- DUMP_SEEK(dataoff);
-
- for(i = 0, vma = current->mm->mmap;
- i < segs && vma != NULL;
- vma = vma->vm_next) {
- unsigned long addr = vma->vm_start;
- unsigned long len = vma->vm_end - vma->vm_start;
-
- if (!maydump(vma))
- continue;
- i++;
-#ifdef DEBUG
- printk("elf_core_dump: writing %08lx %lx\n", addr, len);
-#endif
- DUMP_WRITE((void __user *)addr, len);
- }
-
- if ((off_t) file->f_pos != offset) {
- /* Sanity check. */
- printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
- (off_t) file->f_pos, offset);
- }
-
-end_coredump:
- set_fs(fs);
- return has_dumped;
-}
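irix_core_dump() decides every file offset while it is still writing headers, then trusts the write pass to land on exactly those offsets, which is what the closing f_pos sanity check verifies. A stripped-down sketch of that bookkeeping discipline (the struct and the page parameter are illustrative, not types from this file):

/* Sketch: decide-offsets-first, write-second, as in irix_core_dump().
 * 'page' is assumed a power of two.
 */
struct seg { unsigned long filesz; unsigned long offset; };

static unsigned long assign_offsets(struct seg *segs, int n,
				    unsigned long hdr_bytes,
				    unsigned long page)
{
	unsigned long off = (hdr_bytes + page - 1) & ~(page - 1); /* dataoff */
	int i;

	for (i = 0; i < n; i++) {
		segs[i].offset = off;	/* recorded in the phdr pass */
		off += segs[i].filesz;	/* consumed in the write pass */
	}
	return off;	/* must equal f_pos when the dump completes */
}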
-
-static int __init init_irix_binfmt(void)
-{
- extern int init_inventory(void);
- extern asmlinkage unsigned long sys_call_table;
- extern asmlinkage unsigned long sys_call_table_irix5;
-
- init_inventory();
-
- /*
- * Copy the IRIX5 syscall table (8000 bytes) into the main syscall
- * table. The IRIX5 calls are located by an offset of 8000 bytes
- * from the beginning of the main table.
- */
- memcpy((void *) ((unsigned long) &sys_call_table + 8000),
- &sys_call_table_irix5, 8000);
-
- return register_binfmt(&irix_format);
-}
-
-static void __exit exit_irix_binfmt(void)
-{
- /*
- * Remove the Irix ELF loader.
- */
- unregister_binfmt(&irix_format);
-}
-
-module_init(init_irix_binfmt)
-module_exit(exit_irix_binfmt)
diff --git a/arch/mips/kernel/irixinv.c b/arch/mips/kernel/irixinv.c
deleted file mode 100644
index de8584f6231..00000000000
--- a/arch/mips/kernel/irixinv.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Support the inventory interface for IRIX binaries
- * This is invoked before the mm layer is working, so we do not
- * use the linked lists for the inventory yet.
- *
- * Miguel de Icaza, 1997.
- */
-#include <linux/mm.h>
-#include <asm/inventory.h>
-#include <asm/uaccess.h>
-
-#define MAX_INVENTORY 50
-int inventory_items = 0;
-
-static inventory_t inventory [MAX_INVENTORY];
-
-void add_to_inventory (int class, int type, int controller, int unit, int state)
-{
- inventory_t *ni = &inventory [inventory_items];
-
- if (inventory_items == MAX_INVENTORY)
- return;
-
- ni->inv_class = class;
- ni->inv_type = type;
- ni->inv_controller = controller;
- ni->inv_unit = unit;
- ni->inv_state = state;
- ni->inv_next = ni;
- inventory_items++;
-}
-
-int dump_inventory_to_user (void __user *userbuf, int size)
-{
- inventory_t *inv = &inventory [0];
- inventory_t __user *user = userbuf;
- int v;
-
- if (!access_ok(VERIFY_WRITE, userbuf, size))
- return -EFAULT;
-
- for (v = 0; v < inventory_items; v++){
- inv = &inventory [v];
- if (copy_to_user (user, inv, sizeof (inventory_t)))
- return -EFAULT;
- user++;
- }
- return inventory_items * sizeof (inventory_t);
-}
-
-int __init init_inventory(void)
-{
- /*
- * gross hack while we put the right bits all over the kernel
- * most likely this will not let just anyone run the X server
- * until we put the right values all over the place
- */
- add_to_inventory (10, 3, 0, 0, 16400);
- add_to_inventory (1, 1, 150, -1, 12);
- add_to_inventory (1, 3, 0, 0, 8976);
- add_to_inventory (1, 2, 0, 0, 8976);
- add_to_inventory (4, 8, 0, 0, 2);
- add_to_inventory (5, 5, 0, 0, 1);
- add_to_inventory (3, 3, 0, 0, 32768);
- add_to_inventory (3, 4, 0, 0, 32768);
- add_to_inventory (3, 8, 0, 0, 524288);
- add_to_inventory (3, 9, 0, 0, 64);
- add_to_inventory (3, 1, 0, 0, 67108864);
- add_to_inventory (12, 3, 0, 0, 16);
- add_to_inventory (8, 7, 17, 0, 16777472);
- add_to_inventory (8, 0, 0, 0, 1);
- add_to_inventory (2, 1, 0, 13, 2);
- add_to_inventory (2, 2, 0, 2, 0);
- add_to_inventory (2, 2, 0, 1, 0);
- add_to_inventory (7, 14, 0, 0, 6);
-
- return 0;
-}
diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c
deleted file mode 100644
index e2863821a3d..00000000000
--- a/arch/mips/kernel/irixioctl.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * irixioctl.c: A fucking mess...
- *
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/sockios.h>
-#include <linux/syscalls.h>
-#include <linux/tty.h>
-#include <linux/file.h>
-#include <linux/rcupdate.h>
-
-#include <asm/uaccess.h>
-#include <asm/ioctl.h>
-#include <asm/ioctls.h>
-
-#undef DEBUG_IOCTLS
-#undef DEBUG_MISSING_IOCTL
-
-struct irix_termios {
- tcflag_t c_iflag, c_oflag, c_cflag, c_lflag;
- cc_t c_cc[NCCS];
-};
-
-extern void start_tty(struct tty_struct *tty);
-static struct tty_struct *get_tty(int fd)
-{
- struct file *filp;
- struct tty_struct *ttyp = NULL;
-
- rcu_read_lock();
- filp = fcheck(fd);
- if(filp && filp->private_data) {
- ttyp = (struct tty_struct *) filp->private_data;
-
- if(ttyp->magic != TTY_MAGIC)
-			ttyp = NULL;
- }
- rcu_read_unlock();
- return ttyp;
-}
-
-static struct tty_struct *get_real_tty(struct tty_struct *tp)
-{
- if (tp->driver->type == TTY_DRIVER_TYPE_PTY &&
- tp->driver->subtype == PTY_TYPE_MASTER)
- return tp->link;
- else
- return tp;
-}
-
-asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
-{
- struct tty_struct *tp, *rtp;
- mm_segment_t old_fs;
- int i, error = 0;
-
-#ifdef DEBUG_IOCTLS
- printk("[%s:%d] irix_ioctl(%d, ", current->comm, current->pid, fd);
-#endif
- switch(cmd) {
- case 0x00005401:
-#ifdef DEBUG_IOCTLS
- printk("TCGETA, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TCGETA, arg);
- break;
-
- case 0x0000540d: {
- struct termios kt;
- struct irix_termios __user *it =
- (struct irix_termios __user *) arg;
-
-#ifdef DEBUG_IOCTLS
- printk("TCGETS, %08lx) ", arg);
-#endif
- if (!access_ok(VERIFY_WRITE, it, sizeof(*it))) {
- error = -EFAULT;
- break;
- }
- old_fs = get_fs(); set_fs(get_ds());
- error = sys_ioctl(fd, TCGETS, (unsigned long) &kt);
- set_fs(old_fs);
- if (error)
- break;
-
- error = __put_user(kt.c_iflag, &it->c_iflag);
- error |= __put_user(kt.c_oflag, &it->c_oflag);
- error |= __put_user(kt.c_cflag, &it->c_cflag);
- error |= __put_user(kt.c_lflag, &it->c_lflag);
-
- for (i = 0; i < NCCS; i++)
- error |= __put_user(kt.c_cc[i], &it->c_cc[i]);
- break;
- }
-
- case 0x0000540e: {
- struct termios kt;
-		struct irix_termios __user *it =
-			(struct irix_termios __user *) arg;
-
-#ifdef DEBUG_IOCTLS
- printk("TCSETS, %08lx) ", arg);
-#endif
- if (!access_ok(VERIFY_READ, it, sizeof(*it))) {
- error = -EFAULT;
- break;
- }
- old_fs = get_fs(); set_fs(get_ds());
- error = sys_ioctl(fd, TCGETS, (unsigned long) &kt);
- set_fs(old_fs);
- if (error)
- break;
-
- error = __get_user(kt.c_iflag, &it->c_iflag);
- error |= __get_user(kt.c_oflag, &it->c_oflag);
- error |= __get_user(kt.c_cflag, &it->c_cflag);
- error |= __get_user(kt.c_lflag, &it->c_lflag);
-
- for (i = 0; i < NCCS; i++)
- error |= __get_user(kt.c_cc[i], &it->c_cc[i]);
-
- if (error)
- break;
- old_fs = get_fs(); set_fs(get_ds());
- error = sys_ioctl(fd, TCSETS, (unsigned long) &kt);
- set_fs(old_fs);
- break;
- }
-
- case 0x0000540f:
-#ifdef DEBUG_IOCTLS
- printk("TCSETSW, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TCSETSW, arg);
- break;
-
- case 0x00005471:
-#ifdef DEBUG_IOCTLS
- printk("TIOCNOTTY, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TIOCNOTTY, arg);
- break;
-
- case 0x00007416:
-#ifdef DEBUG_IOCTLS
- printk("TIOCGSID, %08lx) ", arg);
-#endif
- tp = get_tty(fd);
- if(!tp) {
- error = -EINVAL;
- break;
- }
- rtp = get_real_tty(tp);
-#ifdef DEBUG_IOCTLS
- printk("rtp->session=%d ", rtp->session);
-#endif
- error = put_user(rtp->session, (unsigned long __user *) arg);
- break;
-
- case 0x746e:
- /* TIOCSTART, same effect as hitting ^Q */
-#ifdef DEBUG_IOCTLS
- printk("TIOCSTART, %08lx) ", arg);
-#endif
- tp = get_tty(fd);
- if(!tp) {
- error = -EINVAL;
- break;
- }
- rtp = get_real_tty(tp);
- start_tty(rtp);
- break;
-
- case 0x20006968:
-#ifdef DEBUG_IOCTLS
- printk("SIOCGETLABEL, %08lx) ", arg);
-#endif
- error = -ENOPKG;
- break;
-
- case 0x40047477:
-#ifdef DEBUG_IOCTLS
- printk("TIOCGPGRP, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TIOCGPGRP, arg);
-#ifdef DEBUG_IOCTLS
- printk("arg=%d ", *(int *)arg);
-#endif
- break;
-
- case 0x40087468:
-#ifdef DEBUG_IOCTLS
- printk("TIOCGWINSZ, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TIOCGWINSZ, arg);
- break;
-
- case 0x8004667e:
- error = sys_ioctl(fd, FIONBIO, arg);
- break;
-
- case 0x80047476:
- error = sys_ioctl(fd, TIOCSPGRP, arg);
- break;
-
- case 0x8020690c:
- error = sys_ioctl(fd, SIOCSIFADDR, arg);
- break;
-
- case 0x80206910:
- error = sys_ioctl(fd, SIOCSIFFLAGS, arg);
- break;
-
- case 0xc0206911:
- error = sys_ioctl(fd, SIOCGIFFLAGS, arg);
- break;
-
- case 0xc020691b:
- error = sys_ioctl(fd, SIOCGIFMETRIC, arg);
- break;
-
- default: {
-#ifdef DEBUG_MISSING_IOCTL
- char *msg = "Unimplemented IOCTL cmd tell linux-mips@linux-mips.org\n";
-
-#ifdef DEBUG_IOCTLS
- printk("UNIMP_IOCTL, %08lx)\n", arg);
-#endif
- old_fs = get_fs(); set_fs(get_ds());
- sys_write(2, msg, strlen(msg));
- set_fs(old_fs);
- printk("[%s:%d] Does unimplemented IRIX ioctl cmd %08lx\n",
- current->comm, current->pid, cmd);
- do_exit(255);
-#else
- error = sys_ioctl (fd, cmd, arg);
-#endif
- }
-
- };
-#ifdef DEBUG_IOCTLS
- printk("error=%d\n", error);
-#endif
- return error;
-}
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
deleted file mode 100644
index 2132485caa7..00000000000
--- a/arch/mips/kernel/irixsig.c
+++ /dev/null
@@ -1,880 +0,0 @@
-/*
- * irixsig.c: WHEEE, IRIX signals! YOW, am I compatible or what?!?!
- *
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
- * Copyright (C) 1997 - 2000 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 2000 Silicon Graphics, Inc.
- */
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/time.h>
-#include <linux/ptrace.h>
-#include <linux/resource.h>
-
-#include <asm/ptrace.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
-
-#undef DEBUG_SIG
-
-#define _S(nr) (1<<((nr)-1))
-
-#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
-
-typedef struct {
- unsigned long sig[4];
-} irix_sigset_t;
-
-struct sigctx_irix5 {
- u32 rmask, cp0_status;
- u64 pc;
- u64 regs[32];
- u64 fpregs[32];
- u32 usedfp, fpcsr, fpeir, sstk_flags;
- u64 hi, lo;
- u64 cp0_cause, cp0_badvaddr, _unused0;
- irix_sigset_t sigset;
- u64 weird_fpu_thing;
- u64 _unused1[31];
-};
-
-#ifdef DEBUG_SIG
-/* Debugging */
-static inline void dump_irix5_sigctx(struct sigctx_irix5 *c)
-{
- int i;
-
- printk("misc: rmask[%08lx] status[%08lx] pc[%08lx]\n",
- (unsigned long) c->rmask,
- (unsigned long) c->cp0_status,
- (unsigned long) c->pc);
- printk("regs: ");
- for(i = 0; i < 16; i++)
- printk("[%d]<%08lx> ", i, (unsigned long) c->regs[i]);
- printk("\nregs: ");
- for(i = 16; i < 32; i++)
- printk("[%d]<%08lx> ", i, (unsigned long) c->regs[i]);
- printk("\nfpregs: ");
- for(i = 0; i < 16; i++)
- printk("[%d]<%08lx> ", i, (unsigned long) c->fpregs[i]);
- printk("\nfpregs: ");
- for(i = 16; i < 32; i++)
- printk("[%d]<%08lx> ", i, (unsigned long) c->fpregs[i]);
- printk("misc: usedfp[%d] fpcsr[%08lx] fpeir[%08lx] stk_flgs[%08lx]\n",
- (int) c->usedfp, (unsigned long) c->fpcsr,
- (unsigned long) c->fpeir, (unsigned long) c->sstk_flags);
- printk("misc: hi[%08lx] lo[%08lx] cause[%08lx] badvaddr[%08lx]\n",
- (unsigned long) c->hi, (unsigned long) c->lo,
- (unsigned long) c->cp0_cause, (unsigned long) c->cp0_badvaddr);
- printk("misc: sigset<0>[%08lx] sigset<1>[%08lx] sigset<2>[%08lx] "
- "sigset<3>[%08lx]\n", (unsigned long) c->sigset.sig[0],
- (unsigned long) c->sigset.sig[1],
- (unsigned long) c->sigset.sig[2],
- (unsigned long) c->sigset.sig[3]);
-}
-#endif
-
-static int setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signr, sigset_t *oldmask)
-{
- struct sigctx_irix5 __user *ctx;
- unsigned long sp;
- int error, i;
-
- sp = regs->regs[29];
- sp -= sizeof(struct sigctx_irix5);
- sp &= ~(0xf);
- ctx = (struct sigctx_irix5 __user *) sp;
- if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
- goto segv_and_exit;
-
- error = __put_user(0, &ctx->weird_fpu_thing);
- error |= __put_user(~(0x00000001), &ctx->rmask);
- error |= __put_user(0, &ctx->regs[0]);
- for(i = 1; i < 32; i++)
- error |= __put_user((u64) regs->regs[i], &ctx->regs[i]);
-
- error |= __put_user((u64) regs->hi, &ctx->hi);
- error |= __put_user((u64) regs->lo, &ctx->lo);
- error |= __put_user((u64) regs->cp0_epc, &ctx->pc);
- error |= __put_user(!!used_math(), &ctx->usedfp);
- error |= __put_user((u64) regs->cp0_cause, &ctx->cp0_cause);
- error |= __put_user((u64) regs->cp0_badvaddr, &ctx->cp0_badvaddr);
-
- error |= __put_user(0, &ctx->sstk_flags); /* XXX sigstack unimp... todo... */
-
- error |= __copy_to_user(&ctx->sigset, oldmask, sizeof(irix_sigset_t)) ? -EFAULT : 0;
-
- if (error)
- goto segv_and_exit;
-
-#ifdef DEBUG_SIG
- dump_irix5_sigctx(ctx);
-#endif
-
- regs->regs[4] = (unsigned long) signr;
- regs->regs[5] = 0; /* XXX sigcode XXX */
- regs->regs[6] = regs->regs[29] = sp;
- regs->regs[7] = (unsigned long) ka->sa.sa_handler;
- regs->regs[25] = regs->cp0_epc = (unsigned long) ka->sa_restorer;
-
- return 1;
-
-segv_and_exit:
- force_sigsegv(signr, current);
- return 0;
-}
-
-static inline int
-setup_irix_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signr, sigset_t *oldmask, siginfo_t *info)
-{
- printk("Aiee: setup_tr_frame wants to be written");
- do_exit(SIGSEGV);
-}
-
-static inline int handle_signal(unsigned long sig, siginfo_t *info,
- struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs)
-{
- int ret;
-
- switch(regs->regs[0]) {
- case ERESTARTNOHAND:
- regs->regs[2] = EINTR;
- break;
- case ERESTARTSYS:
- if(!(ka->sa.sa_flags & SA_RESTART)) {
- regs->regs[2] = EINTR;
- break;
- }
- /* fallthrough */
- case ERESTARTNOINTR: /* Userland will reload $v0. */
- regs->cp0_epc -= 8;
- }
-
- regs->regs[0] = 0; /* Don't deal with this again. */
-
- if (ka->sa.sa_flags & SA_SIGINFO)
- ret = setup_irix_rt_frame(ka, regs, sig, oldset, info);
- else
- ret = setup_irix_frame(ka, regs, sig, oldset);
-
- spin_lock_irq(&current->sighand->siglock);
-	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&current->blocked, sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- return ret;
-}
-
-void do_irix_signal(struct pt_regs *regs)
-{
- struct k_sigaction ka;
- siginfo_t info;
- int signr;
- sigset_t *oldset;
-
- /*
- * We want the common case to go fast, which is why we may in certain
- * cases get here from kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return;
-
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- oldset = &current->saved_sigmask;
- else
- oldset = &current->blocked;
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- /* Whee! Actually deliver the signal. */
- if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
- /* a signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- }
-
- return;
- }
-
-	/*
-	 * Whose code doesn't conform to the restartable syscall convention
-	 * dies here!!!  The li instruction, a single machine instruction,
-	 * must be immediately followed by the syscall instruction.
-	 */
- if (regs->regs[0]) {
- if (regs->regs[2] == ERESTARTNOHAND ||
- regs->regs[2] == ERESTARTSYS ||
- regs->regs[2] == ERESTARTNOINTR) {
- regs->cp0_epc -= 8;
- }
- if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
- regs->regs[2] = __NR_restart_syscall;
- regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 4;
- }
- regs->regs[0] = 0; /* Don't deal with this again. */
- }
-
- /*
- * If there's no signal to deliver, we just put the saved sigmask
- * back
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
-}
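The restart logic above leans on a user-side instruction layout the kernel only assumes. An illustrative rendering of that convention (MIPS assembly, shown as a comment; the mnemonics are an assumption of this sketch, not taken from the file):

/*
 * Illustrative user-side syscall sequence:
 *
 *	li	$v0, __NR_foo	# 4 bytes: load the syscall number
 *	syscall			# 4 bytes: trap into the kernel
 *
 * cp0_epc -= 8 re-executes both instructions, so $v0 is reloaded
 * before the retry; cp0_epc -= 4 (the ERESTART_RESTARTBLOCK case)
 * re-executes only the syscall after $v0 has been rewritten.
 */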
-
-asmlinkage void
-irix_sigreturn(struct pt_regs *regs)
-{
- struct sigctx_irix5 __user *context, *magic;
- unsigned long umask, mask;
- u64 *fregs;
- u32 usedfp;
- int error, sig, i, base = 0;
- sigset_t blocked;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- if (regs->regs[2] == 1000)
- base = 1;
-
- context = (struct sigctx_irix5 __user *) regs->regs[base + 4];
- magic = (struct sigctx_irix5 __user *) regs->regs[base + 5];
- sig = (int) regs->regs[base + 6];
-#ifdef DEBUG_SIG
- printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n",
- current->comm, current->pid, context, magic, sig);
-#endif
- if (!context)
- context = magic;
- if (!access_ok(VERIFY_READ, context, sizeof(struct sigctx_irix5)))
- goto badframe;
-
-#ifdef DEBUG_SIG
- dump_irix5_sigctx(context);
-#endif
-
- error = __get_user(regs->cp0_epc, &context->pc);
- error |= __get_user(umask, &context->rmask);
-
- mask = 2;
- for (i = 1; i < 32; i++, mask <<= 1) {
- if (umask & mask)
- error |= __get_user(regs->regs[i], &context->regs[i]);
- }
- error |= __get_user(regs->hi, &context->hi);
- error |= __get_user(regs->lo, &context->lo);
-
- error |= __get_user(usedfp, &context->usedfp);
- if ((umask & 1) && usedfp) {
- fregs = (u64 *) &current->thread.fpu;
-
- for(i = 0; i < 32; i++)
- error |= __get_user(fregs[i], &context->fpregs[i]);
- error |= __get_user(current->thread.fpu.fcr31, &context->fpcsr);
- }
-
- /* XXX do sigstack crapola here... XXX */
-
- error |= __copy_from_user(&blocked, &context->sigset, sizeof(blocked)) ? -EFAULT : 0;
-
- if (error)
- goto badframe;
-
- sigdelsetmask(&blocked, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = blocked;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29,%0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-badframe:
- force_sig(SIGSEGV, current);
-}
-
-struct sigact_irix5 {
- int flags;
- void (*handler)(int);
- u32 sigset[4];
- int _unused0[2];
-};
-
-#define SIG_SETMASK32 256 /* Goodie from SGI for BSD compatibility:
- set only the low 32 bit of the sigset. */
-
-#ifdef DEBUG_SIG
-static inline void dump_sigact_irix5(struct sigact_irix5 *p)
-{
- printk("<f[%d] hndlr[%08lx] msk[%08lx]>", p->flags,
- (unsigned long) p->handler,
- (unsigned long) p->sigset[0]);
-}
-#endif
-
-asmlinkage int
-irix_sigaction(int sig, const struct sigaction __user *act,
- struct sigaction __user *oact, void __user *trampoline)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
-#ifdef DEBUG_SIG
- printk(" (%d,%s,%s,%08lx) ", sig, (!new ? "0" : "NEW"),
- (!old ? "0" : "OLD"), trampoline);
- if(new) {
- dump_sigact_irix5(new); printk(" ");
- }
-#endif
- if (act) {
- sigset_t mask;
- int err;
-
- if (!access_ok(VERIFY_READ, act, sizeof(*act)))
- return -EFAULT;
- err = __get_user(new_ka.sa.sa_handler, &act->sa_handler);
- err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-
- err |= __copy_from_user(&mask, &act->sa_mask, sizeof(sigset_t)) ? -EFAULT : 0;
- if (err)
- return err;
-
- /*
- * Hmmm... methinks IRIX libc always passes a valid trampoline
- * value for all invocations of sigaction. Will have to
- * investigate. POSIX POSIX, die die die...
- */
- new_ka.sa_restorer = trampoline;
- }
-
-/* XXX Implement SIG_SETMASK32 for IRIX compatibility */
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- int err;
-
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
- return -EFAULT;
-
- err = __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
- err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- err |= __copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask,
- sizeof(sigset_t)) ? -EFAULT : 0;
- if (err)
- return -EFAULT;
- }
-
- return ret;
-}
-
-asmlinkage int irix_sigpending(irix_sigset_t __user *set)
-{
- return do_sigpending(set, sizeof(*set));
-}
-
-asmlinkage int irix_sigprocmask(int how, irix_sigset_t __user *new,
- irix_sigset_t __user *old)
-{
- sigset_t oldbits, newbits;
-
- if (new) {
- if (!access_ok(VERIFY_READ, new, sizeof(*new)))
- return -EFAULT;
- if (__copy_from_user(&newbits, new, sizeof(unsigned long)*4))
- return -EFAULT;
- sigdelsetmask(&newbits, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- oldbits = current->blocked;
-
- switch(how) {
- case 1:
- sigorsets(&newbits, &oldbits, &newbits);
- break;
-
- case 2:
- sigandsets(&newbits, &oldbits, &newbits);
- break;
-
- case 3:
- break;
-
- case 256:
- siginitset(&newbits, newbits.sig[0]);
- break;
-
-		default:
-			spin_unlock_irq(&current->sighand->siglock);
-			return -EINVAL;
-		}
-
-		current->blocked = newbits;
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
- }
- if (old)
- return copy_to_user(old, &current->blocked,
- sizeof(unsigned long)*4) ? -EFAULT : 0;
-
- return 0;
-}
-
-asmlinkage int irix_sigsuspend(struct pt_regs *regs)
-{
- sigset_t newset;
- sigset_t __user *uset;
-
- uset = (sigset_t __user *) regs->regs[4];
- if (copy_from_user(&newset, uset, sizeof(sigset_t)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
-/* hate hate hate... */
-struct irix5_siginfo {
- int sig, code, error;
- union {
- char unused[128 - (3 * 4)]; /* Safety net. */
- struct {
- int pid;
- union {
- int uid;
- struct {
- int utime, status, stime;
- } child;
- } procdata;
- } procinfo;
-
- unsigned long fault_addr;
-
- struct {
- int fd;
- long band;
- } fileinfo;
-
- unsigned long sigval;
- } stuff;
-};
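The char unused[128 - (3 * 4)] arm exists purely to pin the union, and with it the whole structure, to the fixed IRIX ABI size no matter which member is largest. A hedged compile-time check of that invariant, assuming a 32-bit build where long and pointers are 4 bytes (on a 64-bit kernel the union's long members change the sizing):

/* Sketch: ABI-size check; the 32-bit layout is an assumption here. */
static inline void irix5_siginfo_abi_check(void)
{
	/* 3 fixed ints (12 bytes) + a 116-byte union = 128 bytes. */
	BUILD_BUG_ON(sizeof(struct irix5_siginfo) != 128);
}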
-
-asmlinkage int irix_sigpoll_sys(unsigned long __user *set,
- struct irix5_siginfo __user *info, struct timespec __user *tp)
-{
- long expire = MAX_SCHEDULE_TIMEOUT;
- sigset_t kset;
- int i, sig, error, timeo = 0;
- struct timespec ktp;
-
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_sigpoll_sys(%p,%p,%p)\n",
- current->comm, current->pid, set, info, tp);
-#endif
-
- /* Must always specify the signal set. */
- if (!set)
- return -EINVAL;
-
-	if (copy_from_user(&kset, set, sizeof(kset)))
- return -EFAULT;
-
- if (info && clear_user(info, sizeof(*info))) {
- error = -EFAULT;
- goto out;
- }
-
- if (tp) {
- if (copy_from_user(&ktp, tp, sizeof(*tp)))
- return -EFAULT;
-
- if (!ktp.tv_sec && !ktp.tv_nsec)
- return -EINVAL;
-
- expire = timespec_to_jiffies(&ktp) +
- (ktp.tv_sec || ktp.tv_nsec);
- }
-
- while(1) {
- long tmp = 0;
-
- expire = schedule_timeout_interruptible(expire);
-
-		for (i = 0; i < _NSIG_WORDS; i++)
-			tmp |= (current->pending.signal.sig[i] & kset.sig[i]);
-
- if (tmp)
- break;
- if (!expire) {
- timeo = 1;
- break;
- }
- if (signal_pending(current))
- return -EINTR;
- }
- if (timeo)
- return -EAGAIN;
-
-	for (sig = 1; sig <= 65 /* IRIX_NSIG */; sig++) {
-		if (sigismember(&kset, sig))
-			continue;
-		if (sigismember(&current->pending.signal, sig)) {
-			/* XXX need more than this... */
-			if (info)
-				return copy_to_user(&info->sig, &sig,
-						    sizeof(sig)) ? -EFAULT : 0;
-			return 0;
-		}
-	}
-
- /* Should not get here, but do something sane if we do. */
- error = -EINTR;
-
-out:
- return error;
-}
-
-/* This is here because of irix5_siginfo definition. */
-#define IRIX_P_PID 0
-#define IRIX_P_PGID 2
-#define IRIX_P_ALL 7
-
-#define W_EXITED 1
-#define W_TRAPPED 2
-#define W_STOPPED 4
-#define W_CONT 8
-#define W_NOHANG 64
-
-#define W_MASK (W_EXITED | W_TRAPPED | W_STOPPED | W_CONT | W_NOHANG)
-
-asmlinkage int irix_waitsys(int type, int pid,
- struct irix5_siginfo __user *info, int options,
- struct rusage __user *ru)
-{
- int flag, retval;
- DECLARE_WAITQUEUE(wait, current);
- struct task_struct *tsk;
- struct task_struct *p;
- struct list_head *_p;
-
- if (!info)
- return -EINVAL;
-
- if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
- return -EFAULT;
-
- if (ru)
- if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)))
- return -EFAULT;
-
- if (options & ~W_MASK)
- return -EINVAL;
-
- if (type != IRIX_P_PID && type != IRIX_P_PGID && type != IRIX_P_ALL)
- return -EINVAL;
-
- add_wait_queue(&current->signal->wait_chldexit, &wait);
-repeat:
- flag = 0;
- current->state = TASK_INTERRUPTIBLE;
- read_lock(&tasklist_lock);
- tsk = current;
- list_for_each(_p,&tsk->children) {
- p = list_entry(_p,struct task_struct,sibling);
- if ((type == IRIX_P_PID) && p->pid != pid)
- continue;
- if ((type == IRIX_P_PGID) && process_group(p) != pid)
- continue;
- if ((p->exit_signal != SIGCHLD))
- continue;
- flag = 1;
- switch (p->state) {
- case TASK_STOPPED:
- if (!p->exit_code)
- continue;
- if (!(options & (W_TRAPPED|W_STOPPED)) &&
- !(p->ptrace & PT_PTRACED))
- continue;
- read_unlock(&tasklist_lock);
-
- /* move to end of parent's list to avoid starvation */
- write_lock_irq(&tasklist_lock);
- remove_parent(p);
- add_parent(p);
- write_unlock_irq(&tasklist_lock);
- retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
- if (retval)
- goto end_waitsys;
-
- retval = __put_user(SIGCHLD, &info->sig);
- retval |= __put_user(0, &info->code);
- retval |= __put_user(p->pid, &info->stuff.procinfo.pid);
- retval |= __put_user((p->exit_code >> 8) & 0xff,
- &info->stuff.procinfo.procdata.child.status);
- retval |= __put_user(p->utime, &info->stuff.procinfo.procdata.child.utime);
- retval |= __put_user(p->stime, &info->stuff.procinfo.procdata.child.stime);
- if (retval)
- goto end_waitsys;
-
- p->exit_code = 0;
- goto end_waitsys;
-
- case EXIT_ZOMBIE:
- current->signal->cutime += p->utime + p->signal->cutime;
- current->signal->cstime += p->stime + p->signal->cstime;
- if (ru != NULL)
- getrusage(p, RUSAGE_BOTH, ru);
- retval = __put_user(SIGCHLD, &info->sig);
- retval |= __put_user(1, &info->code); /* CLD_EXITED */
- retval |= __put_user(p->pid, &info->stuff.procinfo.pid);
- retval |= __put_user((p->exit_code >> 8) & 0xff,
- &info->stuff.procinfo.procdata.child.status);
- retval |= __put_user(p->utime,
- &info->stuff.procinfo.procdata.child.utime);
- retval |= __put_user(p->stime,
- &info->stuff.procinfo.procdata.child.stime);
-			if (retval)
-				goto end_waitsys;
-
- if (p->real_parent != p->parent) {
- write_lock_irq(&tasklist_lock);
- remove_parent(p);
- p->parent = p->real_parent;
- add_parent(p);
- do_notify_parent(p, SIGCHLD);
- write_unlock_irq(&tasklist_lock);
- } else
- release_task(p);
- goto end_waitsys;
- default:
- continue;
- }
- tsk = next_thread(tsk);
- }
- read_unlock(&tasklist_lock);
- if (flag) {
- retval = 0;
- if (options & W_NOHANG)
- goto end_waitsys;
- retval = -ERESTARTSYS;
- if (signal_pending(current))
- goto end_waitsys;
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- goto repeat;
- }
- retval = -ECHILD;
-end_waitsys:
- current->state = TASK_RUNNING;
- remove_wait_queue(&current->signal->wait_chldexit, &wait);
-
- return retval;
-}
-
-struct irix5_context {
- u32 flags;
- u32 link;
- u32 sigmask[4];
- struct { u32 sp, size, flags; } stack;
- int regs[36];
- u32 fpregs[32];
- u32 fpcsr;
- u32 _unused0;
- u32 _unused1[47];
- u32 weird_graphics_thing;
-};
-
-asmlinkage int irix_getcontext(struct pt_regs *regs)
-{
- int error, i, base = 0;
- struct irix5_context __user *ctx;
- unsigned long flags;
-
- if (regs->regs[2] == 1000)
- base = 1;
- ctx = (struct irix5_context __user *) regs->regs[base + 4];
-
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_getcontext(%p)\n",
- current->comm, current->pid, ctx);
-#endif
-
-	if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
-		return -EFAULT;
-
- error = __put_user(current->thread.irix_oldctx, &ctx->link);
-
- error |= __copy_to_user(&ctx->sigmask, &current->blocked, sizeof(irix_sigset_t)) ? -EFAULT : 0;
-
- /* XXX Do sigstack stuff someday... */
- error |= __put_user(0, &ctx->stack.sp);
- error |= __put_user(0, &ctx->stack.size);
- error |= __put_user(0, &ctx->stack.flags);
-
- error |= __put_user(0, &ctx->weird_graphics_thing);
- error |= __put_user(0, &ctx->regs[0]);
- for (i = 1; i < 32; i++)
- error |= __put_user(regs->regs[i], &ctx->regs[i]);
- error |= __put_user(regs->lo, &ctx->regs[32]);
- error |= __put_user(regs->hi, &ctx->regs[33]);
- error |= __put_user(regs->cp0_cause, &ctx->regs[34]);
- error |= __put_user(regs->cp0_epc, &ctx->regs[35]);
-
- flags = 0x0f;
- if (!used_math()) {
- flags &= ~(0x08);
- } else {
- /* XXX wheee... */
- printk("Wheee, no code for saving IRIX FPU context yet.\n");
- }
- error |= __put_user(flags, &ctx->flags);
-
- return error;
-}
-
-asmlinkage void irix_setcontext(struct pt_regs *regs)
-{
- struct irix5_context __user *ctx;
- int err, base = 0;
- u32 flags;
-
- if (regs->regs[2] == 1000)
- base = 1;
- ctx = (struct irix5_context __user *) regs->regs[base + 4];
-
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_setcontext(%p)\n",
- current->comm, current->pid, ctx);
-#endif
-
- if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)))
- goto segv_and_exit;
-
- err = __get_user(flags, &ctx->flags);
- if (flags & 0x02) {
- /* XXX sigstack garbage, todo... */
- printk("Wheee, cannot do sigstack stuff in setcontext\n");
- }
-
- if (flags & 0x04) {
- int i;
-
- /* XXX extra control block stuff... todo... */
- for (i = 1; i < 32; i++)
- err |= __get_user(regs->regs[i], &ctx->regs[i]);
- err |= __get_user(regs->lo, &ctx->regs[32]);
- err |= __get_user(regs->hi, &ctx->regs[33]);
- err |= __get_user(regs->cp0_epc, &ctx->regs[35]);
- }
-
- if (flags & 0x08)
- /* XXX fpu context, blah... */
- printk(KERN_ERR "Wheee, cannot restore FPU context yet...\n");
-
- err |= __get_user(current->thread.irix_oldctx, &ctx->link);
- if (err)
- goto segv_and_exit;
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29,%0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-segv_and_exit:
- force_sigsegv(SIGSEGV, current);
-}
-
-struct irix_sigstack {
- unsigned long sp;
- int status;
-};
-
-asmlinkage int irix_sigstack(struct irix_sigstack __user *new,
- struct irix_sigstack __user *old)
-{
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_sigstack(%p,%p)\n",
- current->comm, current->pid, new, old);
-#endif
- if (new) {
- if (!access_ok(VERIFY_READ, new, sizeof(*new)))
- return -EFAULT;
- }
-
- if (old) {
- if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-struct irix_sigaltstack { unsigned long sp; int size; int status; };
-
-asmlinkage int irix_sigaltstack(struct irix_sigaltstack __user *new,
- struct irix_sigaltstack __user *old)
-{
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_sigaltstack(%p,%p)\n",
- current->comm, current->pid, new, old);
-#endif
- if (new)
- if (!access_ok(VERIFY_READ, new, sizeof(*new)))
- return -EFAULT;
-
- if (old) {
- if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-struct irix_procset {
- int cmd, ltype, lid, rtype, rid;
-};
-
-asmlinkage int irix_sigsendset(struct irix_procset __user *pset, int sig)
-{
- if (!access_ok(VERIFY_READ, pset, sizeof(*pset)))
- return -EFAULT;
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_sigsendset([%d,%d,%d,%d,%d],%d)\n",
- current->comm, current->pid,
- pset->cmd, pset->ltype, pset->lid, pset->rtype, pset->rid,
- sig);
-#endif
- return -EINVAL;
-}
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
new file mode 100644
index 00000000000..88e4c323382
--- /dev/null
+++ b/arch/mips/kernel/irq-gic.c
@@ -0,0 +1,380 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/bitmap.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/clocksource.h>
+
+#include <asm/io.h>
+#include <asm/gic.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <linux/hardirq.h>
+#include <asm-generic/bitops/find.h>
+
+unsigned int gic_frequency;
+unsigned int gic_present;
+unsigned long _gic_base;
+unsigned int gic_irq_base;
+unsigned int gic_irq_flags[GIC_NUM_INTRS];
+
+/* The index into this array is the vector # of the interrupt. */
+struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS];
+
+static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
+static struct gic_pending_regs pending_regs[NR_CPUS];
+static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
+
+#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
+cycle_t gic_read_count(void)
+{
+ unsigned int hi, hi2, lo;
+
+ do {
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+ } while (hi2 != hi);
+
+ return (((cycle_t) hi) << 32) + lo;
+}
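The hi/lo/hi2 loop above is the standard torn-read guard for a 64-bit counter exposed as two 32-bit registers: if the low word carries into the high word between the reads, the two high-word samples disagree and the read is retried. A minimal sketch of the same pattern, with read_hi32()/read_lo32() as hypothetical accessors standing in for the GICREAD calls:

/* Sketch: torn-read-safe 64-bit read built from two 32-bit reads. */
#include <stdint.h>

extern uint32_t read_hi32(void);	/* hypothetical MMIO accessors */
extern uint32_t read_lo32(void);

static uint64_t read_counter64(void)
{
	uint32_t hi, hi2, lo;

	do {
		hi  = read_hi32();	/* sample the high word */
		lo  = read_lo32();	/* sample the low word */
		hi2 = read_hi32();	/* re-check the high word */
	} while (hi2 != hi);		/* low word wrapped: retry */

	return ((uint64_t) hi << 32) | lo;
}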
+
+void gic_write_compare(cycle_t cnt)
+{
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+}
+
+void gic_write_cpu_compare(cycle_t cnt, int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+
+ local_irq_restore(flags);
+}
+
+cycle_t gic_read_compare(void)
+{
+ unsigned int hi, lo;
+
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
+
+ return (((cycle_t) hi) << 32) + lo;
+}
+#endif
+
+unsigned int gic_get_timer_pending(void)
+{
+ unsigned int vpe_pending;
+
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
+ GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending);
+ return (vpe_pending & GIC_VPE_PEND_TIMER_MSK);
+}
+
+void gic_bind_eic_interrupt(int irq, int set)
+{
+ /* Convert irq vector # to hw int # */
+ irq -= GIC_PIN_TO_VEC_OFFSET;
+
+ /* Set irq to use shadow set */
+ GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set);
+}
+
+void gic_send_ipi(unsigned int intr)
+{
+ GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
+}
+
+static void gic_eic_irq_dispatch(void)
+{
+ unsigned int cause = read_c0_cause();
+ int irq;
+
+ irq = (cause & ST0_IM) >> STATUSB_IP2;
+ if (irq == 0)
+ irq = -1;
+
+ if (irq >= 0)
+ do_IRQ(gic_irq_base + irq);
+ else
+ spurious_interrupt();
+}
+
+static void __init vpe_local_setup(unsigned int numvpes)
+{
+ unsigned long timer_intr = GIC_INT_TMR;
+ unsigned long perf_intr = GIC_INT_PERFCTR;
+ unsigned int vpe_ctl;
+ int i;
+
+ if (cpu_has_veic) {
+ /*
+ * GIC timer interrupt -> CPU HW Int X (vector X+2) ->
+ * map to pin X+2-1 (since GIC adds 1)
+ */
+ timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
+ /*
+ * GIC perfcnt interrupt -> CPU HW Int X (vector X+2) ->
+ * map to pin X+2-1 (since GIC adds 1)
+ */
+ perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
+ }
+
+ /*
+ * Setup the default performance counter timer interrupts
+ * for all VPEs
+ */
+ for (i = 0; i < numvpes; i++) {
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+
+ /* Are Interrupts locally routable? */
+ GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
+ if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+ GIC_MAP_TO_PIN_MSK | timer_intr);
+ if (cpu_has_veic) {
+ set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET,
+ gic_eic_irq_dispatch);
+ gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK;
+ }
+
+ if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+ GIC_MAP_TO_PIN_MSK | perf_intr);
+ if (cpu_has_veic) {
+ set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch);
+ gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK;
+ }
+ }
+}
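Both vpe_local_setup() and gic_write_cpu_compare() use the same indirection: writing a VPE number to GIC_VPE_OTHER_ADDR steers the whole VPE_OTHER register block at that VPE, after which ordinary register writes land remotely. A generic sketch of this select-then-access window pattern (the pointer parameters and the writel() usage are assumptions of the sketch, standing in for the GICWRITE macros):

/* Sketch: indexed MMIO register window.  'select_reg' steers which
 * unit the 'window_reg' block addresses; both are assumed ioremap()ed.
 */
static void window_write(volatile u32 __iomem *select_reg,
			 volatile u32 __iomem *window_reg,
			 u32 unit, u32 val)
{
	writel(unit, select_reg);	/* point the window at 'unit' */
	writel(val, window_reg);	/* lands in that unit's register */
}

Because the selection register is shared per-accessor state, nothing may reselect between the two writes, which is why gic_write_cpu_compare() wraps the pair in local_irq_save().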
+
+unsigned int gic_compare_int(void)
+{
+ unsigned int pending;
+
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
+ if (pending & GIC_VPE_PEND_CMP_MSK)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int gic_get_int(void)
+{
+ unsigned int i;
+ unsigned long *pending, *intrmask, *pcpu_mask;
+ unsigned long *pending_abs, *intrmask_abs;
+
+ /* Get per-cpu bitmaps */
+ pending = pending_regs[smp_processor_id()].pending;
+ intrmask = intrmask_regs[smp_processor_id()].intrmask;
+ pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
+
+ pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+ GIC_SH_PEND_31_0_OFS);
+ intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+ GIC_SH_MASK_31_0_OFS);
+
+ for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
+ GICREAD(*pending_abs, pending[i]);
+ GICREAD(*intrmask_abs, intrmask[i]);
+ pending_abs++;
+ intrmask_abs++;
+ }
+
+ bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
+ bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
+
+ return find_first_bit(pending, GIC_NUM_INTRS);
+}
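gic_get_int() reduces "which interrupt fires next" to three bitmap ANDs: hardware pending bits, the global enable mask, and this CPU's routing mask; the lowest surviving bit wins. The same selection over plain words, as a hedged sketch (NWORDS is illustrative and __builtin_ctzl is a GCC/Clang builtin, both assumptions here):

/* Sketch: the three-way bitmap AND behind gic_get_int().
 * Returns the lowest deliverable vector, or -1 if none.
 */
#define NWORDS 2	/* illustrative: enough words for the vector count */

static int first_deliverable(const unsigned long *pending,
			     const unsigned long *enabled,
			     const unsigned long *routed_here)
{
	int i;

	for (i = 0; i < NWORDS; i++) {
		unsigned long live = pending[i] & enabled[i] & routed_here[i];

		if (live)
			return i * 8 * (int) sizeof(long) + __builtin_ctzl(live);
	}
	return -1;
}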
+
+static void gic_mask_irq(struct irq_data *d)
+{
+ GIC_CLR_INTR_MASK(d->irq - gic_irq_base);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ GIC_SET_INTR_MASK(d->irq - gic_irq_base);
+}
+
+#ifdef CONFIG_SMP
+static DEFINE_SPINLOCK(gic_lock);
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int irq = (d->irq - gic_irq_base);
+ cpumask_t tmp = CPU_MASK_NONE;
+ unsigned long flags;
+ int i;
+
+ cpumask_and(&tmp, cpumask, cpu_online_mask);
+ if (cpus_empty(tmp))
+ return -1;
+
+ /* Assumption : cpumask refers to a single CPU */
+ spin_lock_irqsave(&gic_lock, flags);
+
+ /* Re-route this IRQ */
+ GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
+
+ /* Update the pcpu_masks */
+ for (i = 0; i < NR_CPUS; i++)
+ clear_bit(irq, pcpu_masks[i].pcpu_mask);
+ set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+
+ cpumask_copy(d->affinity, cpumask);
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+#endif
+
+static struct irq_chip gic_irq_controller = {
+ .name = "MIPS GIC",
+ .irq_ack = gic_irq_ack,
+ .irq_mask = gic_mask_irq,
+ .irq_mask_ack = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_finish_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+};
+
+static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
+ unsigned int pin, unsigned int polarity, unsigned int trigtype,
+ unsigned int flags)
+{
+ struct gic_shared_intr_map *map_ptr;
+
+ /* Setup Intr to Pin mapping */
+ if (pin & GIC_MAP_TO_NMI_MSK) {
+ GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+		/* FIXME: hack to route the NMI to all CPUs */
+ for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
+ GICWRITE(GIC_REG_ADDR(SHARED,
+ GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
+ 0xffffffff);
+ }
+ } else {
+ GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
+ GIC_MAP_TO_PIN_MSK | pin);
+ /* Setup Intr to CPU mapping */
+ GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
+ if (cpu_has_veic) {
+ set_vi_handler(pin + GIC_PIN_TO_VEC_OFFSET,
+ gic_eic_irq_dispatch);
+ map_ptr = &gic_shared_intr_map[pin + GIC_PIN_TO_VEC_OFFSET];
+			BUG_ON(map_ptr->num_shared_intr >= GIC_MAX_SHARED_INTR);
+ map_ptr->intr_list[map_ptr->num_shared_intr++] = intr;
+ }
+ }
+
+ /* Setup Intr Polarity */
+ GIC_SET_POLARITY(intr, polarity);
+
+ /* Setup Intr Trigger Type */
+ GIC_SET_TRIGGER(intr, trigtype);
+
+ /* Init Intr Masks */
+ GIC_CLR_INTR_MASK(intr);
+ /* Initialise per-cpu Interrupt software masks */
+ if (flags & GIC_FLAG_IPI)
+ set_bit(intr, pcpu_masks[cpu].pcpu_mask);
+ if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0))
+ GIC_SET_INTR_MASK(intr);
+ if (trigtype == GIC_TRIG_EDGE)
+ gic_irq_flags[intr] |= GIC_TRIG_EDGE;
+}
+
+static void __init gic_basic_init(int numintrs, int numvpes,
+ struct gic_intr_map *intrmap, int mapsize)
+{
+ unsigned int i, cpu;
+ unsigned int pin_offset = 0;
+
+ board_bind_eic_interrupt = &gic_bind_eic_interrupt;
+
+ /* Setup defaults */
+ for (i = 0; i < numintrs; i++) {
+ GIC_SET_POLARITY(i, GIC_POL_POS);
+ GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
+ GIC_CLR_INTR_MASK(i);
+ if (i < GIC_NUM_INTRS) {
+ gic_irq_flags[i] = 0;
+ gic_shared_intr_map[i].num_shared_intr = 0;
+ gic_shared_intr_map[i].local_intr_mask = 0;
+ }
+ }
+
+ /*
+ * In EIC mode, the HW_INT# is offset by (2-1). Need to subtract
+ * one because the GIC will add one (since 0=no intr).
+ */
+ if (cpu_has_veic)
+ pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
+
+ /* Setup specifics */
+ for (i = 0; i < mapsize; i++) {
+ cpu = intrmap[i].cpunum;
+ if (cpu == GIC_UNUSED)
+ continue;
+ if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
+ continue;
+ gic_setup_intr(i,
+ intrmap[i].cpunum,
+ intrmap[i].pin + pin_offset,
+ intrmap[i].polarity,
+ intrmap[i].trigtype,
+ intrmap[i].flags);
+ }
+
+ vpe_local_setup(numvpes);
+}
+
+void __init gic_init(unsigned long gic_base_addr,
+ unsigned long gic_addrspace_size,
+ struct gic_intr_map *intr_map, unsigned int intr_map_size,
+ unsigned int irqbase)
+{
+ unsigned int gicconfig;
+ int numvpes, numintrs;
+
+ _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
+ gic_addrspace_size);
+ gic_irq_base = irqbase;
+
+ GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
+ numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
+ GIC_SH_CONFIG_NUMINTRS_SHF;
+ numintrs = ((numintrs + 1) * 8);
+
+ numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
+ GIC_SH_CONFIG_NUMVPES_SHF;
+ numvpes = numvpes + 1;
+
+ gic_basic_init(numintrs, numvpes, intr_map, intr_map_size);
+
+ gic_platform_init(numintrs, &gic_irq_controller);
+}
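The sizing probe at the end of gic_init() decodes two packed fields from GIC_SH_CONFIG: interrupts are stored as (count / 8) - 1 and VPEs as count - 1, so a NUMINTRS field of 31 means (31 + 1) * 8 = 256 interrupts and a NUMVPES field of 1 means 2 VPEs. A sketch of the decode with illustrative shift and mask values (the real positions live in the GIC_SH_CONFIG_* macros):

/* Illustrative field positions only; see asm/gic.h for the real ones. */
#define CFG_NUMINTRS_SHF	16
#define CFG_NUMINTRS_MSK	(0xffu << CFG_NUMINTRS_SHF)
#define CFG_NUMVPES_SHF		0
#define CFG_NUMVPES_MSK		(0xffu << CFG_NUMVPES_SHF)

static void decode_gic_config(u32 cfg, int *numintrs, int *numvpes)
{
	*numintrs = (int) (((cfg & CFG_NUMINTRS_MSK) >> CFG_NUMINTRS_SHF) + 1) * 8;
	*numvpes  = (int) ((cfg & CFG_NUMVPES_MSK) >> CFG_NUMVPES_SHF) + 1;
}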
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
new file mode 100644
index 00000000000..44a1f792e39
--- /dev/null
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -0,0 +1,131 @@
+/*
+ * GT641xx IRQ routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/gt64120.h>
+
+#define GT641XX_IRQ_TO_BIT(irq)	(1U << ((irq) - GT641XX_IRQ_BASE))
+
+static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
+
+static void ack_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 cause;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_ack_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 cause, mask;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void unmask_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask |= GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
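All four gt641xx callbacks follow one recipe: take gt641xx_irq_lock, read a shared register, flip this IRQ's bit, write it back. The lock is what keeps concurrent mask/ack updates from losing each other's bits. A hedged sketch of the helper they could share (gt641xx_rmw() is hypothetical, not part of the file):

/* Hypothetical consolidation of the locked read-modify-write above. */
static void gt641xx_rmw(unsigned long reg, u32 set, u32 clear)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
	val = GT_READ(reg);
	val &= ~clear;		/* drop bits being acked or masked */
	val |= set;		/* raise bits being unmasked */
	GT_WRITE(reg, val);
	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}

With it, ack_gt641xx_irq() would collapse to gt641xx_rmw(GT_INTRCAUSE_OFS, 0, GT641XX_IRQ_TO_BIT(d->irq)), and unmask to gt641xx_rmw(GT_INTRMASK_OFS, GT641XX_IRQ_TO_BIT(d->irq), 0).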
+
+static struct irq_chip gt641xx_irq_chip = {
+ .name = "GT641xx",
+ .irq_ack = ack_gt641xx_irq,
+ .irq_mask = mask_gt641xx_irq,
+ .irq_mask_ack = mask_ack_gt641xx_irq,
+ .irq_unmask = unmask_gt641xx_irq,
+};
+
+void gt641xx_irq_dispatch(void)
+{
+ u32 cause, mask;
+ int i;
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ cause &= mask;
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++) {
+ if (cause & (1U << i)) {
+ do_IRQ(GT641XX_IRQ_BASE + i);
+ return;
+ }
+ }
+
+ atomic_inc(&irq_err_count);
+}
+
+void __init gt641xx_irq_init(void)
+{
+ int i;
+
+ GT_WRITE(GT_INTRMASK_OFS, 0);
+ GT_WRITE(GT_INTRCAUSE_OFS, 0);
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++)
+ irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
+ &gt641xx_irq_chip, handle_level_irq);
+}
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 63dfeb41796..a734b2c2f9e 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -1,21 +1,22 @@
/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
* Copyright (c) 2004 MIPS Inc
* Author: chris@mips.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org>
*/
-#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <asm/ptrace.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/msc01_ic.h>
+#include <asm/traps.h>
static unsigned long _icctrl_msc;
#define MSC01_IC_REG_BASE _icctrl_msc
@@ -26,8 +27,10 @@ static unsigned long _icctrl_msc;
static unsigned int irq_base;
/* mask off an interrupt */
-static inline void mask_msc_irq(unsigned int irq)
+static inline void mask_msc_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if (irq < (irq_base + 32))
MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
else
@@ -35,8 +38,10 @@ static inline void mask_msc_irq(unsigned int irq)
}
/* unmask an interrupt */
-static inline void unmask_msc_irq(unsigned int irq)
+static inline void unmask_msc_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if (irq < (irq_base + 32))
MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
else
@@ -44,51 +49,23 @@ static inline void unmask_msc_irq(unsigned int irq)
}
/*
- * Enables the IRQ on SOC-it
- */
-static void enable_msc_irq(unsigned int irq)
-{
- unmask_msc_irq(irq);
-}
-
-/*
- * Initialize the IRQ on SOC-it
- */
-static unsigned int startup_msc_irq(unsigned int irq)
-{
- unmask_msc_irq(irq);
- return 0;
-}
-
-/*
- * Disables the IRQ on SOC-it
- */
-static void disable_msc_irq(unsigned int irq)
-{
- mask_msc_irq(irq);
-}
-
-/*
* Masks and ACKs an IRQ
*/
-static void level_mask_and_ack_msc_irq(unsigned int irq)
+static void level_mask_and_ack_msc_irq(struct irq_data *d)
{
- mask_msc_irq(irq);
+ mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
-#ifdef CONFIG_MIPS_MT_SMTC
- /* This actually needs to be a call into platform code */
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
}
/*
* Masks and ACKs an IRQ
*/
-static void edge_mask_and_ack_msc_irq(unsigned int irq)
+static void edge_mask_and_ack_msc_irq(struct irq_data *d)
{
- mask_msc_irq(irq);
+ unsigned int irq = d->irq;
+
+ mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
else {
@@ -97,91 +74,77 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
-#ifdef CONFIG_MIPS_MT_SMTC
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
-}
-
-/*
- * End IRQ processing
- */
-static void end_msc_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- unmask_msc_irq(irq);
}
/*
* Interrupt handler for interrupts coming from SOC-it.
*/
-void ll_msc_irq(struct pt_regs *regs)
+void ll_msc_irq(void)
{
- unsigned int irq;
+ unsigned int irq;
/* read the interrupt vector register */
MSCIC_READ(MSC01_IC_VEC, irq);
if (irq < 64)
- do_IRQ(irq + irq_base, regs);
+ do_IRQ(irq + irq_base);
else {
/* Ignore spurious interrupt */
}
}
-void
-msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
+static void msc_bind_eic_interrupt(int irq, int set)
{
MSCIC_WRITE(MSC01_IC_RAMW,
(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
}
-#define shutdown_msc_irq disable_msc_irq
-
-struct irq_chip msc_levelirq_type = {
- .typename = "SOC-it-Level",
- .startup = startup_msc_irq,
- .shutdown = shutdown_msc_irq,
- .enable = enable_msc_irq,
- .disable = disable_msc_irq,
- .ack = level_mask_and_ack_msc_irq,
- .end = end_msc_irq,
+static struct irq_chip msc_levelirq_type = {
+ .name = "SOC-it-Level",
+ .irq_ack = level_mask_and_ack_msc_irq,
+ .irq_mask = mask_msc_irq,
+ .irq_mask_ack = level_mask_and_ack_msc_irq,
+ .irq_unmask = unmask_msc_irq,
+ .irq_eoi = unmask_msc_irq,
};
-struct irq_chip msc_edgeirq_type = {
- .typename = "SOC-it-Edge",
- .startup =startup_msc_irq,
- .shutdown = shutdown_msc_irq,
- .enable = enable_msc_irq,
- .disable = disable_msc_irq,
- .ack = edge_mask_and_ack_msc_irq,
- .end = end_msc_irq,
+static struct irq_chip msc_edgeirq_type = {
+ .name = "SOC-it-Edge",
+ .irq_ack = edge_mask_and_ack_msc_irq,
+ .irq_mask = mask_msc_irq,
+ .irq_mask_ack = edge_mask_and_ack_msc_irq,
+ .irq_unmask = unmask_msc_irq,
+ .irq_eoi = unmask_msc_irq,
};
-void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
+void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
{
- extern void (*board_bind_eic_interrupt)(unsigned int irq, unsigned int regset);
-
- _icctrl_msc = (unsigned long) ioremap (MIPS_MSC01_IC_REG_BASE, 0x40000);
+ _icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
/* Reset interrupt controller - initialises all registers to 0 */
MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);
board_bind_eic_interrupt = &msc_bind_eic_interrupt;
- for (; nirq >= 0; nirq--, imp++) {
+ for (; nirq > 0; nirq--, imp++) {
int n = imp->im_irq;
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
- irq_desc[base+n].chip = &msc_edgeirq_type;
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_edgeirq_type,
+ handle_edge_irq,
+ "edge");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
- irq_desc[base+n].chip = &msc_levelirq_type;
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_levelirq_type,
+ handle_level_irq,
+ "level");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
@@ -189,7 +152,7 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
}
}
- irq_base = base;
+ irq_base = irqbase;
MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT); /* Enable interrupt generation */
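
The msc01 changes above follow the same genirq conversion as the rest of this series: chip callbacks take a struct irq_data instead of a bare IRQ number and recover the number from it. A compilable out-of-kernel sketch of that shape, with a plain variable standing in for the MSC01_IC_ENAL enable register:

    #include <stdio.h>

    struct irq_data {
        unsigned int irq; /* the only field this sketch needs */
    };

    static unsigned int irq_base = 64; /* hypothetical base */
    static unsigned int enable_reg;    /* stands in for MSC01_IC_ENAL */

    static void mask_irq(struct irq_data *d)
    {
        enable_reg &= ~(1u << (d->irq - irq_base));
    }

    static void unmask_irq(struct irq_data *d)
    {
        enable_reg |= 1u << (d->irq - irq_base);
    }

    int main(void)
    {
        struct irq_data d = { .irq = 67 };

        unmask_irq(&d);
        printf("enable = %08x\n", enable_reg); /* bit 3 set */
        mask_irq(&d);
        printf("enable = %08x\n", enable_reg); /* back to zero */
        return 0;
    }
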
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
deleted file mode 100644
index b117e64da64..00000000000
--- a/arch/mips/kernel/irq-mv6434x.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright 2002 Momentum Computer
- * Author: mdharm@momenco.com
- * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/mv643xx.h>
-#include <linux/sched.h>
-
-#include <asm/ptrace.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/marvell.h>
-
-static unsigned int irq_base;
-
-static inline int ls1bit32(unsigned int x)
-{
- int b = 31, s;
-
- s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
- s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
- s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
- s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
- s = 1; if (x << 1 == 0) s = 0; b -= s;
-
- return b;
-}
-
-/* mask off an interrupt -- 1 is enable, 0 is disable */
-static inline void mask_mv64340_irq(unsigned int irq)
-{
- uint32_t value;
-
- if (irq < (irq_base + 32)) {
- value = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
- value &= ~(1 << (irq - irq_base));
- MV_WRITE(MV64340_INTERRUPT0_MASK_0_LOW, value);
- } else {
- value = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
- value &= ~(1 << (irq - irq_base - 32));
- MV_WRITE(MV64340_INTERRUPT0_MASK_0_HIGH, value);
- }
-}
-
-/* unmask an interrupt -- 1 is enable, 0 is disable */
-static inline void unmask_mv64340_irq(unsigned int irq)
-{
- uint32_t value;
-
- if (irq < (irq_base + 32)) {
- value = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
- value |= 1 << (irq - irq_base);
- MV_WRITE(MV64340_INTERRUPT0_MASK_0_LOW, value);
- } else {
- value = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
- value |= 1 << (irq - irq_base - 32);
- MV_WRITE(MV64340_INTERRUPT0_MASK_0_HIGH, value);
- }
-}
-
-/*
- * Enables the IRQ on Marvell Chip
- */
-static void enable_mv64340_irq(unsigned int irq)
-{
- unmask_mv64340_irq(irq);
-}
-
-/*
- * Initialize the IRQ on Marvell Chip
- */
-static unsigned int startup_mv64340_irq(unsigned int irq)
-{
- unmask_mv64340_irq(irq);
- return 0;
-}
-
-/*
- * Disables the IRQ on Marvell Chip
- */
-static void disable_mv64340_irq(unsigned int irq)
-{
- mask_mv64340_irq(irq);
-}
-
-/*
- * Masks and ACKs an IRQ
- */
-static void mask_and_ack_mv64340_irq(unsigned int irq)
-{
- mask_mv64340_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_mv64340_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- unmask_mv64340_irq(irq);
-}
-
-/*
- * Interrupt handler for interrupts coming from the Marvell chip.
- * It could be built in ethernet ports etc...
- */
-void ll_mv64340_irq(struct pt_regs *regs)
-{
- unsigned int irq_src_low, irq_src_high;
- unsigned int irq_mask_low, irq_mask_high;
-
- /* read the interrupt status registers */
- irq_mask_low = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
- irq_mask_high = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
- irq_src_low = MV_READ(MV64340_MAIN_INTERRUPT_CAUSE_LOW);
- irq_src_high = MV_READ(MV64340_MAIN_INTERRUPT_CAUSE_HIGH);
-
- /* mask for just the interrupts we want */
- irq_src_low &= irq_mask_low;
- irq_src_high &= irq_mask_high;
-
- if (irq_src_low)
- do_IRQ(ls1bit32(irq_src_low) + irq_base, regs);
- else
- do_IRQ(ls1bit32(irq_src_high) + irq_base + 32, regs);
-}
-
-#define shutdown_mv64340_irq disable_mv64340_irq
-
-struct irq_chip mv64340_irq_type = {
- .typename = "MV-64340",
- .startup = startup_mv64340_irq,
- .shutdown = shutdown_mv64340_irq,
- .enable = enable_mv64340_irq,
- .disable = disable_mv64340_irq,
- .ack = mask_and_ack_mv64340_irq,
- .end = end_mv64340_irq,
-};
-
-void __init mv64340_irq_init(unsigned int base)
-{
- int i;
-
- /* Reset irq handlers pointers to NULL */
- for (i = base; i < base + 64; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 2;
- irq_desc[i].chip = &mv64340_irq_type;
- }
-
- irq_base = base;
-}
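
The deleted ls1bit32() above located the least significant set bit by successive halving: each step shifts the value left and, if nothing survives, the set bit must be in the high half already examined. A standalone copy (verbatim from the removed file) plus a brute-force cross-check:

    #include <assert.h>
    #include <stdio.h>

    static int ls1bit32(unsigned int x)
    {
        int b = 31, s;

        s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
        s = 8;  if (x << 8  == 0) s = 0; b -= s; x <<= s;
        s = 4;  if (x << 4  == 0) s = 0; b -= s; x <<= s;
        s = 2;  if (x << 2  == 0) s = 0; b -= s; x <<= s;
        s = 1;  if (x << 1  == 0) s = 0; b -= s;

        return b;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 32; i++)
            assert(ls1bit32(1u << i) == i);
        printf("ls1bit32(0x0c) = %d\n", ls1bit32(0x0c)); /* prints 2 */
        return 0;
    }
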
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 6b54c7109e2..26f4e4c9db1 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2003 Ralf Baechle
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -11,88 +11,39 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
-#include <asm/system.h>
-static int irq_base;
-
-static inline void unmask_rm7k_irq(unsigned int irq)
-{
- set_c0_intcontrol(0x100 << (irq - irq_base));
-}
-
-static inline void mask_rm7k_irq(unsigned int irq)
-{
- clear_c0_intcontrol(0x100 << (irq - irq_base));
-}
-
-static inline void rm7k_cpu_irq_enable(unsigned int irq)
+static inline void unmask_rm7k_irq(struct irq_data *d)
{
- unsigned long flags;
-
- local_irq_save(flags);
- unmask_rm7k_irq(irq);
- local_irq_restore(flags);
+ set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
}
-static void rm7k_cpu_irq_disable(unsigned int irq)
+static inline void mask_rm7k_irq(struct irq_data *d)
{
- unsigned long flags;
-
- local_irq_save(flags);
- mask_rm7k_irq(irq);
- local_irq_restore(flags);
-}
-
-static unsigned int rm7k_cpu_irq_startup(unsigned int irq)
-{
- rm7k_cpu_irq_enable(irq);
-
- return 0;
-}
-
-#define rm7k_cpu_irq_shutdown rm7k_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for rm7k_cpu_irq_end.
- */
-static void rm7k_cpu_irq_ack(unsigned int irq)
-{
- mask_rm7k_irq(irq);
-}
-
-static void rm7k_cpu_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- unmask_rm7k_irq(irq);
+ clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
}
static struct irq_chip rm7k_irq_controller = {
- .typename = "RM7000",
- .startup = rm7k_cpu_irq_startup,
- .shutdown = rm7k_cpu_irq_shutdown,
- .enable = rm7k_cpu_irq_enable,
- .disable = rm7k_cpu_irq_disable,
- .ack = rm7k_cpu_irq_ack,
- .end = rm7k_cpu_irq_end,
+ .name = "RM7000",
+ .irq_ack = mask_rm7k_irq,
+ .irq_mask = mask_rm7k_irq,
+ .irq_mask_ack = mask_rm7k_irq,
+ .irq_unmask = unmask_rm7k_irq,
+ .irq_eoi = unmask_rm7k_irq
};
-void __init rm7k_cpu_irq_init(int base)
+void __init rm7k_cpu_irq_init(void)
{
+ int base = RM7K_CPU_IRQ_BASE;
int i;
clear_c0_intcontrol(0x00000f00); /* Mask all */
- for (i = base; i < base + 4; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &rm7k_irq_controller;
- }
-
- irq_base = base;
+ for (i = base; i < base + 4; i++)
+ irq_set_chip_and_handler(i, &rm7k_irq_controller,
+ handle_percpu_irq);
}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
deleted file mode 100644
index 62f011ba97a..00000000000
--- a/arch/mips/kernel/irq-rm9000.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (C) 2003 Ralf Baechle
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Handler for RM9000 extended interrupts. These are a non-standard
- * feature so we handle them separately from standard interrupts.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
-
-static int irq_base;
-
-static inline void unmask_rm9k_irq(unsigned int irq)
-{
- set_c0_intcontrol(0x1000 << (irq - irq_base));
-}
-
-static inline void mask_rm9k_irq(unsigned int irq)
-{
- clear_c0_intcontrol(0x1000 << (irq - irq_base));
-}
-
-static inline void rm9k_cpu_irq_enable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- unmask_rm9k_irq(irq);
- local_irq_restore(flags);
-}
-
-static void rm9k_cpu_irq_disable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mask_rm9k_irq(irq);
- local_irq_restore(flags);
-}
-
-static unsigned int rm9k_cpu_irq_startup(unsigned int irq)
-{
- rm9k_cpu_irq_enable(irq);
-
- return 0;
-}
-
-#define rm9k_cpu_irq_shutdown rm9k_cpu_irq_disable
-
-/*
- * Performance counter interrupts are global on all processors.
- */
-static void local_rm9k_perfcounter_irq_startup(void *args)
-{
- unsigned int irq = (unsigned int) args;
-
- rm9k_cpu_irq_enable(irq);
-}
-
-static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
-{
- on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);
-
- return 0;
-}
-
-static void local_rm9k_perfcounter_irq_shutdown(void *args)
-{
- unsigned int irq = (unsigned int) args;
- unsigned long flags;
-
- local_irq_save(flags);
- mask_rm9k_irq(irq);
- local_irq_restore(flags);
-}
-
-static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
-{
- on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
-}
-
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for rm9k_cpu_irq_end.
- */
-static void rm9k_cpu_irq_ack(unsigned int irq)
-{
- mask_rm9k_irq(irq);
-}
-
-static void rm9k_cpu_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- unmask_rm9k_irq(irq);
-}
-
-static struct irq_chip rm9k_irq_controller = {
- .typename = "RM9000",
- .startup = rm9k_cpu_irq_startup,
- .shutdown = rm9k_cpu_irq_shutdown,
- .enable = rm9k_cpu_irq_enable,
- .disable = rm9k_cpu_irq_disable,
- .ack = rm9k_cpu_irq_ack,
- .end = rm9k_cpu_irq_end,
-};
-
-static struct irq_chip rm9k_perfcounter_irq = {
- .typename = "RM9000",
- .startup = rm9k_perfcounter_irq_startup,
- .shutdown = rm9k_perfcounter_irq_shutdown,
- .enable = rm9k_cpu_irq_enable,
- .disable = rm9k_cpu_irq_disable,
- .ack = rm9k_cpu_irq_ack,
- .end = rm9k_cpu_irq_end,
-};
-
-unsigned int rm9000_perfcount_irq;
-
-EXPORT_SYMBOL(rm9000_perfcount_irq);
-
-void __init rm9k_cpu_irq_init(int base)
-{
- int i;
-
- clear_c0_intcontrol(0x0000f000); /* Mask all */
-
- for (i = base; i < base + 4; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &rm9k_irq_controller;
- }
-
- rm9000_perfcount_irq = base + 1;
- irq_desc[rm9000_perfcount_irq].chip = &rm9k_perfcounter_irq;
-
- irq_base = base;
-}
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index d955aaefbb8..d2bfbc2e899 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -13,143 +13,135 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
-#include <linux/module.h>
#include <linux/proc_fs.h>
-#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
+#include <linux/ftrace.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
+#ifdef CONFIG_KGDB
+int kgdb_early_setup;
+#endif
+
+static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
+
+int allocate_irqno(void)
{
- printk("unexpected IRQ # %d\n", irq);
-}
+ int irq;
-atomic_t irq_err_count;
+again:
+ irq = find_first_zero_bit(irq_map, NR_IRQS);
-#ifdef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-unsigned long irq_hwmask[NR_IRQS];
-#endif /* CONFIG_MIPS_MT_SMTC */
+ if (irq >= NR_IRQS)
+ return -ENOSPC;
+
+ if (test_and_set_bit(irq, irq_map))
+ goto again;
-#undef do_IRQ
+ return irq;
+}
/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
+ * Allocate the 16 legacy interrupts for i8259 devices. This happens early
+ * in the kernel initialization so treating allocation failure as BUG() is
+ * ok.
*/
-asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
+void __init alloc_legacy_irqno(void)
{
- irq_enter();
-
- __DO_IRQ_SMTC_HOOK();
- __do_IRQ(irq, regs);
+ int i;
- irq_exit();
+ for (i = 0; i <= 16; i++)
+ BUG_ON(test_and_set_bit(i, irq_map));
+}
- return 1;
+void free_irqno(unsigned int irq)
+{
+ smp_mb__before_atomic();
+ clear_bit(irq, irq_map);
+ smp_mb__after_atomic();
}
/*
- * Generic, controller-independent functions:
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
*/
-
-int show_interrupts(struct seq_file *p, void *v)
+void ack_bad_irq(unsigned int irq)
{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
-
- if (i == 0) {
- seq_printf(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (!action)
- goto skip;
- seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
- seq_putc(p, '\n');
- seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
- }
- return 0;
+ printk("unexpected IRQ # %d\n", irq);
}
-asmlinkage void spurious_interrupt(struct pt_regs *regs)
+atomic_t irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- atomic_inc(&irq_err_count);
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+ return 0;
}
-#ifdef CONFIG_KGDB
-extern void breakpoint(void);
-extern void set_debug_traps(void);
-
-static int kgdb_flag = 1;
-static int __init nokgdb(char *str)
+asmlinkage void spurious_interrupt(void)
{
- kgdb_flag = 0;
- return 1;
+ atomic_inc(&irq_err_count);
}
-__setup("nokgdb", nokgdb);
-#endif
void __init init_IRQ(void)
{
int i;
- for (i = 0; i < NR_IRQS; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &no_irq_chip;
- spin_lock_init(&irq_desc[i].lock);
-#ifdef CONFIG_MIPS_MT_SMTC
- irq_hwmask[i] = 0;
-#endif /* CONFIG_MIPS_MT_SMTC */
- }
+#ifdef CONFIG_KGDB
+ if (kgdb_early_setup)
+ return;
+#endif
+
+ for (i = 0; i < NR_IRQS; i++)
+ irq_set_noprobe(i);
arch_init_irq();
#ifdef CONFIG_KGDB
- if (kgdb_flag) {
- printk("Wait for gdb client connection ...\n");
- set_debug_traps();
- breakpoint();
+ if (!kgdb_early_setup)
+ kgdb_early_setup = 1;
+#endif
+}
+
+#ifdef DEBUG_STACKOVERFLOW
+static inline void check_stack_overflow(void)
+{
+ unsigned long sp;
+
+ __asm__ __volatile__("move %0, $sp" : "=r" (sp));
+ sp &= THREAD_MASK;
+
+ /*
+ * Check for stack overflow: is there less than STACK_WARN free?
+ * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
+ */
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
}
+}
+#else
+static inline void check_stack_overflow(void) {}
#endif
+
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+void __irq_entry do_IRQ(unsigned int irq)
+{
+ irq_enter();
+ check_stack_overflow();
+ generic_handle_irq(irq);
+ irq_exit();
}
+
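
allocate_irqno() above pairs a non-atomic scan with an atomic test_and_set_bit() and retries whenever it loses a race for the bit it picked. A single-threaded sketch of the same control flow (the kernel's test_and_set_bit() is an atomic read-modify-write; this stand-in is not):

    #include <stdio.h>

    #define NR_IRQS 128
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];

    /* non-atomic stand-in for the kernel primitive */
    static int test_and_set_bit(unsigned int nr, unsigned long *map)
    {
        unsigned long bit = 1UL << (nr % BITS_PER_LONG);
        int old = (map[nr / BITS_PER_LONG] & bit) != 0;

        map[nr / BITS_PER_LONG] |= bit;
        return old;
    }

    static int allocate_irqno(void)
    {
        unsigned int irq;

    again:
        for (irq = 0; irq < NR_IRQS; irq++)
            if (!(irq_map[irq / BITS_PER_LONG] &
                  (1UL << (irq % BITS_PER_LONG))))
                break;

        if (irq >= NR_IRQS)
            return -1; /* the kernel returns -ENOSPC */

        if (test_and_set_bit(irq, irq_map))
            goto again; /* raced with another allocator: retry */

        return (int)irq;
    }

    int main(void)
    {
        int i;

        /* reserve the legacy range, as alloc_legacy_irqno() does */
        for (i = 0; i <= 16; i++)
            test_and_set_bit(i, irq_map);

        printf("first free irq: %d\n", allocate_irqno()); /* 17 */
        return 0;
    }
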
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 9bb21c7f214..e498f2b3646 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -3,13 +3,13 @@
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* Copyright (C) 2001 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
- * Author: Maciej W. Rozycki <macro@mips.com>
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Author: Maciej W. Rozycki <macro@mips.com>
*
 * This file defines the irq handler for MIPS CPU interrupts.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -25,157 +25,133 @@
* Don't even think about using this on SMP. You have been warned.
*
* This file exports one global function:
- * void mips_cpu_irq_init(int irq_base);
+ * void mips_cpu_irq_init(void);
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
-#include <asm/system.h>
-static int mips_cpu_irq_base;
-
-static inline void unmask_mips_irq(unsigned int irq)
+static inline void unmask_mips_irq(struct irq_data *d)
{
- set_c0_status(0x100 << (irq - mips_cpu_irq_base));
+ set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
irq_enable_hazard();
}
-static inline void mask_mips_irq(unsigned int irq)
+static inline void mask_mips_irq(struct irq_data *d)
{
- clear_c0_status(0x100 << (irq - mips_cpu_irq_base));
+ clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
irq_disable_hazard();
}
-static inline void mips_cpu_irq_enable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- unmask_mips_irq(irq);
- back_to_back_c0_hazard();
- local_irq_restore(flags);
-}
-
-static void mips_cpu_irq_disable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mask_mips_irq(irq);
- back_to_back_c0_hazard();
- local_irq_restore(flags);
-}
-
-static unsigned int mips_cpu_irq_startup(unsigned int irq)
-{
- mips_cpu_irq_enable(irq);
-
- return 0;
-}
-
-#define mips_cpu_irq_shutdown mips_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for mips_cpu_irq_end.
- */
-static void mips_cpu_irq_ack(unsigned int irq)
-{
- mask_mips_irq(irq);
-}
-
-static void mips_cpu_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- unmask_mips_irq(irq);
-}
-
static struct irq_chip mips_cpu_irq_controller = {
- .typename = "MIPS",
- .startup = mips_cpu_irq_startup,
- .shutdown = mips_cpu_irq_shutdown,
- .enable = mips_cpu_irq_enable,
- .disable = mips_cpu_irq_disable,
- .ack = mips_cpu_irq_ack,
- .end = mips_cpu_irq_end,
+ .name = "MIPS",
+ .irq_ack = mask_mips_irq,
+ .irq_mask = mask_mips_irq,
+ .irq_mask_ack = mask_mips_irq,
+ .irq_unmask = unmask_mips_irq,
+ .irq_eoi = unmask_mips_irq,
};
/*
* Basically the same as above but taking care of all the MT stuff
*/
-#define unmask_mips_mt_irq unmask_mips_irq
-#define mask_mips_mt_irq mask_mips_irq
-#define mips_mt_cpu_irq_enable mips_cpu_irq_enable
-#define mips_mt_cpu_irq_disable mips_cpu_irq_disable
-
-static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
+static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
unsigned int vpflags = dvpe();
- clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
+ clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
- mips_mt_cpu_irq_enable(irq);
-
+ unmask_mips_irq(d);
return 0;
}
-#define mips_mt_cpu_irq_shutdown mips_mt_cpu_irq_disable
-
/*
 * While we ack the interrupt, interrupts are disabled and thus we don't need
* to deal with concurrency issues. Same for mips_cpu_irq_end.
*/
-static void mips_mt_cpu_irq_ack(unsigned int irq)
+static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
unsigned int vpflags = dvpe();
- clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
+ clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
- mask_mips_mt_irq(irq);
+ mask_mips_irq(d);
}
-#define mips_mt_cpu_irq_end mips_cpu_irq_end
-
static struct irq_chip mips_mt_cpu_irq_controller = {
- .typename = "MIPS",
- .startup = mips_mt_cpu_irq_startup,
- .shutdown = mips_mt_cpu_irq_shutdown,
- .enable = mips_mt_cpu_irq_enable,
- .disable = mips_mt_cpu_irq_disable,
- .ack = mips_mt_cpu_irq_ack,
- .end = mips_mt_cpu_irq_end,
+ .name = "MIPS",
+ .irq_startup = mips_mt_cpu_irq_startup,
+ .irq_ack = mips_mt_cpu_irq_ack,
+ .irq_mask = mask_mips_irq,
+ .irq_mask_ack = mips_mt_cpu_irq_ack,
+ .irq_unmask = unmask_mips_irq,
+ .irq_eoi = unmask_mips_irq,
};
-void __init mips_cpu_irq_init(int irq_base)
+void __init mips_cpu_irq_init(void)
{
+ int irq_base = MIPS_CPU_IRQ_BASE;
int i;
/* Mask interrupts. */
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP);
- /*
- * Only MT is using the software interrupts currently, so we just
- * leave them uninitialized for other processors.
- */
- if (cpu_has_mipsmt)
- for (i = irq_base; i < irq_base + 2; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &mips_mt_cpu_irq_controller;
- }
-
- for (i = irq_base + 2; i < irq_base + 8; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &mips_cpu_irq_controller;
+ /* Software interrupts are used for MT/CMT IPI */
+ for (i = irq_base; i < irq_base + 2; i++)
+ irq_set_chip_and_handler(i, cpu_has_mipsmt ?
+ &mips_mt_cpu_irq_controller :
+ &mips_cpu_irq_controller,
+ handle_percpu_irq);
+
+ for (i = irq_base + 2; i < irq_base + 8; i++)
+ irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
+ handle_percpu_irq);
+}
+
+#ifdef CONFIG_IRQ_DOMAIN
+static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ static struct irq_chip *chip;
+
+ if (hw < 2 && cpu_has_mipsmt) {
+ /* Software interrupts are used for MT/CMT IPI */
+ chip = &mips_mt_cpu_irq_controller;
+ } else {
+ chip = &mips_cpu_irq_controller;
}
- mips_cpu_irq_base = irq_base;
+ irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
+ .map = mips_cpu_intc_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+int __init mips_cpu_intc_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain;
+
+ /* Mask interrupts. */
+ clear_c0_status(ST0_IM);
+ clear_c0_cause(CAUSEF_IP);
+
+ domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
+ &mips_cpu_intc_irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add irqdomain for MIPS CPU");
+
+ return 0;
}
+#endif /* CONFIG_IRQ_DOMAIN */
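
The CPU irq_chip above masks and unmasks by toggling the IM (interrupt mask) bits of the CP0 Status register: hardware line n of the eight CPU interrupts corresponds to Status bit 8 + n, which is where the 0x100 << (irq - MIPS_CPU_IRQ_BASE) expression comes from. A small sketch of that mapping, with a made-up base:

    #include <stdio.h>

    #define MIPS_CPU_IRQ_BASE 0 /* hypothetical; chosen by the platform */

    static unsigned int im_bit(unsigned int irq)
    {
        return 0x100u << (irq - MIPS_CPU_IRQ_BASE);
    }

    int main(void)
    {
        unsigned int irq;

        for (irq = MIPS_CPU_IRQ_BASE; irq < MIPS_CPU_IRQ_BASE + 8; irq++)
            printf("irq %u -> Status.IM bit %2u (mask 0x%04x)\n",
                   irq, 8 + irq - MIPS_CPU_IRQ_BASE, im_bit(irq));
        return 0;
    }
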
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
new file mode 100644
index 00000000000..ab00e490482
--- /dev/null
+++ b/arch/mips/kernel/irq_txx9.c
@@ -0,0 +1,191 @@
+/*
+ * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
+ * linux/arch/mips/tx4927/common/tx4927_irq.c,
+ * linux/arch/mips/tx4938/common/irq.c
+ *
+ * Copyright 2001, 2003-2005 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ * source@mvista.com
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <asm/txx9irq.h>
+
+struct txx9_irc_reg {
+ u32 cer;
+ u32 cr[2];
+ u32 unused0;
+ u32 ilr[8];
+ u32 unused1[4];
+ u32 imr;
+ u32 unused2[7];
+ u32 scr;
+ u32 unused3[7];
+ u32 ssr;
+ u32 unused4[7];
+ u32 csr;
+};
+
+/* IRCER : Int. Control Enable */
+#define TXx9_IRCER_ICE 0x00000001
+
+/* IRCR : Int. Control */
+#define TXx9_IRCR_LOW 0x00000000
+#define TXx9_IRCR_HIGH 0x00000001
+#define TXx9_IRCR_DOWN 0x00000002
+#define TXx9_IRCR_UP 0x00000003
+#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002)
+
+/* IRSCR : Int. Status Control */
+#define TXx9_IRSCR_EIClrE 0x00000100
+#define TXx9_IRSCR_EIClr_MASK 0x0000000f
+
+/* IRCSR : Int. Current Status */
+#define TXx9_IRCSR_IF 0x00010000
+#define TXx9_IRCSR_ILV_MASK 0x00000700
+#define TXx9_IRCSR_IVL_MASK 0x0000001f
+
+#define irc_dlevel 0
+#define irc_elevel 1
+
+static struct txx9_irc_reg __iomem *txx9_ircptr __read_mostly;
+
+static struct {
+ unsigned char level;
+ unsigned char mode;
+} txx9irq[TXx9_MAX_IR] __read_mostly;
+
+static void txx9_irq_unmask(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
+ int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
+
+ __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
+ | (txx9irq[irq_nr].level << ofs),
+ ilrp);
+#ifdef CONFIG_CPU_TX39XX
+ /* update IRCSR */
+ __raw_writel(0, &txx9_ircptr->imr);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+#endif
+}
+
+static inline void txx9_irq_mask(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
+ int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
+
+ __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
+ | (irc_dlevel << ofs),
+ ilrp);
+#ifdef CONFIG_CPU_TX39XX
+ /* update IRCSR */
+ __raw_writel(0, &txx9_ircptr->imr);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+ /* flush write buffer */
+ __raw_readl(&txx9_ircptr->ssr);
+#else
+ mmiowb();
+#endif
+}
+
+static void txx9_irq_mask_ack(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+
+ txx9_irq_mask(d);
+ /* clear edge detection */
+ if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
+ __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
+}
+
+static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 cr;
+ u32 __iomem *crp;
+ int ofs;
+ int mode;
+
+ if (flow_type & IRQF_TRIGGER_PROBE)
+ return 0;
+ switch (flow_type & IRQF_TRIGGER_MASK) {
+ case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
+ case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
+ case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
+ case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
+ default:
+ return -EINVAL;
+ }
+ crp = &txx9_ircptr->cr[(unsigned int)irq_nr / 8];
+ cr = __raw_readl(crp);
+ ofs = (irq_nr & (8 - 1)) * 2;
+ cr &= ~(0x3 << ofs);
+ cr |= (mode & 0x3) << ofs;
+ __raw_writel(cr, crp);
+ txx9irq[irq_nr].mode = mode;
+ return 0;
+}
+
+static struct irq_chip txx9_irq_chip = {
+ .name = "TXX9",
+ .irq_ack = txx9_irq_mask_ack,
+ .irq_mask = txx9_irq_mask,
+ .irq_mask_ack = txx9_irq_mask_ack,
+ .irq_unmask = txx9_irq_unmask,
+ .irq_set_type = txx9_irq_set_type,
+};
+
+void __init txx9_irq_init(unsigned long baseaddr)
+{
+ int i;
+
+ txx9_ircptr = ioremap(baseaddr, sizeof(struct txx9_irc_reg));
+ for (i = 0; i < TXx9_MAX_IR; i++) {
+ txx9irq[i].level = 4; /* middle level */
+ txx9irq[i].mode = TXx9_IRCR_LOW;
+ irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
+ handle_level_irq);
+ }
+
+ /* mask all IRC interrupts */
+ __raw_writel(0, &txx9_ircptr->imr);
+ for (i = 0; i < 8; i++)
+ __raw_writel(0, &txx9_ircptr->ilr[i]);
+ /* setup IRC interrupt mode (Low Active) */
+ for (i = 0; i < 2; i++)
+ __raw_writel(0, &txx9_ircptr->cr[i]);
+ /* enable interrupt control */
+ __raw_writel(TXx9_IRCER_ICE, &txx9_ircptr->cer);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+}
+
+int __init txx9_irq_set_pri(int irc_irq, int new_pri)
+{
+ int old_pri;
+
+ if ((unsigned int)irc_irq >= TXx9_MAX_IR)
+ return 0;
+ old_pri = txx9irq[irc_irq].level;
+ txx9irq[irc_irq].level = new_pri;
+ return old_pri;
+}
+
+int txx9_irq(void)
+{
+ u32 csr = __raw_readl(&txx9_ircptr->csr);
+
+ if (likely(!(csr & TXx9_IRCSR_IF)))
+ return TXX9_IRQ_BASE + (csr & (TXx9_MAX_IR - 1));
+ return -1;
+}
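
The index arithmetic in txx9_irq_mask()/txx9_irq_unmask() above packs four 8-bit level fields into each 32-bit ilr word; an IRQ's word and bit offset are derived from its number. A sketch that prints where each of the 32 IRQs lands:

    #include <stdio.h>

    int main(void)
    {
        unsigned int irq_nr;

        for (irq_nr = 0; irq_nr < 32; irq_nr++) {
            unsigned int word = (irq_nr % 16) / 2;
            unsigned int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;

            printf("irq %2u -> ilr[%u], bits %2u..%2u\n",
                   irq_nr, word, ofs, ofs + 7);
        }
        return 0;
    }
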
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
new file mode 100644
index 00000000000..6001610cfe5
--- /dev/null
+++ b/arch/mips/kernel/jump_label.c
@@ -0,0 +1,54 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2010 Cavium Networks, Inc.
+ */
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/inst.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+#define J_RANGE_MASK ((1ul << 28) - 1)
+
+void arch_jump_label_transform(struct jump_entry *e,
+ enum jump_label_type type)
+{
+ union mips_instruction insn;
+ union mips_instruction *insn_p =
+ (union mips_instruction *)(unsigned long)e->code;
+
+ /* Jump only works within a 256MB aligned region. */
+ BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK));
+
+ /* Target must have 4 byte alignment. */
+ BUG_ON((e->target & 3) != 0);
+
+ if (type == JUMP_LABEL_ENABLE) {
+ insn.j_format.opcode = j_op;
+ insn.j_format.target = (e->target & J_RANGE_MASK) >> 2;
+ } else {
+ insn.word = 0; /* nop */
+ }
+
+ get_online_cpus();
+ mutex_lock(&text_mutex);
+ *insn_p = insn;
+
+ flush_icache_range((unsigned long)insn_p,
+ (unsigned long)insn_p + sizeof(*insn_p));
+
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+}
+
+#endif /* HAVE_JUMP_LABEL */
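
arch_jump_label_transform() above patches in either a nop or a MIPS j instruction, whose 26-bit target field is a word index within the current 256MB segment; that is what J_RANGE_MASK and the two BUG_ON checks enforce. A standalone sketch of the encoding, with hypothetical addresses:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define J_RANGE_MASK ((1ul << 28) - 1)
    #define J_OP 2u /* MIPS "j" major opcode */

    static uint32_t encode_j(unsigned long code, unsigned long target)
    {
        /* the same two checks the kernel makes with BUG_ON() */
        assert((target & ~J_RANGE_MASK) == (code & ~J_RANGE_MASK));
        assert((target & 3) == 0);

        return (J_OP << 26) | ((target & J_RANGE_MASK) >> 2);
    }

    int main(void)
    {
        /* made-up addresses within the same 256MB region */
        printf("insn = 0x%08x\n",
               encode_j(0x80100000ul, 0x80100040ul)); /* 0x08040010 */
        return 0;
    }
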
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
new file mode 100644
index 00000000000..7afcc2f22c0
--- /dev/null
+++ b/arch/mips/kernel/kgdb.c
@@ -0,0 +1,409 @@
+/*
+ * Originally written by Glenn Engel, Lake Stevens Instrument Division
+ *
+ * Contributed by HP Systems
+ *
+ * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
+ * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
+ *
+ * Copyright (C) 1995 Andreas Busse
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright (C) 2004-2005 MontaVista Software Inc.
+ * Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com
+ *
+ * Copyright (C) 2007-2008 Wind River Systems, Inc.
+ * Author/Maintainer: Jason Wessel, jason.wessel@windriver.com
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h> /* for linux pt_regs struct */
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <asm/inst.h>
+#include <asm/fpu.h>
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
+#include <asm/uaccess.h>
+
+static struct hard_trap_info {
+ unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
+ unsigned char signo; /* Signal that we map this trap into */
+} hard_trap_info[] = {
+ { 6, SIGBUS }, /* instruction bus error */
+ { 7, SIGBUS }, /* data bus error */
+ { 9, SIGTRAP }, /* break */
+/* { 11, SIGILL }, */ /* CPU unusable */
+ { 12, SIGFPE }, /* overflow */
+ { 13, SIGTRAP }, /* trap */
+ { 14, SIGSEGV }, /* virtual instruction cache coherency */
+ { 15, SIGFPE }, /* floating point exception */
+ { 23, SIGSEGV }, /* watch */
+ { 31, SIGSEGV }, /* virtual data cache coherency */
+ { 0, 0} /* Must be last */
+};
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
+{
+ { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
+ { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
+ { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
+ { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
+ { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
+ { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
+ { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
+ { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
+ { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
+ { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
+ { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
+ { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
+ { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
+ { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
+ { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
+ { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
+ { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
+ { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
+ { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
+ { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
+ { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
+ { "f0", GDB_SIZEOF_REG, 0 },
+ { "f1", GDB_SIZEOF_REG, 1 },
+ { "f2", GDB_SIZEOF_REG, 2 },
+ { "f3", GDB_SIZEOF_REG, 3 },
+ { "f4", GDB_SIZEOF_REG, 4 },
+ { "f5", GDB_SIZEOF_REG, 5 },
+ { "f6", GDB_SIZEOF_REG, 6 },
+ { "f7", GDB_SIZEOF_REG, 7 },
+ { "f8", GDB_SIZEOF_REG, 8 },
+ { "f9", GDB_SIZEOF_REG, 9 },
+ { "f10", GDB_SIZEOF_REG, 10 },
+ { "f11", GDB_SIZEOF_REG, 11 },
+ { "f12", GDB_SIZEOF_REG, 12 },
+ { "f13", GDB_SIZEOF_REG, 13 },
+ { "f14", GDB_SIZEOF_REG, 14 },
+ { "f15", GDB_SIZEOF_REG, 15 },
+ { "f16", GDB_SIZEOF_REG, 16 },
+ { "f17", GDB_SIZEOF_REG, 17 },
+ { "f18", GDB_SIZEOF_REG, 18 },
+ { "f19", GDB_SIZEOF_REG, 19 },
+ { "f20", GDB_SIZEOF_REG, 20 },
+ { "f21", GDB_SIZEOF_REG, 21 },
+ { "f22", GDB_SIZEOF_REG, 22 },
+ { "f23", GDB_SIZEOF_REG, 23 },
+ { "f24", GDB_SIZEOF_REG, 24 },
+ { "f25", GDB_SIZEOF_REG, 25 },
+ { "f26", GDB_SIZEOF_REG, 26 },
+ { "f27", GDB_SIZEOF_REG, 27 },
+ { "f28", GDB_SIZEOF_REG, 28 },
+ { "f29", GDB_SIZEOF_REG, 29 },
+ { "f30", GDB_SIZEOF_REG, 30 },
+ { "f31", GDB_SIZEOF_REG, 31 },
+ { "fsr", GDB_SIZEOF_REG, 0 },
+ { "fir", GDB_SIZEOF_REG, 0 },
+};
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ return 0;
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy((void *)&current->thread.fpu.fcr31, mem,
+ dbg_reg_def[regno].size);
+ goto out_save;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ goto out_save;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
+ dbg_reg_def[regno].size);
+out_save:
+ restore_fp(current);
+ }
+
+ return 0;
+}
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ /* First 38 registers */
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ goto out;
+ save_fp(current);
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy(mem, (void *)&current->thread.fpu.fcr31,
+ dbg_reg_def[regno].size);
+ goto out;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ memset(mem, 0, dbg_reg_def[regno].size);
+ goto out;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
+ dbg_reg_def[regno].size);
+ }
+
+out:
+ return dbg_reg_def[regno].name;
+
+}
+
+void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__(
+ ".globl breakinst\n\t"
+ ".set\tnoreorder\n\t"
+ "nop\n"
+ "breakinst:\tbreak\n\t"
+ "nop\n\t"
+ ".set\treorder");
+}
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+ mm_segment_t old_fs;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+
+ kgdb_nmicallback(raw_smp_processor_id(), NULL);
+
+ set_fs(old_fs);
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ local_irq_enable();
+ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+ local_irq_disable();
+}
+
+static int compute_signal(int tt)
+{
+ struct hard_trap_info *ht;
+
+ for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
+ if (ht->tt == tt)
+ return ht->signo;
+
+ return SIGHUP; /* default for things we don't know about */
+}
+
+/*
+ * Similar to regs_to_gdb_regs() except that process is sleeping and so
+ * we may not be able to get all the info.
+ */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ int reg;
+ struct thread_info *ti = task_thread_info(p);
+ unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
+ struct pt_regs *regs = (struct pt_regs *)ksp - 1;
+#if (KGDB_GDB_REG_SIZE == 32)
+ u32 *ptr = (u32 *)gdb_regs;
+#else
+ u64 *ptr = (u64 *)gdb_regs;
+#endif
+
+ for (reg = 0; reg < 16; reg++)
+ *(ptr++) = regs->regs[reg];
+
+ /* S0 - S7 */
+ for (reg = 16; reg < 24; reg++)
+ *(ptr++) = regs->regs[reg];
+
+ for (reg = 24; reg < 28; reg++)
+ *(ptr++) = 0;
+
+ /* GP, SP, FP, RA */
+ for (reg = 28; reg < 32; reg++)
+ *(ptr++) = regs->regs[reg];
+
+ *(ptr++) = regs->cp0_status;
+ *(ptr++) = regs->lo;
+ *(ptr++) = regs->hi;
+ *(ptr++) = regs->cp0_badvaddr;
+ *(ptr++) = regs->cp0_cause;
+ *(ptr++) = regs->cp0_epc;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->cp0_epc = pc;
+}
+
+/*
+ * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
+ * then try to fall into the debugger.
+ */
+static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
+ void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ int trap = (regs->cp0_cause & 0x7c) >> 2;
+ mm_segment_t old_fs;
+
+#ifdef CONFIG_KPROBES
+ /*
+ * Return immediately if the kprobes fault notifier has set
+ * DIE_PAGE_FAULT.
+ */
+ if (cmd == DIE_PAGE_FAULT)
+ return NOTIFY_DONE;
+#endif /* CONFIG_KPROBES */
+
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ /* Kernel mode. Set correct address limit */
+ old_fs = get_fs();
+ set_fs(get_ds());
+
+ if (atomic_read(&kgdb_active) != -1)
+ kgdb_nmicallback(smp_processor_id(), regs);
+
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
+ set_fs(old_fs);
+ return NOTIFY_DONE;
+ }
+
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
+ regs->cp0_epc += 4;
+
+ /* In SMP mode, __flush_cache_all does IPI */
+ local_irq_enable();
+ __flush_cache_all();
+
+ set_fs(old_fs);
+ return NOTIFY_STOP;
+}
+
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return kgdb_mips_notify(NULL, cmd, &args);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_mips_notify,
+};
+
+/*
+ * Handle the 'c' command
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ char *ptr;
+ unsigned long address;
+
+ switch (remcom_in_buffer[0]) {
+ case 'c':
+ /* handle the optional parameter */
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &address))
+ regs->cp0_epc = address;
+
+ return 0;
+ }
+
+ return -1;
+}
+
+struct kgdb_arch arch_kgdb_ops;
+
+/*
+ * We use kgdb_early_setup so that functions we need to call now don't
+ * cause trouble when called again later.
+ */
+int kgdb_arch_init(void)
+{
+ union mips_instruction insn = {
+ .r_format = {
+ .opcode = spec_op,
+ .func = break_op,
+ }
+ };
+ memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);
+
+ register_die_notifier(&kgdb_notifier);
+
+ return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture-specific uninitialization.
+ *
+ * This function handles the uninitialization of any architecture-specific
+ * callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
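
compute_signal() above walks a sentinel-terminated table to translate MIPS trap codes into the POSIX signals gdb expects, falling back to SIGHUP for anything unknown. A trimmed, standalone copy of the lookup:

    #include <signal.h>
    #include <stdio.h>

    struct hard_trap_info {
        unsigned char tt;    /* trap type code */
        unsigned char signo; /* signal it maps to */
    };

    static const struct hard_trap_info traps[] = {
        { 9, SIGTRAP },  /* break */
        { 12, SIGFPE },  /* overflow */
        { 0, 0 }         /* sentinel: must be last */
    };

    static int compute_signal(int tt)
    {
        const struct hard_trap_info *ht;

        for (ht = traps; ht->tt && ht->signo; ht++)
            if (ht->tt == tt)
                return ht->signo;

        return SIGHUP; /* default for things we don't know about */
    }

    int main(void)
    {
        printf("trap 9  -> signal %d\n", compute_signal(9));
        printf("trap 42 -> signal %d\n", compute_signal(42));
        return 0;
    }
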
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
new file mode 100644
index 00000000000..1f8187ab099
--- /dev/null
+++ b/arch/mips/kernel/kprobes.c
@@ -0,0 +1,679 @@
+/*
+ * Kernel Probes (KProbes)
+ * arch/mips/kernel/kprobes.c
+ *
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * Some portions copied from the powerpc version.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+
+#include <asm/ptrace.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/inst.h>
+
+static const union mips_instruction breakpoint_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_BP,
+ .func = break_op
+ }
+};
+
+static const union mips_instruction breakpoint2_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_SSTEPBP,
+ .func = break_op
+ }
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static int __kprobes insn_has_delayslot(union mips_instruction insn)
+{
+ switch (insn.i_format.opcode) {
+
+ /*
+ * This group contains:
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jr_op:
+ case jalr_op:
+ break;
+ default:
+ goto insn_ok;
+ }
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ case j_op:
+
+ /*
+ * These are conditional and in i_format.
+ */
+ case beq_op:
+ case beql_op:
+ case bne_op:
+ case bnel_op:
+ case blez_op:
+ case blezl_op:
+ case bgtz_op:
+ case bgtzl_op:
+
+ /*
+ * These are the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ case ldc2_op: /* This is bbit032 on Octeon */
+ case swc2_op: /* This is bbit1 on Octeon */
+ case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+ return 1;
+ default:
+ break;
+ }
+insn_ok:
+ return 0;
+}
+
+/*
+ * insn_has_ll_or_sc() checks whether the instruction is an ll or sc
+ * one; putting a breakpoint on top of an atomic ll/sc pair is a bad
+ * idea, so we need to prevent it and refuse kprobes insertion for such
+ * instructions; we cannot do much about a breakpoint in the middle of
+ * an ll/sc pair; it is up to the user to avoid those places.
+ */
+static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
+{
+ int ret = 0;
+
+ switch (insn.i_format.opcode) {
+ case ll_op:
+ case lld_op:
+ case sc_op:
+ case scd_op:
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ union mips_instruction insn;
+ union mips_instruction prev_insn;
+ int ret = 0;
+
+ insn = p->addr[0];
+
+ if (insn_has_ll_or_sc(insn)) {
+ pr_notice("Kprobes for ll and sc instructions are not"
+ "supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((probe_kernel_read(&prev_insn, p->addr - 1,
+ sizeof(mips_instruction)) == 0) &&
+ insn_has_delayslot(prev_insn)) {
+ pr_notice("Kprobes for branch delayslot are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* insn: must be on special executable page on mips. */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * In the kprobe->ainsn.insn[] array we store the original
+ * instruction at index zero and a break trap instruction at
+ * index one.
+ *
+ * On MIPS, if the instruction at the probed address is a
+ * branch instruction, we need to execute the instruction in the
+ * branch delay slot (BD) at the time of the probe hit. As MIPS
+ * also lacks single-stepping support, the BD instruction cannot
+ * be executed in-line; it is executed on the SSOL slot using a
+ * normal breakpoint instruction in the next slot.
+ * So, read the instruction and save it for later execution.
+ */
+ if (insn_has_delayslot(insn))
+ memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
+ else
+ memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
+
+ p->ainsn.insn[1] = breakpoint2_insn;
+ p->opcode = *p->addr;
+
+out:
+ return ret;
+}
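
arch_prepare_kprobe() above builds a two-entry single-step-out-of-line (SSOL) slot: index zero holds the instruction to step (the probed instruction, or its delay-slot instruction for branches) and index one holds a second break that hands control back to the kprobes code. A sketch of just that slot layout, with a made-up break encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define BREAK2 0x0000004du /* hypothetical "break" encoding */

    struct ainsn {
        uint32_t insn[2];
    };

    static void prepare_slot(struct ainsn *slot, const uint32_t *addr,
                             int has_delayslot)
    {
        /* for a branch, step the delay-slot instruction instead */
        slot->insn[0] = has_delayslot ? addr[1] : addr[0];
        slot->insn[1] = BREAK2;
    }

    int main(void)
    {
        uint32_t code[2] = { 0x08040010, 0x00000000 }; /* j ...; nop */
        struct ainsn slot;

        prepare_slot(&slot, code, 1);
        printf("slot: %08x %08x\n", slot.insn[0], slot.insn[1]);
        return 0;
    }
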
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = breakpoint_insn;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
+ kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
+ kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
+}
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
+ kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
+ kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
+}
+
+static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = p;
+ kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
+ kcb->kprobe_saved_epc = regs->cp0_epc;
+}
+
+/**
+ * evaluate_branch_instruction -
+ *
+ * Evaluate the branch instruction at the probed address during probe hit.
+ * The result of the evaluation is the updated epc. The instruction in the
+ * delay slot is actually single stepped (using a normal breakpoint) on the
+ * SSOL slot.
+ *
+ * The result is also saved in the kprobe control block for later use, in
+ * case we need to execute the delay slot instruction. The latter is not
+ * needed for a NOP in the delay slot, nor for branch-likely instructions
+ * when the branch is taken; for those cases we set the SKIP_DELAYSLOT
+ * flag in the kprobe control block.
+ */
+static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ union mips_instruction insn = p->opcode;
+ long epc;
+ int ret = 0;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ if (p->ainsn.insn->word == 0)
+ kcb->flags |= SKIP_DELAYSLOT;
+ else
+ kcb->flags &= ~SKIP_DELAYSLOT;
+
+ ret = __compute_return_epc_for_insn(regs, insn);
+ if (ret < 0)
+ return ret;
+
+ if (ret == BRANCH_LIKELY_TAKEN)
+ kcb->flags |= SKIP_DELAYSLOT;
+
+ kcb->target_epc = regs->cp0_epc;
+
+ return 0;
+
+unaligned:
+ pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+ force_sig(SIGBUS, current);
+ return -EFAULT;
+
+}
+
+static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ int ret = 0;
+
+ regs->cp0_status &= ~ST0_IE;
+
+ /* single step inline if the instruction is a break */
+ if (p->opcode.word == breakpoint_insn.word ||
+ p->opcode.word == breakpoint2_insn.word)
+ regs->cp0_epc = (unsigned long)p->addr;
+ else if (insn_has_delayslot(p->opcode)) {
+ ret = evaluate_branch_instruction(p, regs, kcb);
+ if (ret < 0) {
+ pr_notice("Kprobes: Error in evaluating branch\n");
+ return;
+ }
+ }
+ regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "break 0"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap. In case of branch instructions, the target
+ * epc to be restored.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ if (insn_has_delayslot(p->opcode))
+ regs->cp0_epc = kcb->target_epc;
+ else {
+ unsigned long orig_epc = kcb->kprobe_saved_epc;
+ regs->cp0_epc = orig_epc + 4;
+ }
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ kprobe_opcode_t *addr;
+ struct kprobe_ctlblk *kcb;
+
+ addr = (kprobe_opcode_t *) regs->cp0_epc;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ p->ainsn.insn->word == breakpoint_insn.word) {
+ regs->cp0_status &= ~ST0_IE;
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+ goto no_kprobe;
+ }
+			/*
+			 * We have reentered kprobe_handler(): another probe
+			 * was hit while within the handler. Save the
+			 * original kprobe variables, then just single-step
+			 * the instruction of the new probe without calling
+			 * any user handlers.
+			 */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_REENTER;
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ resume_execution(p, regs, kcb);
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ }
+ return 1;
+ } else {
+ if (addr->word != breakpoint_insn.word) {
+				/*
+				 * The breakpoint instruction was removed by
+				 * another cpu right after we hit it; no
+				 * further handling of this interrupt is
+				 * appropriate.
+				 */
+ ret = 1;
+ goto no_kprobe;
+ }
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs))
+ goto ss_probe;
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ }
+ /* Not one of ours: let kernel handle it */
+ goto no_kprobe;
+ }
+
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ /* handler has already set things up, so skip ss setup */
+ return 1;
+ }
+
+ss_probe:
+ prepare_singlestep(p, regs, kcb);
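+	/*
+	 * If there is nothing to single-step (NOP in the delay slot, or
+	 * a branch-likely that does not execute its delay slot), finish
+	 * the probe right away.
+	 */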
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (p->post_handler)
+ p->post_handler(p, regs, 0);
+ resume_execution(p, regs, kcb);
+ preempt_enable_no_resched();
+ } else
+ kcb->kprobe_status = KPROBE_HIT_SS;
+
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ resume_execution(cur, regs, kcb);
+
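+	/* Restore the interrupt-enable state saved when the probe hit. */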
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+
+ /* Restore back the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+ reset_current_kprobe();
+out:
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
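+	/*
+	 * If the fault happened while single-stepping, resume at the
+	 * original instruction and drop the probe state.
+	 */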
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs, kcb);
+ regs->cp0_status |= kcb->kprobe_old_SR;
+
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+	struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BREAK:
+ if (kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_SSTEPBP:
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+
+ case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+
+ if (kprobe_running()
+ && kprobe_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
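+	/*
+	 * Save the register state and the top of the stack so that
+	 * longjmp_break_handler() can restore them when the jprobe
+	 * handler calls jprobe_return().
+	 */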
+ kcb->jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_sp = regs->regs[29];
+
+ memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
+ MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+
+ regs->cp0_epc = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+/* Defined in the inline asm below. */
+void jprobe_return_end(void);
+
+void __kprobes jprobe_return(void)
+{
+ /* Assembler quirk necessitates this '0,code' business. */
+ asm volatile(
+ "break 0,%0\n\t"
+ ".globl jprobe_return_end\n"
+ "jprobe_return_end:\n"
+ : : "n" (BRK_KPROBE_BP) : "memory");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (regs->cp0_epc >= (unsigned long)jprobe_return &&
+ regs->cp0_epc <= (unsigned long)jprobe_return_end) {
+ *regs = kcb->jprobe_saved_regs;
+ memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
+ MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+ preempt_enable_no_resched();
+
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Function return probe trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe causes the
+ * handlers to fire
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+ asm volatile(
+ ".set push\n\t"
+ /* Keep the assembler from reordering and placing JR here. */
+ ".set noreorder\n\t"
+ "nop\n\t"
+ ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n\t"
+ "nop\n\t"
+ ".set pop"
+ : : : "memory");
+}
+
+void kretprobe_trampoline(void);
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
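+	/* On MIPS the return address is in $ra (general register 31). */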
+ ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+
+ /* Replace the return addr with trampoline addr */
+ regs->regs[31] = (unsigned long)kretprobe_trampoline;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path have
+	 * a return probe installed on them, and/or more than one return
+	 * probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ instruction_pointer(regs) = orig_ret_address;
+
+ reset_current_kprobe();
+ kretprobe_hash_unlock(current, &flags);
+ preempt_enable_no_resched();
+
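+	/* Free the instances that recycle_rp_inst() moved to empty_rp. */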
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+ return 1;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
deleted file mode 100644
index f06a144c788..00000000000
--- a/arch/mips/kernel/kspd.c
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/unistd.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/syscalls.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-
-#include <asm/vpe.h>
-#include <asm/rtlx.h>
-#include <asm/kspd.h>
-
-static struct workqueue_struct *workqueue = NULL;
-static struct work_struct work;
-
-extern unsigned long cpu_khz;
-
-struct mtsp_syscall {
- int cmd;
- unsigned char abi;
- unsigned char size;
-};
-
-struct mtsp_syscall_ret {
- int retval;
- int errno;
-};
-
-struct mtsp_syscall_generic {
- int arg0;
- int arg1;
- int arg2;
- int arg3;
- int arg4;
- int arg5;
- int arg6;
-};
-
-static struct list_head kspd_notifylist;
-static int sp_stopping = 0;
-
-/* these should match with those in the SDE kit */
-#define MTSP_SYSCALL_BASE 0
-#define MTSP_SYSCALL_EXIT (MTSP_SYSCALL_BASE + 0)
-#define MTSP_SYSCALL_OPEN (MTSP_SYSCALL_BASE + 1)
-#define MTSP_SYSCALL_READ (MTSP_SYSCALL_BASE + 2)
-#define MTSP_SYSCALL_WRITE (MTSP_SYSCALL_BASE + 3)
-#define MTSP_SYSCALL_CLOSE (MTSP_SYSCALL_BASE + 4)
-#define MTSP_SYSCALL_LSEEK32 (MTSP_SYSCALL_BASE + 5)
-#define MTSP_SYSCALL_ISATTY (MTSP_SYSCALL_BASE + 6)
-#define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7)
-#define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8)
-#define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9)
-
-#define MTSP_O_RDONLY 0x0000
-#define MTSP_O_WRONLY 0x0001
-#define MTSP_O_RDWR 0x0002
-#define MTSP_O_NONBLOCK 0x0004
-#define MTSP_O_APPEND 0x0008
-#define MTSP_O_SHLOCK 0x0010
-#define MTSP_O_EXLOCK 0x0020
-#define MTSP_O_ASYNC 0x0040
-#define MTSP_O_FSYNC O_SYNC
-#define MTSP_O_NOFOLLOW 0x0100
-#define MTSP_O_SYNC 0x0080
-#define MTSP_O_CREAT 0x0200
-#define MTSP_O_TRUNC 0x0400
-#define MTSP_O_EXCL 0x0800
-#define MTSP_O_BINARY 0x8000
-
-#define SP_VPE 1
-
-struct apsp_table {
- int sp;
- int ap;
-};
-
-/* we might want to do the mode flags too */
-struct apsp_table open_flags_table[] = {
- { MTSP_O_RDWR, O_RDWR },
- { MTSP_O_WRONLY, O_WRONLY },
- { MTSP_O_CREAT, O_CREAT },
- { MTSP_O_TRUNC, O_TRUNC },
- { MTSP_O_NONBLOCK, O_NONBLOCK },
- { MTSP_O_APPEND, O_APPEND },
- { MTSP_O_NOFOLLOW, O_NOFOLLOW }
-};
-
-struct apsp_table syscall_command_table[] = {
- { MTSP_SYSCALL_OPEN, __NR_open },
- { MTSP_SYSCALL_CLOSE, __NR_close },
- { MTSP_SYSCALL_READ, __NR_read },
- { MTSP_SYSCALL_WRITE, __NR_write },
- { MTSP_SYSCALL_LSEEK32, __NR_lseek }
-};
-
-static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
-{
- register long int _num __asm__ ("$2") = num;
- register long int _arg0 __asm__ ("$4") = arg0;
- register long int _arg1 __asm__ ("$5") = arg1;
- register long int _arg2 __asm__ ("$6") = arg2;
- register long int _arg3 __asm__ ("$7") = arg3;
-
- mm_segment_t old_fs;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- __asm__ __volatile__ (
- " syscall \n"
- : "=r" (_num), "=r" (_arg3)
- : "r" (_num), "r" (_arg0), "r" (_arg1), "r" (_arg2), "r" (_arg3));
-
- set_fs(old_fs);
-
- /* $a3 is error flag */
- if (_arg3)
- return -_num;
-
- return _num;
-}
-
-static int translate_syscall_command(int cmd)
-{
- int i;
- int ret = -1;
-
- for (i = 0; i < ARRAY_SIZE(syscall_command_table); i++) {
- if ((cmd == syscall_command_table[i].sp))
- return syscall_command_table[i].ap;
- }
-
- return ret;
-}
-
-static unsigned int translate_open_flags(int flags)
-{
- int i;
- unsigned int ret = 0;
-
- for (i = 0; i < (sizeof(open_flags_table) / sizeof(struct apsp_table));
- i++) {
- if( (flags & open_flags_table[i].sp) ) {
- ret |= open_flags_table[i].ap;
- }
- }
-
- return ret;
-}
-
-
-static void sp_setfsuidgid( uid_t uid, gid_t gid)
-{
- current->fsuid = uid;
- current->fsgid = gid;
-
- key_fsuid_changed(current);
- key_fsgid_changed(current);
-}
-
-/*
- * Expects a request to be on the sysio channel. Reads it. Decides whether
- * it's a Linux syscall and runs it, or whatever. Puts the return code back
- * into the request and sends the whole thing back.
- */
-void sp_work_handle_request(void)
-{
- struct mtsp_syscall sc;
- struct mtsp_syscall_generic generic;
- struct mtsp_syscall_ret ret;
- struct kspd_notifications *n;
- struct timeval tv;
- struct timezone tz;
- int cmd;
-
- char *vcwd;
- mm_segment_t old_fs;
- int size;
-
- ret.retval = -1;
-
- if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) {
- printk(KERN_ERR "Expected request but nothing to read\n");
- return;
- }
-
- size = sc.size;
-
- if (size) {
- if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) {
- printk(KERN_ERR "Expected request but nothing to read\n");
- return;
- }
- }
-
-	/* Run the syscall at the privilege of the user who loaded the
- SP program */
-
- if (vpe_getuid(SP_VPE))
- sp_setfsuidgid( vpe_getuid(SP_VPE), vpe_getgid(SP_VPE));
-
- switch (sc.cmd) {
- /* needs the flags argument translating from SDE kit to
- linux */
- case MTSP_SYSCALL_PIPEFREQ:
- ret.retval = cpu_khz * 1000;
- ret.errno = 0;
- break;
-
- case MTSP_SYSCALL_GETTOD:
- memset(&tz, 0, sizeof(tz));
- if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
- (int)&tz, 0,0)) == 0)
- ret.retval = tv.tv_sec;
-
- ret.errno = errno;
- break;
-
- case MTSP_SYSCALL_EXIT:
- list_for_each_entry(n, &kspd_notifylist, list)
- n->kspd_sp_exit(SP_VPE);
- sp_stopping = 1;
-
- printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
- generic.arg0);
- break;
-
- case MTSP_SYSCALL_OPEN:
- generic.arg1 = translate_open_flags(generic.arg1);
-
- vcwd = vpe_getcwd(SP_VPE);
-
- /* change to the cwd of the process that loaded the SP program */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- sys_chdir(vcwd);
- set_fs(old_fs);
-
- sc.cmd = __NR_open;
-
- /* fall through */
-
- default:
- if ((sc.cmd >= __NR_Linux) &&
- (sc.cmd <= (__NR_Linux + __NR_Linux_syscalls)) )
- cmd = sc.cmd;
- else
- cmd = translate_syscall_command(sc.cmd);
-
- if (cmd >= 0) {
- ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1,
- generic.arg2, generic.arg3);
- ret.errno = errno;
- } else
- printk(KERN_WARNING
- "KSPD: Unknown SP syscall number %d\n", sc.cmd);
- break;
- } /* switch */
-
- if (vpe_getuid(SP_VPE))
- sp_setfsuidgid( 0, 0);
-
- if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0))
- < sizeof(struct mtsp_syscall_ret))
- printk("KSPD: sp_work_handle_request failed to send to SP\n");
-}
-
-static void sp_cleanup(void)
-{
- struct files_struct *files = current->files;
- int i, j;
- struct fdtable *fdt;
-
- j = 0;
-
- /*
- * It is safe to dereference the fd table without RCU or
- * ->file_lock
- */
- fdt = files_fdtable(files);
- for (;;) {
- unsigned long set;
- i = j * __NFDBITS;
- if (i >= fdt->max_fdset || i >= fdt->max_fds)
- break;
- set = fdt->open_fds->fds_bits[j++];
- while (set) {
- if (set & 1) {
- struct file * file = xchg(&fdt->fd[i], NULL);
- if (file)
- filp_close(file, files);
- }
- i++;
- set >>= 1;
- }
- }
-}
-
-static int channel_open = 0;
-
-/* the work handler */
-static void sp_work(void *data)
-{
- if (!channel_open) {
- if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
- printk("KSPD: unable to open sp channel\n");
- sp_stopping = 1;
- } else {
- channel_open++;
- printk(KERN_DEBUG "KSPD: SP channel opened\n");
- }
- } else {
- /* wait for some data, allow it to sleep */
- rtlx_read_poll(RTLX_CHANNEL_SYSIO, 1);
-
- /* Check we haven't been woken because we are stopping */
- if (!sp_stopping)
- sp_work_handle_request();
- }
-
- if (!sp_stopping)
- queue_work(workqueue, &work);
- else
- sp_cleanup();
-}
-
-static void startwork(int vpe)
-{
- sp_stopping = channel_open = 0;
-
- if (workqueue == NULL) {
- if ((workqueue = create_singlethread_workqueue("kspd")) == NULL) {
- printk(KERN_ERR "unable to start kspd\n");
- return;
- }
-
- INIT_WORK(&work, sp_work, NULL);
- queue_work(workqueue, &work);
- } else
- queue_work(workqueue, &work);
-
-}
-
-static void stopwork(int vpe)
-{
- sp_stopping = 1;
-
- printk(KERN_DEBUG "KSPD: SP stopping\n");
-}
-
-void kspd_notify(struct kspd_notifications *notify)
-{
- list_add(&notify->list, &kspd_notifylist);
-}
-
-static struct vpe_notifications notify;
-static int kspd_module_init(void)
-{
- INIT_LIST_HEAD(&kspd_notifylist);
-
- notify.start = startwork;
- notify.stop = stopwork;
- vpe_notify(SP_VPE, &notify);
-
- return 0;
-}
-
-static void kspd_module_exit(void)
-{
-
-}
-
-module_init(kspd_module_init);
-module_exit(kspd_module_exit);
-
-MODULE_DESCRIPTION("MIPS KSPD");
-MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index dc500e20cf1..0b29646bcee 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -3,21 +3,17 @@
*
* Copyright (C) 2000 Silicon Graphics, Inc.
* Written by Ulf Carlsson (ulfc@engr.sgi.com)
- * sys32_execve from ia64/ia32 code, Feb 2000, Kanoj Sarcar (kanoj@sgi.com)
*/
#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
-#include <linux/smp_lock.h>
#include <linux/highuid.h>
-#include <linux/dirent.h>
#include <linux/resource.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/times.h>
#include <linux/poll.h>
-#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/shm.h>
@@ -35,11 +31,13 @@
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/vfs.h>
+#include <linux/ipc.h>
+#include <linux/slab.h>
#include <net/sock.h>
#include <net/scm.h>
-#include <asm/ipc.h>
+#include <asm/compat-signal.h>
#include <asm/sim.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -57,185 +55,28 @@
#define AA(__x) ((unsigned long)((int)__x))
#ifdef __MIPSEB__
-#define merge_64(r1,r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
+#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
#endif
#ifdef __MIPSEL__
-#define merge_64(r1,r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
+#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
#endif
-/*
- * Revalidate the inode. This is required for proper NFS attribute caching.
- */
-
-int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
+SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ unsigned long, pgoff)
{
- struct compat_stat tmp;
-
- if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
- return -EOVERFLOW;
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.st_dev = new_encode_dev(stat->dev);
- tmp.st_ino = stat->ino;
- tmp.st_mode = stat->mode;
- tmp.st_nlink = stat->nlink;
- SET_UID(tmp.st_uid, stat->uid);
- SET_GID(tmp.st_gid, stat->gid);
- tmp.st_rdev = new_encode_dev(stat->rdev);
- tmp.st_size = stat->size;
- tmp.st_atime = stat->atime.tv_sec;
- tmp.st_mtime = stat->mtime.tv_sec;
- tmp.st_ctime = stat->ctime.tv_sec;
-#ifdef STAT_HAVE_NSEC
- tmp.st_atime_nsec = stat->atime.tv_nsec;
- tmp.st_mtime_nsec = stat->mtime.tv_nsec;
- tmp.st_ctime_nsec = stat->ctime.tv_nsec;
-#endif
- tmp.st_blocks = stat->blocks;
- tmp.st_blksize = stat->blksize;
- return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
-}
-
-asmlinkage unsigned long
-sys32_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
- struct file * file = NULL;
unsigned long error;
error = -EINVAL;
if (pgoff & (~PAGE_MASK >> 12))
goto out;
- pgoff >>= PAGE_SHIFT-12;
-
- if (!(flags & MAP_ANONYMOUS)) {
- error = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
- }
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
-
+ error = sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT-12));
out:
return error;
}
-
-asmlinkage int sys_truncate64(const char __user *path, unsigned int high,
- unsigned int low)
-{
- if ((int)high < 0)
- return -EINVAL;
- return sys_truncate(path, ((long) high << 32) | low);
-}
-
-asmlinkage int sys_ftruncate64(unsigned int fd, unsigned int high,
- unsigned int low)
-{
- if ((int)high < 0)
- return -EINVAL;
- return sys_ftruncate(fd, ((long) high << 32) | low);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys32_execve(nabi_no_regargs struct pt_regs regs)
-{
- int error;
- char * filename;
-
- filename = getname(compat_ptr(regs.regs[4]));
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = compat_do_execve(filename, compat_ptr(regs.regs[5]),
- compat_ptr(regs.regs[6]), &regs);
- putname(filename);
-
-out:
- return error;
-}
-
-asmlinkage long
-sysn32_waitid(int which, compat_pid_t pid,
- siginfo_t __user *uinfo, int options,
- struct compat_rusage __user *uru)
-{
- struct rusage ru;
- long ret;
- mm_segment_t old_fs = get_fs();
- int si_signo;
-
- if (!access_ok(VERIFY_WRITE, uinfo, sizeof(*uinfo)))
- return -EFAULT;
-
- set_fs (KERNEL_DS);
- ret = sys_waitid(which, pid, uinfo, options,
- uru ? (struct rusage __user *) &ru : NULL);
- set_fs (old_fs);
-
- if (__get_user(si_signo, &uinfo->si_signo))
- return -EFAULT;
- if (ret < 0 || si_signo == 0)
- return ret;
-
- if (uru)
- ret = put_compat_rusage(&ru, uru);
- return ret;
-}
-
-struct sysinfo32 {
- s32 uptime;
- u32 loads[3];
- u32 totalram;
- u32 freeram;
- u32 sharedram;
- u32 bufferram;
- u32 totalswap;
- u32 freeswap;
- u16 procs;
- u32 totalhigh;
- u32 freehigh;
- u32 mem_unit;
- char _f[8];
-};
-
-asmlinkage int sys32_sysinfo(struct sysinfo32 __user *info)
-{
- struct sysinfo s;
- int ret, err;
- mm_segment_t old_fs = get_fs ();
-
- set_fs (KERNEL_DS);
- ret = sys_sysinfo((struct sysinfo __user *)&s);
- set_fs (old_fs);
- err = put_user (s.uptime, &info->uptime);
- err |= __put_user (s.loads[0], &info->loads[0]);
- err |= __put_user (s.loads[1], &info->loads[1]);
- err |= __put_user (s.loads[2], &info->loads[2]);
- err |= __put_user (s.totalram, &info->totalram);
- err |= __put_user (s.freeram, &info->freeram);
- err |= __put_user (s.sharedram, &info->sharedram);
- err |= __put_user (s.bufferram, &info->bufferram);
- err |= __put_user (s.totalswap, &info->totalswap);
- err |= __put_user (s.freeswap, &info->freeswap);
- err |= __put_user (s.procs, &info->procs);
- err |= __put_user (s.totalhigh, &info->totalhigh);
- err |= __put_user (s.freehigh, &info->freehigh);
- err |= __put_user (s.mem_unit, &info->mem_unit);
- if (err)
- return -EFAULT;
- return ret;
-}
-
-#define RLIM_INFINITY32 0x7fffffff
+#define RLIM_INFINITY32 0x7fffffff
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
struct rlimit32 {
@@ -243,880 +84,57 @@ struct rlimit32 {
int rlim_max;
};
-#ifdef __MIPSEB__
-asmlinkage long sys32_truncate64(const char __user * path, unsigned long __dummy,
- int length_hi, int length_lo)
-#endif
-#ifdef __MIPSEL__
-asmlinkage long sys32_truncate64(const char __user * path, unsigned long __dummy,
- int length_lo, int length_hi)
-#endif
+SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
+ unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
{
- loff_t length;
-
- length = ((unsigned long) length_hi << 32) | (unsigned int) length_lo;
-
- return sys_truncate(path, length);
+ return sys_truncate(path, merge_64(a2, a3));
}
-#ifdef __MIPSEB__
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy,
- int length_hi, int length_lo)
-#endif
-#ifdef __MIPSEL__
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy,
- int length_lo, int length_hi)
-#endif
+SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
+ unsigned long, a2, unsigned long, a3)
{
- loff_t length;
-
- length = ((unsigned long) length_hi << 32) | (unsigned int) length_lo;
-
- return sys_ftruncate(fd, length);
+ return sys_ftruncate(fd, merge_64(a2, a3));
}
-static inline long
-get_tv32(struct timeval *o, struct compat_timeval __user *i)
-{
- return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
- (__get_user(o->tv_sec, &i->tv_sec) |
- __get_user(o->tv_usec, &i->tv_usec)));
-}
-
-static inline long
-put_tv32(struct compat_timeval __user *o, struct timeval *i)
-{
- return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
- (__put_user(i->tv_sec, &o->tv_sec) |
- __put_user(i->tv_usec, &o->tv_usec)));
-}
-
-extern struct timezone sys_tz;
-
-asmlinkage int
-sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
- if (tv) {
- struct timeval ktv;
- do_gettimeofday(&ktv);
- if (put_tv32(tv, &ktv))
- return -EFAULT;
- }
- if (tz) {
- if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
- return -EFAULT;
- }
- return 0;
-}
-
-static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
-{
- long usec;
-
- if (!access_ok(VERIFY_READ, i, sizeof(*i)))
- return -EFAULT;
- if (__get_user(o->tv_sec, &i->tv_sec))
- return -EFAULT;
- if (__get_user(usec, &i->tv_usec))
- return -EFAULT;
- o->tv_nsec = usec * 1000;
- return 0;
-}
-
-asmlinkage int
-sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
- struct timespec kts;
- struct timezone ktz;
-
- if (tv) {
- if (get_ts32(&kts, tv))
- return -EFAULT;
- }
- if (tz) {
- if (copy_from_user(&ktz, tz, sizeof(ktz)))
- return -EFAULT;
- }
-
- return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
-}
-
-asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high,
- unsigned int offset_low, loff_t __user * result,
- unsigned int origin)
+SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
+ unsigned int, offset_low, loff_t __user *, result,
+ unsigned int, origin)
{
return sys_llseek(fd, offset_high, offset_low, result, origin);
}
/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
lseek back to original location. They fail just like lseek does on
- non-seekable files. */
+ non-seekable files. */
-asmlinkage ssize_t sys32_pread(unsigned int fd, char __user * buf,
- size_t count, u32 unused, u64 a4, u64 a5)
+SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
+ unsigned long, unused, unsigned long, a4, unsigned long, a5)
{
return sys_pread64(fd, buf, count, merge_64(a4, a5));
}
-asmlinkage ssize_t sys32_pwrite(unsigned int fd, const char __user * buf,
- size_t count, u32 unused, u64 a4, u64 a5)
+SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
+ size_t, count, u32, unused, u64, a4, u64, a5)
{
return sys_pwrite64(fd, buf, count, merge_64(a4, a5));
}
-asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval)
-{
- struct timespec t;
- int ret;
- mm_segment_t old_fs = get_fs ();
-
- set_fs (KERNEL_DS);
- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
- set_fs (old_fs);
- if (put_user (t.tv_sec, &interval->tv_sec) ||
- __put_user (t.tv_nsec, &interval->tv_nsec))
- return -EFAULT;
- return ret;
-}
-
-struct msgbuf32 { s32 mtype; char mtext[1]; };
-
-struct ipc_perm32
-{
- key_t key;
- __compat_uid_t uid;
- __compat_gid_t gid;
- __compat_uid_t cuid;
- __compat_gid_t cgid;
- compat_mode_t mode;
- unsigned short seq;
-};
-
-struct ipc64_perm32 {
- key_t key;
- __compat_uid_t uid;
- __compat_gid_t gid;
- __compat_uid_t cuid;
- __compat_gid_t cgid;
- compat_mode_t mode;
- unsigned short seq;
- unsigned short __pad1;
- unsigned int __unused1;
- unsigned int __unused2;
-};
-
-struct semid_ds32 {
- struct ipc_perm32 sem_perm; /* permissions .. see ipc.h */
- compat_time_t sem_otime; /* last semop time */
- compat_time_t sem_ctime; /* last change time */
- u32 sem_base; /* ptr to first semaphore in array */
- u32 sem_pending; /* pending operations to be processed */
- u32 sem_pending_last; /* last pending operation */
- u32 undo; /* undo requests on this array */
- unsigned short sem_nsems; /* no. of semaphores in array */
-};
-
-struct semid64_ds32 {
- struct ipc64_perm32 sem_perm;
- compat_time_t sem_otime;
- compat_time_t sem_ctime;
- unsigned int sem_nsems;
- unsigned int __unused1;
- unsigned int __unused2;
-};
-
-struct msqid_ds32
-{
- struct ipc_perm32 msg_perm;
- u32 msg_first;
- u32 msg_last;
- compat_time_t msg_stime;
- compat_time_t msg_rtime;
- compat_time_t msg_ctime;
- u32 wwait;
- u32 rwait;
- unsigned short msg_cbytes;
- unsigned short msg_qnum;
- unsigned short msg_qbytes;
- compat_ipc_pid_t msg_lspid;
- compat_ipc_pid_t msg_lrpid;
-};
-
-struct msqid64_ds32 {
- struct ipc64_perm32 msg_perm;
- compat_time_t msg_stime;
- unsigned int __unused1;
- compat_time_t msg_rtime;
- unsigned int __unused2;
- compat_time_t msg_ctime;
- unsigned int __unused3;
- unsigned int msg_cbytes;
- unsigned int msg_qnum;
- unsigned int msg_qbytes;
- compat_pid_t msg_lspid;
- compat_pid_t msg_lrpid;
- unsigned int __unused4;
- unsigned int __unused5;
-};
-
-struct shmid_ds32 {
- struct ipc_perm32 shm_perm;
- int shm_segsz;
- compat_time_t shm_atime;
- compat_time_t shm_dtime;
- compat_time_t shm_ctime;
- compat_ipc_pid_t shm_cpid;
- compat_ipc_pid_t shm_lpid;
- unsigned short shm_nattch;
-};
-
-struct shmid64_ds32 {
- struct ipc64_perm32 shm_perm;
- compat_size_t shm_segsz;
- compat_time_t shm_atime;
- compat_time_t shm_dtime;
- compat_time_t shm_ctime;
- compat_pid_t shm_cpid;
- compat_pid_t shm_lpid;
- unsigned int shm_nattch;
- unsigned int __unused1;
- unsigned int __unused2;
-};
-
-struct ipc_kludge32 {
- u32 msgp;
- s32 msgtyp;
-};
-
-static int
-do_sys32_semctl(int first, int second, int third, void __user *uptr)
-{
- union semun fourth;
- u32 pad;
- int err, err2;
- struct semid64_ds s;
- mm_segment_t old_fs;
-
- if (!uptr)
- return -EINVAL;
- err = -EFAULT;
- if (get_user (pad, (u32 __user *)uptr))
- return err;
- if ((third & ~IPC_64) == SETVAL)
- fourth.val = (int)pad;
- else
- fourth.__pad = (void __user *)A(pad);
- switch (third & ~IPC_64) {
- case IPC_INFO:
- case IPC_RMID:
- case IPC_SET:
- case SEM_INFO:
- case GETVAL:
- case GETPID:
- case GETNCNT:
- case GETZCNT:
- case GETALL:
- case SETVAL:
- case SETALL:
- err = sys_semctl (first, second, third, fourth);
- break;
-
- case IPC_STAT:
- case SEM_STAT:
- fourth.__pad = (struct semid64_ds __user *)&s;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_semctl(first, second, third | IPC_64, fourth);
- set_fs(old_fs);
-
- if (third & IPC_64) {
- struct semid64_ds32 __user *usp64 = (struct semid64_ds32 __user *) A(pad);
-
- if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
- err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
- err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
- err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
- err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
- err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
- err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
- err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
- err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
- err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
- } else {
- struct semid_ds32 __user *usp32 = (struct semid_ds32 __user *) A(pad);
-
- if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
- err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
- err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
- err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
- err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
- err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
- err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
- err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
- err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
- err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
- }
- if (err2)
- err = -EFAULT;
- break;
-
- default:
- err = - EINVAL;
- break;
- }
-
- return err;
-}
-
-static int
-do_sys32_msgsnd (int first, int second, int third, void __user *uptr)
-{
- struct msgbuf32 __user *up = (struct msgbuf32 __user *)uptr;
- struct msgbuf *p;
- mm_segment_t old_fs;
- int err;
-
- if (second < 0)
- return -EINVAL;
- p = kmalloc (second + sizeof (struct msgbuf)
- + 4, GFP_USER);
- if (!p)
- return -ENOMEM;
- err = get_user (p->mtype, &up->mtype);
- if (err)
- goto out;
- err |= __copy_from_user (p->mtext, &up->mtext, second);
- if (err)
- goto out;
- old_fs = get_fs ();
- set_fs (KERNEL_DS);
- err = sys_msgsnd (first, (struct msgbuf __user *)p, second, third);
- set_fs (old_fs);
-out:
- kfree (p);
-
- return err;
-}
-
-static int
-do_sys32_msgrcv (int first, int second, int msgtyp, int third,
- int version, void __user *uptr)
-{
- struct msgbuf32 __user *up;
- struct msgbuf *p;
- mm_segment_t old_fs;
- int err;
-
- if (!version) {
- struct ipc_kludge32 __user *uipck = (struct ipc_kludge32 __user *)uptr;
- struct ipc_kludge32 ipck;
-
- err = -EINVAL;
- if (!uptr)
- goto out;
- err = -EFAULT;
- if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge32)))
- goto out;
- uptr = (void __user *)AA(ipck.msgp);
- msgtyp = ipck.msgtyp;
- }
-
- if (second < 0)
- return -EINVAL;
- err = -ENOMEM;
- p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
- if (!p)
- goto out;
- old_fs = get_fs ();
- set_fs (KERNEL_DS);
- err = sys_msgrcv (first, (struct msgbuf __user *)p, second + 4, msgtyp, third);
- set_fs (old_fs);
- if (err < 0)
- goto free_then_out;
- up = (struct msgbuf32 __user *)uptr;
- if (put_user (p->mtype, &up->mtype) ||
- __copy_to_user (&up->mtext, p->mtext, err))
- err = -EFAULT;
-free_then_out:
- kfree (p);
-out:
- return err;
-}
-
-static int
-do_sys32_msgctl (int first, int second, void __user *uptr)
-{
- int err = -EINVAL, err2;
- struct msqid64_ds m;
- struct msqid_ds32 __user *up32 = (struct msqid_ds32 __user *)uptr;
- struct msqid64_ds32 __user *up64 = (struct msqid64_ds32 __user *)uptr;
- mm_segment_t old_fs;
-
- switch (second & ~IPC_64) {
- case IPC_INFO:
- case IPC_RMID:
- case MSG_INFO:
- err = sys_msgctl (first, second, (struct msqid_ds __user *)uptr);
- break;
-
- case IPC_SET:
- if (second & IPC_64) {
- if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) {
- err = -EFAULT;
- break;
- }
- err = __get_user(m.msg_perm.uid, &up64->msg_perm.uid);
- err |= __get_user(m.msg_perm.gid, &up64->msg_perm.gid);
- err |= __get_user(m.msg_perm.mode, &up64->msg_perm.mode);
- err |= __get_user(m.msg_qbytes, &up64->msg_qbytes);
- } else {
- if (!access_ok(VERIFY_READ, up32, sizeof(*up32))) {
- err = -EFAULT;
- break;
- }
- err = __get_user(m.msg_perm.uid, &up32->msg_perm.uid);
- err |= __get_user(m.msg_perm.gid, &up32->msg_perm.gid);
- err |= __get_user(m.msg_perm.mode, &up32->msg_perm.mode);
- err |= __get_user(m.msg_qbytes, &up32->msg_qbytes);
- }
- if (err)
- break;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
- set_fs(old_fs);
- break;
-
- case IPC_STAT:
- case MSG_STAT:
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
- set_fs(old_fs);
- if (second & IPC_64) {
- if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(m.msg_perm.key, &up64->msg_perm.key);
- err2 |= __put_user(m.msg_perm.uid, &up64->msg_perm.uid);
- err2 |= __put_user(m.msg_perm.gid, &up64->msg_perm.gid);
- err2 |= __put_user(m.msg_perm.cuid, &up64->msg_perm.cuid);
- err2 |= __put_user(m.msg_perm.cgid, &up64->msg_perm.cgid);
- err2 |= __put_user(m.msg_perm.mode, &up64->msg_perm.mode);
- err2 |= __put_user(m.msg_perm.seq, &up64->msg_perm.seq);
- err2 |= __put_user(m.msg_stime, &up64->msg_stime);
- err2 |= __put_user(m.msg_rtime, &up64->msg_rtime);
- err2 |= __put_user(m.msg_ctime, &up64->msg_ctime);
- err2 |= __put_user(m.msg_cbytes, &up64->msg_cbytes);
- err2 |= __put_user(m.msg_qnum, &up64->msg_qnum);
- err2 |= __put_user(m.msg_qbytes, &up64->msg_qbytes);
- err2 |= __put_user(m.msg_lspid, &up64->msg_lspid);
- err2 |= __put_user(m.msg_lrpid, &up64->msg_lrpid);
- if (err2)
- err = -EFAULT;
- } else {
- if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(m.msg_perm.key, &up32->msg_perm.key);
- err2 |= __put_user(m.msg_perm.uid, &up32->msg_perm.uid);
- err2 |= __put_user(m.msg_perm.gid, &up32->msg_perm.gid);
- err2 |= __put_user(m.msg_perm.cuid, &up32->msg_perm.cuid);
- err2 |= __put_user(m.msg_perm.cgid, &up32->msg_perm.cgid);
- err2 |= __put_user(m.msg_perm.mode, &up32->msg_perm.mode);
- err2 |= __put_user(m.msg_perm.seq, &up32->msg_perm.seq);
- err2 |= __put_user(m.msg_stime, &up32->msg_stime);
- err2 |= __put_user(m.msg_rtime, &up32->msg_rtime);
- err2 |= __put_user(m.msg_ctime, &up32->msg_ctime);
- err2 |= __put_user(m.msg_cbytes, &up32->msg_cbytes);
- err2 |= __put_user(m.msg_qnum, &up32->msg_qnum);
- err2 |= __put_user(m.msg_qbytes, &up32->msg_qbytes);
- err2 |= __put_user(m.msg_lspid, &up32->msg_lspid);
- err2 |= __put_user(m.msg_lrpid, &up32->msg_lrpid);
- if (err2)
- err = -EFAULT;
- }
- break;
- }
-
- return err;
-}
-
-static int
-do_sys32_shmat (int first, int second, int third, int version, void __user *uptr)
+SYSCALL_DEFINE1(32_personality, unsigned long, personality)
{
- unsigned long raddr;
- u32 __user *uaddr = (u32 __user *)A((u32)third);
- int err = -EINVAL;
-
- if (version == 1)
- return err;
- err = do_shmat (first, uptr, second, &raddr);
- if (err)
- return err;
- err = put_user (raddr, uaddr);
- return err;
-}
-
-struct shm_info32 {
- int used_ids;
- u32 shm_tot, shm_rss, shm_swp;
- u32 swap_attempts, swap_successes;
-};
-
-static int
-do_sys32_shmctl (int first, int second, void __user *uptr)
-{
- struct shmid64_ds32 __user *up64 = (struct shmid64_ds32 __user *)uptr;
- struct shmid_ds32 __user *up32 = (struct shmid_ds32 __user *)uptr;
- struct shm_info32 __user *uip = (struct shm_info32 __user *)uptr;
- int err = -EFAULT, err2;
- struct shmid64_ds s64;
- mm_segment_t old_fs;
- struct shm_info si;
- struct shmid_ds s;
-
- switch (second & ~IPC_64) {
- case IPC_INFO:
- second = IPC_INFO; /* So that we don't have to translate it */
- case IPC_RMID:
- case SHM_LOCK:
- case SHM_UNLOCK:
- err = sys_shmctl(first, second, (struct shmid_ds __user *)uptr);
- break;
- case IPC_SET:
- if (second & IPC_64) {
- err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
- err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
- err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
- } else {
- err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
- err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
- err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
- }
- if (err)
- break;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_shmctl(first, second & ~IPC_64, (struct shmid_ds __user *)&s);
- set_fs(old_fs);
- break;
-
- case IPC_STAT:
- case SHM_STAT:
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_shmctl(first, second | IPC_64, (void __user *) &s64);
- set_fs(old_fs);
- if (err < 0)
- break;
- if (second & IPC_64) {
- if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
- err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
- err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
- err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
- err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
- err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
- err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
- err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
- err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
- err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
- err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
- err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
- err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
- err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
- } else {
- if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
- err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
- err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
- err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
- err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
- err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
- err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
- err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
- err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
- err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
- err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
- err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
- err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
- err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
- }
- if (err2)
- err = -EFAULT;
- break;
-
- case SHM_INFO:
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_shmctl(first, second, (void __user *)&si);
- set_fs(old_fs);
- if (err < 0)
- break;
- err2 = put_user(si.used_ids, &uip->used_ids);
- err2 |= __put_user(si.shm_tot, &uip->shm_tot);
- err2 |= __put_user(si.shm_rss, &uip->shm_rss);
- err2 |= __put_user(si.shm_swp, &uip->shm_swp);
- err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
- err2 |= __put_user (si.swap_successes, &uip->swap_successes);
- if (err2)
- err = -EFAULT;
- break;
-
- default:
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-static int sys32_semtimedop(int semid, struct sembuf __user *tsems, int nsems,
- const struct compat_timespec __user *timeout32)
-{
- struct compat_timespec t32;
- struct timespec __user *t64 = compat_alloc_user_space(sizeof(*t64));
-
- if (copy_from_user(&t32, timeout32, sizeof(t32)))
- return -EFAULT;
-
- if (put_user(t32.tv_sec, &t64->tv_sec) ||
- put_user(t32.tv_nsec, &t64->tv_nsec))
- return -EFAULT;
-
- return sys_semtimedop(semid, tsems, nsems, t64);
-}
-
-asmlinkage long
-sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
-{
- int version, err;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- switch (call) {
- case SEMOP:
- /* struct sembuf is the same on 32 and 64bit :)) */
- err = sys_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
- NULL);
- break;
- case SEMTIMEDOP:
- err = sys32_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
- (const struct compat_timespec __user *)AA(fifth));
- break;
- case SEMGET:
- err = sys_semget (first, second, third);
- break;
- case SEMCTL:
- err = do_sys32_semctl (first, second, third,
- (void __user *)AA(ptr));
- break;
-
- case MSGSND:
- err = do_sys32_msgsnd (first, second, third,
- (void __user *)AA(ptr));
- break;
- case MSGRCV:
- err = do_sys32_msgrcv (first, second, fifth, third,
- version, (void __user *)AA(ptr));
- break;
- case MSGGET:
- err = sys_msgget ((key_t) first, second);
- break;
- case MSGCTL:
- err = do_sys32_msgctl (first, second, (void __user *)AA(ptr));
- break;
-
- case SHMAT:
- err = do_sys32_shmat (first, second, third,
- version, (void __user *)AA(ptr));
- break;
- case SHMDT:
- err = sys_shmdt ((char __user *)A(ptr));
- break;
- case SHMGET:
- err = sys_shmget (first, (unsigned)second, third);
- break;
- case SHMCTL:
- err = do_sys32_shmctl (first, second, (void __user *)AA(ptr));
- break;
- default:
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-asmlinkage long sys32_shmat(int shmid, char __user *shmaddr,
- int shmflg, int32_t __user *addr)
-{
- unsigned long raddr;
- int err;
-
- err = do_shmat(shmid, shmaddr, shmflg, &raddr);
- if (err)
- return err;
-
- return put_user(raddr, addr);
-}
-
-struct sysctl_args32
-{
- compat_caddr_t name;
- int nlen;
- compat_caddr_t oldval;
- compat_caddr_t oldlenp;
- compat_caddr_t newval;
- compat_size_t newlen;
- unsigned int __unused[4];
-};
-
-#ifdef CONFIG_SYSCTL
-
-asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
-{
- struct sysctl_args32 tmp;
- int error;
- size_t oldlen;
- size_t __user *oldlenp = NULL;
- unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7;
-
- if (copy_from_user(&tmp, args, sizeof(tmp)))
- return -EFAULT;
-
- if (tmp.oldval && tmp.oldlenp) {
- /* Duh, this is ugly and might not work if sysctl_args
- is in read-only memory, but do_sysctl does indirectly
- a lot of uaccess in both directions and we'd have to
- basically copy the whole sysctl.c here, and
- glibc's __sysctl uses rw memory for the structure
- anyway. */
- if (get_user(oldlen, (u32 __user *)A(tmp.oldlenp)) ||
- put_user(oldlen, (size_t __user *)addr))
- return -EFAULT;
- oldlenp = (size_t __user *)addr;
- }
-
- lock_kernel();
- error = do_sysctl((int __user *)A(tmp.name), tmp.nlen, (void __user *)A(tmp.oldval),
- oldlenp, (void __user *)A(tmp.newval), tmp.newlen);
- unlock_kernel();
- if (oldlenp) {
- if (!error) {
- if (get_user(oldlen, (size_t __user *)addr) ||
- put_user(oldlen, (u32 __user *)A(tmp.oldlenp)))
- error = -EFAULT;
- }
- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
- }
- return error;
-}
-
-#endif /* CONFIG_SYSCTL */
-
-asmlinkage long sys32_newuname(struct new_utsname __user * name)
-{
- int ret = 0;
-
- down_read(&uts_sem);
- if (copy_to_user(name,&system_utsname,sizeof *name))
- ret = -EFAULT;
- up_read(&uts_sem);
-
- if (current->personality == PER_LINUX32 && !ret)
- if (copy_to_user(name->machine, "mips\0\0\0", 8))
- ret = -EFAULT;
-
- return ret;
-}
-
-asmlinkage int sys32_personality(unsigned long personality)
-{
- int ret;
- if (current->personality == PER_LINUX32 && personality == PER_LINUX)
- personality = PER_LINUX32;
- ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
- return ret;
-}
-
-/* ustat compatibility */
-struct ustat32 {
- compat_daddr_t f_tfree;
- compat_ino_t f_tinode;
- char f_fname[6];
- char f_fpack[6];
-};
-
-extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
-
-asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
-{
- int err;
- struct ustat tmp;
- struct ustat32 tmp32;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- err = sys_ustat(dev, (struct ustat __user *)&tmp);
- set_fs (old_fs);
-
- if (err)
- goto out;
-
- memset(&tmp32,0,sizeof(struct ustat32));
- tmp32.f_tfree = tmp.f_tfree;
- tmp32.f_tinode = tmp.f_tinode;
-
- err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
-
-out:
- return err;
-}
-
-asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
- s32 count)
-{
- mm_segment_t old_fs = get_fs();
+ unsigned int p = personality & 0xffffffff;
int ret;
- off_t of;
-
- if (offset && get_user(of, offset))
- return -EFAULT;
-
- set_fs(KERNEL_DS);
- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count);
- set_fs(old_fs);
-
- if (offset && put_user(of, offset))
- return -EFAULT;
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(p) == PER_LINUX)
+ p = (p & ~PER_MASK) | PER_LINUX32;
+ ret = sys_personality(p);
+ if (ret != -1 && personality(ret) == PER_LINUX32)
+ ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
- size_t count)
+ size_t count)
{
return sys_readahead(fd, merge_64(a2, a3), count);
}
@@ -1131,168 +149,19 @@ asmlinkage long sys32_sync_file_range(int fd, int __pad,
flags);
}
-/* Argument list sizes for sys_socketcall */
-#define AL(x) ((x) * sizeof(unsigned int))
-static unsigned char socketcall_nargs[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
- AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
- AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
-#undef AL
-
-/*
- * System call vectors.
- *
- * Argument checking cleaned up. Saved 20% in size.
- * This function doesn't need to set the kernel lock because
- * it is set by the callees.
- */
-
-asmlinkage long sys32_socketcall(int call, unsigned int __user *args32)
-{
- unsigned int a[6];
- unsigned int a0,a1;
- int err;
-
- extern asmlinkage long sys_socket(int family, int type, int protocol);
- extern asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
- extern asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen);
- extern asmlinkage long sys_listen(int fd, int backlog);
- extern asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen);
- extern asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len);
- extern asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len);
- extern asmlinkage long sys_socketpair(int family, int type, int protocol, int __user *usockvec);
- extern asmlinkage long sys_send(int fd, void __user * buff, size_t len, unsigned flags);
- extern asmlinkage long sys_sendto(int fd, void __user * buff, size_t len, unsigned flags,
- struct sockaddr __user *addr, int addr_len);
- extern asmlinkage long sys_recv(int fd, void __user * ubuf, size_t size, unsigned flags);
- extern asmlinkage long sys_recvfrom(int fd, void __user * ubuf, size_t size, unsigned flags,
- struct sockaddr __user *addr, int __user *addr_len);
- extern asmlinkage long sys_shutdown(int fd, int how);
- extern asmlinkage long sys_setsockopt(int fd, int level, int optname, char __user *optval, int optlen);
- extern asmlinkage long sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen);
- extern asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
- extern asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned int flags);
-
-
- if(call<1||call>SYS_RECVMSG)
- return -EINVAL;
-
- /* copy_from_user should be SMP safe. */
- if (copy_from_user(a, args32, socketcall_nargs[call]))
- return -EFAULT;
-
- a0=a[0];
- a1=a[1];
-
- switch(call)
- {
- case SYS_SOCKET:
- err = sys_socket(a0,a1,a[2]);
- break;
- case SYS_BIND:
- err = sys_bind(a0,(struct sockaddr __user *)A(a1), a[2]);
- break;
- case SYS_CONNECT:
- err = sys_connect(a0, (struct sockaddr __user *)A(a1), a[2]);
- break;
- case SYS_LISTEN:
- err = sys_listen(a0,a1);
- break;
- case SYS_ACCEPT:
- err = sys_accept(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
- break;
- case SYS_GETSOCKNAME:
- err = sys_getsockname(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
- break;
- case SYS_GETPEERNAME:
- err = sys_getpeername(a0, (struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
- break;
- case SYS_SOCKETPAIR:
- err = sys_socketpair(a0,a1, a[2], (int __user *)A(a[3]));
- break;
- case SYS_SEND:
- err = sys_send(a0, (void __user *)A(a1), a[2], a[3]);
- break;
- case SYS_SENDTO:
- err = sys_sendto(a0,(void __user *)A(a1), a[2], a[3],
- (struct sockaddr __user *)A(a[4]), a[5]);
- break;
- case SYS_RECV:
- err = sys_recv(a0, (void __user *)A(a1), a[2], a[3]);
- break;
- case SYS_RECVFROM:
- err = sys_recvfrom(a0, (void __user *)A(a1), a[2], a[3],
- (struct sockaddr __user *)A(a[4]), (int __user *)A(a[5]));
- break;
- case SYS_SHUTDOWN:
- err = sys_shutdown(a0,a1);
- break;
- case SYS_SETSOCKOPT:
- err = sys_setsockopt(a0, a1, a[2], (char __user *)A(a[3]), a[4]);
- break;
- case SYS_GETSOCKOPT:
- err = sys_getsockopt(a0, a1, a[2], (char __user *)A(a[3]), (int __user *)A(a[4]));
- break;
- case SYS_SENDMSG:
- err = sys_sendmsg(a0, (struct msghdr __user *) A(a1), a[2]);
- break;
- case SYS_RECVMSG:
- err = sys_recvmsg(a0, (struct msghdr __user *) A(a1), a[2]);
- break;
- default:
- err = -EINVAL;
- break;
- }
- return err;
-}
-
-struct sigevent32 {
- u32 sigev_value;
- u32 sigev_signo;
- u32 sigev_notify;
- u32 payload[(64 / 4) - 3];
-};
-
-extern asmlinkage long
-sys_timer_create(clockid_t which_clock,
- struct sigevent __user *timer_event_spec,
- timer_t __user * created_timer_id);
-
-long
-sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *timer_id)
+asmlinkage long sys32_fadvise64_64(int fd, int __pad,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ int flags)
{
- struct sigevent __user *p = NULL;
- if (se32) {
- struct sigevent se;
- p = compat_alloc_user_space(sizeof(struct sigevent));
- memset(&se, 0, sizeof(struct sigevent));
- if (get_user(se.sigev_value.sival_int, &se32->sigev_value) ||
- __get_user(se.sigev_signo, &se32->sigev_signo) ||
- __get_user(se.sigev_notify, &se32->sigev_notify) ||
- __copy_from_user(&se._sigev_un._pad, &se32->payload,
- sizeof(se32->payload)) ||
- copy_to_user(p, &se, sizeof(se)))
- return -EFAULT;
- }
- return sys_timer_create(clock, p, timer_id);
+ return sys_fadvise64_64(fd,
+ merge_64(a2, a3), merge_64(a4, a5),
+ flags);
}
-save_static_function(sys32_clone);
-__attribute_used__ noinline static int
-_sys32_clone(nabi_no_regargs struct pt_regs regs)
+asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
+ unsigned offset_a3, unsigned len_a4, unsigned len_a5)
{
- unsigned long clone_flags;
- unsigned long newsp;
- int __user *parent_tidptr, *child_tidptr;
-
- clone_flags = regs.regs[4];
- newsp = regs.regs[5];
- if (!newsp)
- newsp = regs.regs[29];
- parent_tidptr = (int __user *) regs.regs[6];
-
- /* Use __dummy4 instead of getting it off the stack, so that
- syscall() works. */
- child_tidptr = (int __user *) __dummy4;
- return do_fork(clone_flags, newsp, &regs, 0,
- parent_tidptr, child_tidptr);
+ return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
+ merge_64(len_a4, len_a5));
}
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
new file mode 100644
index 00000000000..992e18474da
--- /dev/null
+++ b/arch/mips/kernel/machine_kexec.c
@@ -0,0 +1,110 @@
+/*
+ * machine_kexec.c for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/compiler.h>
+#include <linux/kexec.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+extern const unsigned char relocate_new_kernel[];
+extern const size_t relocate_new_kernel_size;
+
+extern unsigned long kexec_start_address;
+extern unsigned long kexec_indirection_page;
+
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+#ifdef CONFIG_SMP
+void (*relocated_kexec_smp_wait) (void *);
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+#endif
+
+int
+machine_kexec_prepare(struct kimage *kimage)
+{
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+ return 0;
+}
+
+void
+machine_kexec_cleanup(struct kimage *kimage)
+{
+}
+
+void
+machine_shutdown(void)
+{
+ if (_machine_kexec_shutdown)
+ _machine_kexec_shutdown();
+}
+
+void
+machine_crash_shutdown(struct pt_regs *regs)
+{
+ if (_machine_crash_shutdown)
+ _machine_crash_shutdown(regs);
+ else
+ default_machine_crash_shutdown(regs);
+}
+
+typedef void (*noretfun_t)(void) __noreturn;
+
+void
+machine_kexec(struct kimage *image)
+{
+ unsigned long reboot_code_buffer;
+ unsigned long entry;
+ unsigned long *ptr;
+
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
+
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+
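+	/*
+	 * Copy the relocation stub into the control page; we jump to
+	 * it at the end of this function.
+	 */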
+	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+ relocate_new_kernel_size);
+
+ /*
+ * The generic kexec code builds a page list with physical
+	 * addresses. They are directly accessible through KSEG0 (or
+	 * CKSEG0 or XKPHYS on a 64-bit system), hence the
+ * phys_to_virt() call.
+ */
+	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+ ptr = (entry & IND_INDIRECTION) ?
+ phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+ if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+ *ptr & IND_DESTINATION)
+ *ptr = (unsigned long) phys_to_virt(*ptr);
+ }
+
+ /*
+	 * We do not want to be bothered by interrupts from here on.
+ */
+ local_irq_disable();
+
+ printk("Will call new kernel at %08lx\n", image->start);
+ printk("Bye ...\n");
+ __flush_cache_all();
+#ifdef CONFIG_SMP
+ /* All secondary cpus now may jump to kexec_wait cycle */
+ relocated_kexec_smp_wait = reboot_code_buffer +
+ (void *)(kexec_smp_wait - relocate_new_kernel);
+ smp_wmb();
+ atomic_set(&kexec_ready_to_reboot, 1);
+#endif
+ ((noretfun_t) reboot_code_buffer)();
+}
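The loop above only rewrites the list entries from physical to virtual addresses; actually moving the pages is left to the relocate_new_kernel stub. As a rough guide, here is a hedged C rendering of how such a stub is assumed to consume the list, using the generic IND_* flags from <linux/kexec.h> (the helper name and the bare memcpy are illustrative, not the real assembly):

#include <linux/kexec.h>
#include <linux/string.h>

/* Hypothetical helper: copy source pages to their destinations by
 * walking a kexec page list whose entries are now virtual addresses. */
static void consume_kexec_list(unsigned long *entry)
{
	unsigned long *dest = NULL;
	unsigned long e;

	for (;; entry++) {
		e = *entry;
		if (e & IND_DONE)
			break;				/* end of list */
		if (e & IND_INDIRECTION)
			entry = (unsigned long *)(e & PAGE_MASK) - 1;
		else if (e & IND_DESTINATION)
			dest = (unsigned long *)(e & PAGE_MASK);
		else if (e & IND_SOURCE) {
			memcpy(dest, (void *)(e & PAGE_MASK), PAGE_SIZE);
			dest += PAGE_SIZE / sizeof(*dest);
		}
	}
}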
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
new file mode 100644
index 00000000000..539b6294b61
--- /dev/null
+++ b/arch/mips/kernel/mcount.S
@@ -0,0 +1,202 @@
+/*
+ * MIPS specific _mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
+ * Copyright (C) 2010 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/ftrace.h>
+
+ .text
+ .set noreorder
+ .set noat
+
+ .macro MCOUNT_SAVE_REGS
+ PTR_SUBU sp, PT_SIZE
+ PTR_S ra, PT_R31(sp)
+ PTR_S AT, PT_R1(sp)
+ PTR_S a0, PT_R4(sp)
+ PTR_S a1, PT_R5(sp)
+ PTR_S a2, PT_R6(sp)
+ PTR_S a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_S a4, PT_R8(sp)
+ PTR_S a5, PT_R9(sp)
+ PTR_S a6, PT_R10(sp)
+ PTR_S a7, PT_R11(sp)
+#endif
+ .endm
+
+ .macro MCOUNT_RESTORE_REGS
+ PTR_L ra, PT_R31(sp)
+ PTR_L AT, PT_R1(sp)
+ PTR_L a0, PT_R4(sp)
+ PTR_L a1, PT_R5(sp)
+ PTR_L a2, PT_R6(sp)
+ PTR_L a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_L a4, PT_R8(sp)
+ PTR_L a5, PT_R9(sp)
+ PTR_L a6, PT_R10(sp)
+ PTR_L a7, PT_R11(sp)
+#endif
+ PTR_ADDIU sp, PT_SIZE
+ .endm
+
+ .macro RETURN_BACK
+ jr ra
+ move ra, AT
+ .endm
+
+/*
+ * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass
+ * the location of the parent's return address.
+ */
+#define MCOUNT_RA_ADDRESS_REG $12
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+NESTED(ftrace_caller, PT_SIZE, ra)
+ .globl _mcount
+_mcount:
+ b ftrace_stub
+#ifdef CONFIG_32BIT
+ addiu sp,sp,8
+#else
+ nop
+#endif
+
+ /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
+ lw t1, function_trace_stop
+ bnez t1, ftrace_stub
+ nop
+
+ MCOUNT_SAVE_REGS
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+ PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
+#endif
+
+ PTR_SUBU a0, ra, 8 /* arg1: self address */
+ .globl ftrace_call
+ftrace_call:
+ nop /* a placeholder for the call to a real tracing function */
+ move a1, AT /* arg2: parent's return address */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call
+ftrace_graph_call:
+ nop
+ nop
+#endif
+
+ MCOUNT_RESTORE_REGS
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+NESTED(_mcount, PT_SIZE, ra)
+ lw t1, function_trace_stop
+ bnez t1, ftrace_stub
+ nop
+ PTR_LA t1, ftrace_stub
+ PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
+ bne t1, t2, static_trace
+ nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ PTR_L t3, ftrace_graph_return
+ bne t1, t3, ftrace_graph_caller
+ nop
+ PTR_LA t1, ftrace_graph_entry_stub
+ PTR_L t3, ftrace_graph_entry
+ bne t1, t3, ftrace_graph_caller
+ nop
+#endif
+ b ftrace_stub
+ nop
+
+static_trace:
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg1: self return address */
+ jalr t2 /* (1) call *ftrace_trace_function */
+ move a1, AT /* arg2: parent's return address */
+
+ MCOUNT_RESTORE_REGS
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(_mcount)
+
+#endif /* ! CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+NESTED(ftrace_graph_caller, PT_SIZE, ra)
+#ifndef CONFIG_DYNAMIC_FTRACE
+ MCOUNT_SAVE_REGS
+#endif
+
+ /* arg1: Get the location of the parent's return address */
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a0, PT_R12(sp)
+#else
+ move a0, MCOUNT_RA_ADDRESS_REG
+#endif
+ bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */
+ nop
+#endif
+ PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */
+1:
+
+ /* arg2: Get self return address */
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a1, PT_R31(sp)
+#else
+ move a1, ra
+#endif
+
+ /* arg3: Get frame pointer of current stack */
+#ifdef CONFIG_64BIT
+ PTR_LA a2, PT_SIZE(sp)
+#else
+ PTR_LA a2, (PT_SIZE+8)(sp)
+#endif
+
+ jal prepare_ftrace_return
+ nop
+ MCOUNT_RESTORE_REGS
+ RETURN_BACK
+ END(ftrace_graph_caller)
+
+ .align 2
+ .globl return_to_handler
+return_to_handler:
+ PTR_SUBU sp, PT_SIZE
+ PTR_S v0, PT_R2(sp)
+
+ jal ftrace_return_to_handler
+ PTR_S v1, PT_R3(sp)
+
+ /* restore the real parent address: v0 -> ra */
+ move ra, v0
+
+ PTR_L v0, PT_R2(sp)
+ PTR_L v1, PT_R3(sp)
+ jr ra
+ PTR_ADDIU sp, PT_SIZE
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+ .set at
+ .set reorder
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
new file mode 100644
index 00000000000..f76f7a08412
--- /dev/null
+++ b/arch/mips/kernel/mips-cm.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mipsregs.h>
+
+void __iomem *mips_cm_base;
+void __iomem *mips_cm_l2sync_base;
+
+phys_t __mips_cm_phys_base(void)
+{
+ u32 config3 = read_c0_config3();
+ u32 cmgcr;
+
+ /* Check the CMGCRBase register is implemented */
+ if (!(config3 & MIPS_CONF3_CMGCR))
+ return 0;
+
+ /* Read the address from CMGCRBase */
+ cmgcr = read_c0_cmgcrbase();
+ return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
+}
+
+phys_t mips_cm_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_phys_base")));
+
+phys_t __mips_cm_l2sync_phys_base(void)
+{
+ u32 base_reg;
+
+ /*
+	 * If the L2-only sync region is already enabled then leave it at its
+ * current location.
+ */
+ base_reg = read_gcr_l2_only_sync_base();
+ if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK)
+ return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK;
+
+ /* Default to following the CM */
+ return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
+}
+
+phys_t mips_cm_l2sync_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
+
+static void mips_cm_probe_l2sync(void)
+{
+ unsigned major_rev;
+ phys_t addr;
+
+ /* L2-only sync was introduced with CM major revision 6 */
+ major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
+ CM_GCR_REV_MAJOR_SHF;
+ if (major_rev < 6)
+ return;
+
+ /* Find a location for the L2 sync region */
+ addr = mips_cm_l2sync_phys_base();
+ BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr);
+ if (!addr)
+ return;
+
+ /* Set the region base address & enable it */
+ write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK);
+
+ /* Map the region */
+ mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
+}
+
+int mips_cm_probe(void)
+{
+ phys_t addr;
+ u32 base_reg;
+
+ addr = mips_cm_phys_base();
+ BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
+ if (!addr)
+ return -ENODEV;
+
+ mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
+ if (!mips_cm_base)
+ return -ENXIO;
+
+ /* sanity check that we're looking at a CM */
+ base_reg = read_gcr_base();
+ if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) {
+ pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
+ (unsigned long)addr);
+ mips_cm_base = NULL;
+ return -ENODEV;
+ }
+
+ /* set default target to memory */
+ base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK;
+ base_reg |= CM_GCR_BASE_CMDEFTGT_MEM;
+ write_gcr_base(base_reg);
+
+ /* disable CM regions */
+ write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+
+ /* probe for an L2-only sync region */
+ mips_cm_probe_l2sync();
+
+ return 0;
+}
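The << (36 - 32) in __mips_cm_phys_base() is worth spelling out: CMGCRBase fits a 36-bit physical address into a 32-bit register by storing it pre-shifted right by four bits. A minimal sketch, assuming only what the code above already implies (the example value is illustrative):

/* Assumed decoding, matching the shift in __mips_cm_phys_base():
 * the masked register field is the physical base >> 4. */
static phys_t cmgcr_to_phys(u32 cmgcr_masked)
{
	/* e.g. a masked read of 0x01fbf800 yields base 0x1fbf8000 */
	return (phys_t)cmgcr_masked << (36 - 32);
}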
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
new file mode 100644
index 00000000000..ba473608a34
--- /dev/null
+++ b/arch/mips/kernel/mips-cpc.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+
+void __iomem *mips_cpc_base;
+
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
+phys_t __weak mips_cpc_phys_base(void)
+{
+ u32 cpc_base;
+
+ if (!mips_cm_present())
+ return 0;
+
+ if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
+ return 0;
+
+ /* If the CPC is already enabled, leave it so */
+ cpc_base = read_gcr_cpc_base();
+ if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
+ return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
+
+ /* Otherwise, give it the default address & enable it */
+ cpc_base = mips_cpc_default_phys_base();
+ write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
+ return cpc_base;
+}
+
+int mips_cpc_probe(void)
+{
+ phys_t addr;
+ unsigned cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(cpc_core_lock, cpu));
+
+ addr = mips_cpc_phys_base();
+ if (!addr)
+ return -ENODEV;
+
+ mips_cpc_base = ioremap_nocache(addr, 0x8000);
+ if (!mips_cpc_base)
+ return -ENXIO;
+
+ return 0;
+}
+
+void mips_cpc_lock_other(unsigned int core)
+{
+ unsigned curr_core;
+ preempt_disable();
+ curr_core = current_cpu_data.core;
+ spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+}
+
+void mips_cpc_unlock_other(void)
+{
+ unsigned curr_core = current_cpu_data.core;
+ spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ preempt_enable();
+}
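The lock/unlock pair is designed to bracket accesses to the CPC "core-other" register block, which aliases the local registers of whichever core number was written via write_cpc_cl_other(). A hedged usage sketch; the command accessor and constant are assumptions modelled on the asm/mips-cpc.h naming:

/* Hypothetical example: power up another core via its CPC block. */
static void cpc_power_up_core(unsigned int core)
{
	mips_cpc_lock_other(core);
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);	/* assumed accessor/command */
	mips_cpc_unlock_other();
}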
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
new file mode 100644
index 00000000000..362bb3707e6
--- /dev/null
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -0,0 +1,214 @@
+/*
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <asm/uaccess.h>
+
+/*
+ * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
+ */
+cpumask_t mt_fpu_cpumask;
+
+static int fpaff_threshold = -1;
+unsigned long mt_fpemul_threshold;
+
+/*
+ * Replacement functions for the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, so that we can integrate
+ * FPU affinity with the user's requested processor affinity.
+ * This code is 98% identical to the sys_sched_setaffinity()
+ * and sys_sched_getaffinity() system calls, and should be
+ * updated when kernel/sched/core.c changes.
+ */
+
+/*
+ * find_process_by_pid - find a process with a matching PID value.
+ * used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
+ * cloned here.
+ */
+static inline struct task_struct *find_process_by_pid(pid_t pid)
+{
+ return pid ? find_task_by_vpid(pid) : current;
+}
+
+/*
+ * Check that the target process has a UID matching the current process's
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+ const struct cred *cred = current_cred(), *pcred;
+ bool match;
+
+ rcu_read_lock();
+ pcred = __task_cred(p);
+ match = (uid_eq(cred->euid, pcred->euid) ||
+ uid_eq(cred->euid, pcred->uid));
+ rcu_read_unlock();
+ return match;
+}
+
+/*
+ * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr)
+{
+ cpumask_var_t cpus_allowed, new_mask, effective_mask;
+ struct thread_info *ti;
+ struct task_struct *p;
+ int retval;
+
+ if (len < sizeof(new_mask))
+ return -EINVAL;
+
+ if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+ return -EFAULT;
+
+ get_online_cpus();
+ rcu_read_lock();
+
+ p = find_process_by_pid(pid);
+ if (!p) {
+ rcu_read_unlock();
+ put_online_cpus();
+ return -ESRCH;
+ }
+
+ /* Prevent p going away */
+ get_task_struct(p);
+ rcu_read_unlock();
+
+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_put_task;
+ }
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_cpus_allowed;
+ }
+ if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_new_mask;
+ }
+ retval = -EPERM;
+ if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
+ goto out_unlock;
+
+ retval = security_task_setscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ /* Record new user-specified CPU set for future reference */
+ cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
+
+ again:
+ /* Compute new global allowed CPU set if necessary */
+ ti = task_thread_info(p);
+ if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
+ cpus_intersects(*new_mask, mt_fpu_cpumask)) {
+ cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+ retval = set_cpus_allowed_ptr(p, effective_mask);
+ } else {
+ cpumask_copy(effective_mask, new_mask);
+ clear_ti_thread_flag(ti, TIF_FPUBOUND);
+ retval = set_cpus_allowed_ptr(p, new_mask);
+ }
+
+ if (!retval) {
+ cpuset_cpus_allowed(p, cpus_allowed);
+ if (!cpumask_subset(effective_mask, cpus_allowed)) {
+ /*
+ * We must have raced with a concurrent cpuset
+ * update. Just reset the cpus_allowed to the
+ * cpuset's cpus_allowed
+ */
+ cpumask_copy(new_mask, cpus_allowed);
+ goto again;
+ }
+ }
+out_unlock:
+ free_cpumask_var(effective_mask);
+out_free_new_mask:
+ free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+ free_cpumask_var(cpus_allowed);
+out_put_task:
+ put_task_struct(p);
+ put_online_cpus();
+ return retval;
+}
+
+/*
+ * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr)
+{
+ unsigned int real_len;
+ cpumask_t mask;
+ int retval;
+ struct task_struct *p;
+
+ real_len = sizeof(mask);
+ if (len < real_len)
+ return -EINVAL;
+
+ get_online_cpus();
+ read_lock(&tasklist_lock);
+
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+ put_online_cpus();
+ if (retval)
+ return retval;
+ if (copy_to_user(user_mask_ptr, &mask, real_len))
+ return -EFAULT;
+ return real_len;
+}
+
+
+static int __init fpaff_thresh(char *str)
+{
+ get_option(&str, &fpaff_threshold);
+ return 1;
+}
+__setup("fpaff=", fpaff_thresh);
+
+/*
+ * FPU Use Factor empirically derived from experiments on 34K
+ */
+#define FPUSEFACTOR 2000
+
+static __init int mt_fp_affinity_init(void)
+{
+ if (fpaff_threshold >= 0) {
+ mt_fpemul_threshold = fpaff_threshold;
+ } else {
+ mt_fpemul_threshold =
+ (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+ }
+ printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
+ mt_fpemul_threshold);
+
+ return 0;
+}
+arch_initcall(mt_fp_affinity_init);
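To make the default computation above concrete: with the illustrative values HZ = 100 and loops_per_jiffy = 1000000, the expression evaluates to (2000 * (1000000 / 5000)) / 100 = 4000, i.e. the task is rebound to FPU-capable CPUs after roughly 4000 emulated FP instructions. The same arithmetic in sketch form:

/* Worked example only; lpj and hz are illustrative, not measured. */
static unsigned long example_fpemul_threshold(void)
{
	unsigned long lpj = 1000000, hz = 100;

	return (2000 * (lpj / (500000 / hz))) / hz;	/* == 4000 */
}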
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index c1373a6e668..88b1ef5f868 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -1,162 +1,45 @@
/*
- * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
+#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/cpumask.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/security.h>
#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
+#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
-#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
-/*
- * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
- */
-
-cpumask_t mt_fpu_cpumask;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <asm/uaccess.h>
-
-unsigned long mt_fpemul_threshold = 0;
-
-/*
- * Replacement functions for the sys_sched_setaffinity() and
- * sys_sched_getaffinity() system calls, so that we can integrate
- * FPU affinity with the user's requested processor affinity.
- * This code is 98% identical with the sys_sched_setaffinity()
- * and sys_sched_getaffinity() system calls, and should be
- * updated when kernel/sched.c changes.
- */
+int vpelimit;
-/*
- * find_process_by_pid - find a process with a matching PID value.
- * used in sys_sched_set/getaffinity() in kernel/sched.c, so
- * cloned here.
- */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static int __init maxvpes(char *str)
{
- return pid ? find_task_by_pid(pid) : current;
-}
-
+ get_option(&str, &vpelimit);
-/*
- * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
- unsigned long __user *user_mask_ptr)
-{
- cpumask_t new_mask;
- cpumask_t effective_mask;
- int retval;
- struct task_struct *p;
-
- if (len < sizeof(new_mask))
- return -EINVAL;
-
- if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
- return -EFAULT;
-
- lock_cpu_hotplug();
- read_lock(&tasklist_lock);
-
- p = find_process_by_pid(pid);
- if (!p) {
- read_unlock(&tasklist_lock);
- unlock_cpu_hotplug();
- return -ESRCH;
- }
-
- /*
- * It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
- * usage count and drop tasklist_lock before invoking
- * set_cpus_allowed.
- */
- get_task_struct(p);
-
- retval = -EPERM;
- if ((current->euid != p->euid) && (current->euid != p->uid) &&
- !capable(CAP_SYS_NICE)) {
- read_unlock(&tasklist_lock);
- goto out_unlock;
- }
-
- /* Record new user-specified CPU set for future reference */
- p->thread.user_cpus_allowed = new_mask;
-
- /* Unlock the task list */
- read_unlock(&tasklist_lock);
-
- /* Compute new global allowed CPU set if necessary */
- if( (p->thread.mflags & MF_FPUBOUND)
- && cpus_intersects(new_mask, mt_fpu_cpumask)) {
- cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
- retval = set_cpus_allowed(p, effective_mask);
- } else {
- p->thread.mflags &= ~MF_FPUBOUND;
- retval = set_cpus_allowed(p, new_mask);
- }
+ return 1;
+}
+__setup("maxvpes=", maxvpes);
-out_unlock:
- put_task_struct(p);
- unlock_cpu_hotplug();
- return retval;
-}
+int tclimit;
-/*
- * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
- unsigned long __user *user_mask_ptr)
+static int __init maxtcs(char *str)
{
- unsigned int real_len;
- cpumask_t mask;
- int retval;
- struct task_struct *p;
-
- real_len = sizeof(mask);
- if (len < real_len)
- return -EINVAL;
-
- lock_cpu_hotplug();
- read_lock(&tasklist_lock);
-
- retval = -ESRCH;
- p = find_process_by_pid(pid);
- if (!p)
- goto out_unlock;
-
- retval = 0;
-
- cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
-
-out_unlock:
- read_unlock(&tasklist_lock);
- unlock_cpu_hotplug();
- if (retval)
- return retval;
- if (copy_to_user(user_mask_ptr, &mask, real_len))
- return -EFAULT;
- return real_len;
+ get_option(&str, &tclimit);
+
+ return 1;
}
-#endif /* CONFIG_MIPS_MT_FPAFF */
+__setup("maxtcs=", maxtcs);
/*
* Dump new MIPS MT state for the core. Does not leave TCs halted.
@@ -174,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl)
int tc;
unsigned long haltval;
unsigned long tcstatval;
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_soft_dump(void);
-#endif /* CONFIG_MIPT_MT_SMTC */
local_irq_save(flags);
vpflags = dvpe();
@@ -188,27 +68,32 @@ void mips_mt_regdump(unsigned long mvpctl)
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
printk("-- per-VPE State --\n");
- for(i = 0; i < nvpe; i++) {
- for(tc = 0; tc < ntc; tc++) {
+ for (i = 0; i < nvpe; i++) {
+ for (tc = 0; tc < ntc; tc++) {
settc(tc);
- if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
- printk(" VPE %d\n", i);
- printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
- printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
- printk(" VPE%d.Status : %08lx\n",
- i, read_vpe_c0_status());
- printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
- printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
- printk(" VPE%d.Config7 : %08lx\n",
- i, read_vpe_c0_config7());
- break; /* Next VPE */
+ if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+ printk(" VPE %d\n", i);
+ printk(" VPEControl : %08lx\n",
+ read_vpe_c0_vpecontrol());
+ printk(" VPEConf0 : %08lx\n",
+ read_vpe_c0_vpeconf0());
+ printk(" VPE%d.Status : %08lx\n",
+ i, read_vpe_c0_status());
+ printk(" VPE%d.EPC : %08lx %pS\n",
+ i, read_vpe_c0_epc(),
+ (void *) read_vpe_c0_epc());
+ printk(" VPE%d.Cause : %08lx\n",
+ i, read_vpe_c0_cause());
+ printk(" VPE%d.Config7 : %08lx\n",
+ i, read_vpe_c0_config7());
+ break; /* Next VPE */
+ }
}
- }
}
printk("-- per-TC State --\n");
- for(tc = 0; tc < ntc; tc++) {
+ for (tc = 0; tc < ntc; tc++) {
settc(tc);
- if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+ if (read_tc_c0_tcbind() == read_c0_tcbind()) {
/* Are we dumping ourself? */
haltval = 0; /* Then we're not halted, and mustn't be */
tcstatval = flags; /* And pre-dump TCStatus is flags */
@@ -221,24 +106,22 @@ void mips_mt_regdump(unsigned long mvpctl)
}
printk(" TCStatus : %08lx\n", tcstatval);
printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
- printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart());
+ printk(" TCRestart : %08lx %pS\n",
+ read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
printk(" TCHalt : %08lx\n", haltval);
printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
if (!haltval)
write_tc_c0_tchalt(0);
}
-#ifdef CONFIG_MIPS_MT_SMTC
- smtc_soft_dump();
-#endif /* CONFIG_MIPT_MT_SMTC */
printk("===========================\n");
evpe(vpflags);
local_irq_restore(flags);
}
-static int mt_opt_norps = 0;
+static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
-static int mt_opt_forceconfig7 = 0;
+static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;
static int __init rps_disable(char *s)
@@ -271,8 +154,8 @@ static int __init config7_set(char *str)
__setup("config7=", config7_set);
/* Experimental cache flush control parameters that should go away some day */
-int mt_protiflush = 0;
-int mt_protdflush = 0;
+int mt_protiflush;
+int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;
@@ -303,19 +186,8 @@ static int __init ndflush(char *s)
return 1;
}
__setup("ndflush=", ndflush);
-#ifdef CONFIG_MIPS_MT_FPAFF
-static int fpaff_threshold = -1;
-static int __init fpaff_thresh(char *str)
-{
- get_option(&str, &fpaff_threshold);
- return 1;
-}
-
-__setup("fpaff=", fpaff_thresh);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
-static unsigned int itc_base = 0;
+static unsigned int itc_base;
static int __init set_itc_base(char *str)
{
@@ -331,7 +203,7 @@ void mips_mt_set_cpuoptions(void)
unsigned int nconfig7 = oconfig7;
if (mt_opt_norps) {
- printk("\"norps\" option deprectated: use \"rpsctl=\"\n");
+ printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
}
if (mt_opt_rpsctl >= 0) {
printk("34K return prediction stack override set to %d.\n",
@@ -355,7 +227,7 @@ void mips_mt_set_cpuoptions(void)
if (oconfig7 != nconfig7) {
__asm__ __volatile("sync");
write_c0_config7(nconfig7);
- ehb ();
+ ehb();
printk("Config7: 0x%08x\n", read_c0_config7());
}
@@ -369,20 +241,6 @@ void mips_mt_set_cpuoptions(void)
if (mt_n_dflushes != 1)
printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
-#ifdef CONFIG_MIPS_MT_FPAFF
- /* FPU Use Factor empirically derived from experiments on 34K */
-#define FPUSEFACTOR 333
-
- if (fpaff_threshold >= 0) {
- mt_fpemul_threshold = fpaff_threshold;
- } else {
- mt_fpemul_threshold =
- (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
- }
- printk("FPU Affinity set after %ld emulations\n",
- mt_fpemul_threshold);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
if (itc_base != 0) {
/*
* Configure ITC mapping. This code is very
@@ -431,20 +289,27 @@ void mips_mt_set_cpuoptions(void)
void mt_cflush_lockdown(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_cflush_lockdown(void);
-
- smtc_cflush_lockdown();
-#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
void mt_cflush_release(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_cflush_release(void);
-
- smtc_cflush_release();
-#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
+
+struct class *mt_class;
+
+static int __init mt_init(void)
+{
+ struct class *mtc;
+
+ mtc = class_create(THIS_MODULE, "mt");
+ if (IS_ERR(mtc))
+ return PTR_ERR(mtc);
+
+ mt_class = mtc;
+
+ return 0;
+}
+
+subsys_initcall(mt_init);
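mt_class gives MT helpers a common /sys/class/mt anchor to hang devices off. A hedged sketch of the expected consumer pattern (the VPE loader is the known in-tree user; the major number below is a placeholder):

/* Hypothetical consumer of mt_class; 'major' is a placeholder. */
static int example_mt_device(int major)
{
	struct device *dev;

	dev = device_create(mt_class, NULL, MKDEV(major, 0), NULL,
			    "vpe%d", 0);
	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}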
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index f44a01357ad..2607c3a4ff7 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -5,22 +5,31 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
+ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
* Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
*/
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/checksum.h>
-#include <asm/pgtable.h>
+#include <linux/mm.h>
#include <asm/uaccess.h>
+#include <asm/ftrace.h>
extern void *__bzero(void *__s, size_t __count);
+extern long __strncpy_from_kernel_nocheck_asm(char *__to,
+ const char *__from, long __len);
+extern long __strncpy_from_kernel_asm(char *__to, const char *__from,
+ long __len);
extern long __strncpy_from_user_nocheck_asm(char *__to,
- const char *__from, long __len);
+ const char *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char *__from,
- long __len);
+ long __len);
+extern long __strlen_kernel_nocheck_asm(const char *s);
+extern long __strlen_kernel_asm(const char *s);
extern long __strlen_user_nocheck_asm(const char *s);
extern long __strlen_user_asm(const char *s);
+extern long __strnlen_kernel_nocheck_asm(const char *s);
+extern long __strnlen_kernel_asm(const char *s);
extern long __strnlen_user_nocheck_asm(const char *s);
extern long __strnlen_user_asm(const char *s);
@@ -31,20 +40,45 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(kernel_thread);
+/*
+ * Functions that operate on entire pages. Mostly used by memory management.
+ */
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__copy_user_inatomic);
+#ifdef CONFIG_EVA
+EXPORT_SYMBOL(__copy_from_user_eva);
+EXPORT_SYMBOL(__copy_in_user_eva);
+EXPORT_SYMBOL(__copy_to_user_eva);
+EXPORT_SYMBOL(__copy_user_inatomic_eva);
+#endif
EXPORT_SYMBOL(__bzero);
+EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strncpy_from_kernel_asm);
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_user_asm);
+EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strlen_kernel_asm);
EXPORT_SYMBOL(__strlen_user_nocheck_asm);
EXPORT_SYMBOL(__strlen_user_asm);
+EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strnlen_kernel_asm);
EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
EXPORT_SYMBOL(__strnlen_user_asm);
EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_partial_copy_kernel);
+EXPORT_SYMBOL(__csum_partial_copy_to_user);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
EXPORT_SYMBOL(invalid_pte_table);
+#ifdef CONFIG_FUNCTION_TRACER
+/* _mcount is defined in arch/mips/kernel/mcount.S */
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c
new file mode 100644
index 00000000000..87609752969
--- /dev/null
+++ b/arch/mips/kernel/mips_machine.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <asm/mips_machine.h>
+#include <asm/prom.h>
+
+static struct mips_machine *mips_machine __initdata;
+
+#define for_each_machine(mach) \
+ for ((mach) = (struct mips_machine *)&__mips_machines_start; \
+ (mach) && \
+ (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
+ (mach)++)
+
+__init int mips_machtype_setup(char *id)
+{
+ struct mips_machine *mach;
+
+ for_each_machine(mach) {
+ if (mach->mach_id == NULL)
+ continue;
+
+ if (strcmp(mach->mach_id, id) == 0) {
+ mips_machtype = mach->mach_type;
+ return 0;
+ }
+ }
+
+ pr_err("MIPS: no machine found for id '%s', supported machines:\n", id);
+ pr_err("%-24s %s\n", "id", "name");
+ for_each_machine(mach)
+ pr_err("%-24s %s\n", mach->mach_id, mach->mach_name);
+
+ return 1;
+}
+
+__setup("machtype=", mips_machtype_setup);
+
+__init void mips_machine_setup(void)
+{
+ struct mips_machine *mach;
+
+ for_each_machine(mach) {
+ if (mips_machtype == mach->mach_type) {
+ mips_machine = mach;
+ break;
+ }
+ }
+
+ if (!mips_machine)
+ return;
+
+ mips_set_machine_name(mips_machine->mach_name);
+
+ if (mips_machine->mach_setup)
+ mips_machine->mach_setup();
+}
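mips_machtype_setup() and mips_machine_setup() only walk the __mips_machines section; boards get into that section at build time. A hedged registration sketch, assuming the MIPS_MACHINE() macro from asm/mips_machine.h (the board id, name, and setup hook below are all hypothetical):

static void __init example_board_setup(void)
{
	/* hypothetical board-specific initialisation */
}

MIPS_MACHINE(MACH_EXAMPLE_BOARD,	/* hypothetical mach_type */
	     "example,board",		/* mach_id matched by machtype= */
	     "Example Board",		/* human-readable mach_name */
	     example_board_setup);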
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
new file mode 100644
index 00000000000..2b70723071c
--- /dev/null
+++ b/arch/mips/kernel/module-rela.c
@@ -0,0 +1,145 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2001 Rusty Russell.
+ * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2005 Thiemo Seufer
+ */
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/moduleloader.h>
+
+extern int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v);
+
+static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location = v;
+
+ return 0;
+}
+
+static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ if (v % 4) {
+		pr_err("module %s: dangerous R_MIPS_26 RELA relocation\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ printk(KERN_ERR
+ "module %s: relocation overflow\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
+
+ return 0;
+}
+
+static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) | (v & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *(Elf_Addr *)location = v;
+
+ return 0;
+}
+
+static int apply_r_mips_higher_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_highest_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
+
+ return 0;
+}
+
+static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
+ Elf_Addr v) = {
+ [R_MIPS_NONE] = apply_r_mips_none,
+ [R_MIPS_32] = apply_r_mips_32_rela,
+ [R_MIPS_26] = apply_r_mips_26_rela,
+ [R_MIPS_HI16] = apply_r_mips_hi16_rela,
+ [R_MIPS_LO16] = apply_r_mips_lo16_rela,
+ [R_MIPS_64] = apply_r_mips_64_rela,
+ [R_MIPS_HIGHER] = apply_r_mips_higher_rela,
+ [R_MIPS_HIGHEST] = apply_r_mips_highest_rela
+};
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+ Elf_Sym *sym;
+ u32 *location;
+ unsigned int i;
+ Elf_Addr v;
+ int res;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_MIPS_R_SYM(rel[i]);
+ if (IS_ERR_VALUE(sym->st_value)) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+ printk(KERN_WARNING "%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+
+ v = sym->st_value + rel[i].r_addend;
+
+ res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
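The + 0x8000 in apply_r_mips_hi16_rela() compensates for the sign extension that the paired LO16 consumer (an addiu at run time) applies to the low half. A worked round-trip, written as a standalone user-space check with an illustrative value whose bit 15 is set:

#include <assert.h>
#include <stdint.h>

/* Illustrative value: bit 15 of v is set, so a naive v >> 16 would be
 * one too small once the sign-extended low half is added back. */
static void hi16_lo16_roundtrip(void)
{
	uint32_t v  = 0x00418000;
	uint16_t hi = (uint16_t)((v + 0x8000u) >> 16);	/* 0x0042 */
	int16_t  lo = (int16_t)(v & 0xffff);		/* -0x8000 */

	assert(((uint32_t)hi << 16) + lo == v);		/* lui + addiu */
}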
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index d7bf0215bc1..2a52568dbcd 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -22,13 +22,17 @@
#include <linux/moduleloader.h>
#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/jump_label.h>
+
+#include <asm/pgtable.h> /* MODULE_START */
struct mips_hi16 {
struct mips_hi16 *next;
@@ -36,33 +40,19 @@ struct mips_hi16 {
Elf_Addr value;
};
-static struct mips_hi16 *mips_hi16_list;
-
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
+#ifdef MODULE_START
void *module_alloc(unsigned long size)
{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
- char *secstrings, struct module *mod)
-{
- return 0;
+ return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+ GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+ __builtin_return_address(0));
}
+#endif
-static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
+int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
return 0;
}
@@ -74,40 +64,14 @@ static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v)
return 0;
}
-static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = v;
-
- return 0;
-}
-
static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
{
if (v % 4) {
- printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
- return -ENOEXEC;
- }
-
- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
+ pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
me->name);
return -ENOEXEC;
}
- *location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
-
- return 0;
-}
-
-static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- if (v % 4) {
- printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
- return -ENOEXEC;
- }
-
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
printk(KERN_ERR
"module %s: relocation overflow\n",
@@ -115,7 +79,8 @@ static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
return -ENOEXEC;
}
- *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
+ *location = (*location & ~0x03ffffff) |
+ ((*location + (v >> 2)) & 0x03ffffff);
return 0;
}
@@ -135,32 +100,34 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
n->addr = (Elf_Addr *)location;
n->value = v;
- n->next = mips_hi16_list;
- mips_hi16_list = n;
+ n->next = me->arch.r_mips_hi16_list;
+ me->arch.r_mips_hi16_list = n;
return 0;
}
-static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
+static void free_relocation_chain(struct mips_hi16 *l)
{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+ struct mips_hi16 *next;
- return 0;
+ while (l) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
}
static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
{
unsigned long insnlo = *location;
+ struct mips_hi16 *l;
Elf_Addr val, vallo;
- /* Sign extend the addend we extract from the lo insn. */
+ /* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
- if (mips_hi16_list != NULL) {
- struct mips_hi16 *l;
-
- l = mips_hi16_list;
+ if (me->arch.r_mips_hi16_list != NULL) {
+ l = me->arch.r_mips_hi16_list;
while (l != NULL) {
struct mips_hi16 *next;
unsigned long insn;
@@ -195,11 +162,11 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
l = next;
}
- mips_hi16_list = NULL;
+ me->arch.r_mips_hi16_list = NULL;
}
/*
- * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
@@ -208,41 +175,12 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
return 0;
out_danger:
- printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
-
- return -ENOEXEC;
-}
-
-static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = (*location & 0xffff0000) | (v & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *(Elf_Addr *)location = v;
-
- return 0;
-}
-
-static int apply_r_mips_higher_rela(struct module *me, u32 *location,
- Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
-
- return 0;
-}
+ free_relocation_chain(l);
+ me->arch.r_mips_hi16_list = NULL;
-static int apply_r_mips_highest_rela(struct module *me, u32 *location,
- Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
+ pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
- return 0;
+ return -ENOEXEC;
}
static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
@@ -254,18 +192,6 @@ static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
[R_MIPS_LO16] = apply_r_mips_lo16_rel
};
-static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
- Elf_Addr v) = {
- [R_MIPS_NONE] = apply_r_mips_none,
- [R_MIPS_32] = apply_r_mips_32_rela,
- [R_MIPS_26] = apply_r_mips_26_rela,
- [R_MIPS_HI16] = apply_r_mips_hi16_rela,
- [R_MIPS_LO16] = apply_r_mips_lo16_rela,
- [R_MIPS_64] = apply_r_mips_64_rela,
- [R_MIPS_HIGHER] = apply_r_mips_higher_rela,
- [R_MIPS_HIGHEST] = apply_r_mips_highest_rela
-};
-
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
@@ -280,6 +206,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
+ me->arch.r_mips_hi16_list = NULL;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -287,7 +214,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
- if (!sym->st_value) {
+ if (IS_ERR_VALUE(sym->st_value)) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
@@ -303,44 +230,17 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
return res;
}
- return 0;
-}
-
-int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec,
- struct module *me)
-{
- Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
- Elf_Sym *sym;
- u32 *location;
- unsigned int i;
- Elf_Addr v;
- int res;
-
- pr_debug("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
-
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
- /* This is the symbol it is referring to */
- sym = (Elf_Sym *)sechdrs[symindex].sh_addr
- + ELF_MIPS_R_SYM(rel[i]);
- if (!sym->st_value) {
- /* Ignore unresolved weak symbol */
- if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
- continue;
- printk(KERN_WARNING "%s: Unknown symbol %s\n",
- me->name, strtab + sym->st_name);
- return -ENOENT;
- }
-
- v = sym->st_value + rel[i].r_addend;
+ /*
+	 * Normally the hi16 list should be deallocated at this point. A
+	 * malformed binary, however, could contain a series of R_MIPS_HI16
+ * relocations not followed by a R_MIPS_LO16 relocation. In that
+ * case, free up the list and return an error.
+ */
+ if (me->arch.r_mips_hi16_list) {
+ free_relocation_chain(me->arch.r_mips_hi16_list);
+ me->arch.r_mips_hi16_list = NULL;
- res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
- if (res)
- return res;
+ return -ENOEXEC;
}
return 0;
@@ -362,11 +262,11 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
spin_unlock_irqrestore(&dbe_lock, flags);
/* Now, if we found one, we are running inside it now, hence
- we cannot unload the module, hence no refcnt needed. */
+ we cannot unload the module, hence no refcnt needed. */
return e;
}
-/* Put in dbe list if neccessary. */
+/* Put in dbe list if necessary. */
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
@@ -374,6 +274,9 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *s;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ /* Make jump label nops. */
+ jump_label_apply_nops(me);
+
INIT_LIST_HEAD(&me->arch.dbe_list);
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
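The split this diff completes (REL handlers staying in module.c, RELA handlers moving to module-rela.c) boils down to where the addend lives. A minimal sketch mirroring the two v computations in the apply_relocate*() loops above (types come from <linux/elf.h> and the MIPS module headers):

/* REL: the addend is already encoded in the instruction word, so the
 * handler folds in *location (see apply_r_mips_26_rel above). */
static Elf_Addr rel_value(const Elf_Sym *sym)
{
	return sym->st_value;
}

/* RELA: the addend travels in the relocation record itself. */
static Elf_Addr rela_value(const Elf_Sym *sym, const Elf_Mips_Rela *rel)
{
	return sym->st_value + rel->r_addend;
}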
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
new file mode 100644
index 00000000000..f6547680c81
--- /dev/null
+++ b/arch/mips/kernel/octeon_switch.S
@@ -0,0 +1,519 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1994, 1995, 1996, by Andreas Busse
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ * written by Carsten Langgaard, carstenl@mips.com
+ */
+
+#define USE_ALTERNATE_RESUME_IMPL 1
+ .set push
+ .set arch=mips64r2
+#include "r4k_switch.S"
+ .set pop
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti, int usedfpu)
+ */
+ .align 7
+ LEAF(resume)
+ .set arch=octeon
+ mfc0 t1, CP0_STATUS
+ LONG_S t1, THREAD_STATUS(a0)
+ cpu_save_nonscratch a0
+ LONG_S ra, THREAD_REG31(a0)
+
+ /*
+ * check if we need to save FPU registers
+ */
+ PTR_L t3, TASK_THREAD_INFO(a0)
+ LONG_L t0, TI_FLAGS(t3)
+ li t1, _TIF_USEDFPU
+ and t2, t0, t1
+ beqz t2, 1f
+ nor t1, zero, t1
+
+ and t0, t0, t1
+ LONG_S t0, TI_FLAGS(t3)
+
+ /*
+ * clear saved user stack CU1 bit
+ */
+ LONG_L t0, ST_OFF(t3)
+ li t1, ~ST0_CU1
+ and t0, t0, t1
+ LONG_S t0, ST_OFF(t3)
+
+ .set push
+ .set arch=mips64r2
+ fpu_save_double a0 t0 t1 # c0_status passed in t0
+ # clobbers t1
+ .set pop
+1:
+
+ /* check if we need to save COP2 registers */
+ PTR_L t2, TASK_THREAD_INFO(a0)
+ LONG_L t0, ST_OFF(t2)
+ bbit0 t0, 30, 1f
+
+ /* Disable COP2 in the stored process state */
+ li t1, ST0_CU2
+ xor t0, t1
+ LONG_S t0, ST_OFF(t2)
+
+ /* Enable COP2 so we can save it */
+ mfc0 t0, CP0_STATUS
+ or t0, t1
+ mtc0 t0, CP0_STATUS
+
+ /* Save COP2 */
+ daddu a0, THREAD_CP2
+ jal octeon_cop2_save
+ dsubu a0, THREAD_CP2
+
+ /* Disable COP2 now that we are done */
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_CU2
+ xor t0, t1
+ mtc0 t0, CP0_STATUS
+
+1:
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+ /* Check if we need to store CVMSEG state */
+ mfc0 t0, $11,7 /* CvmMemCtl */
+ bbit0 t0, 6, 3f /* Is user access enabled? */
+
+ /* Store the CVMSEG state */
+ /* Extract the size of CVMSEG */
+ andi t0, 0x3f
+ /* Multiply * (cache line size/sizeof(long)/2) */
+ sll t0, 7-LONGLOG-1
+ li t1, -32768 /* Base address of CVMSEG */
+ LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
+ synciobdma
+2:
+ .set noreorder
+ LONG_L t8, 0(t1) /* Load from CVMSEG */
+ subu t0, 1 /* Decrement loop var */
+	LONG_L	t9, LONGSIZE(t1) /* Load from CVMSEG */
+ LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */
+ LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */
+ LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
+ bnez t0, 2b /* Loop until we've copied it all */
+	LONG_S	t9, -LONGSIZE(t2) /* Store CVMSEG to thread storage */
+ .set reorder
+
+ /* Disable access to CVMSEG */
+ mfc0 t0, $11,7 /* CvmMemCtl */
+ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */
+ mtc0 t0, $11,7 /* CvmMemCtl */
+#endif
+3:
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
+
+ /*
+ * The order of restoring the registers takes care of the race
+ * updating $28, $29 and kernelsp without disabling ints.
+ */
+ move $28, a2
+ cpu_restore_nonscratch a1
+
+ PTR_ADDU t0, $28, _THREAD_SIZE - 32
+ set_saved_sp t0, t1, t2
+
+ mfc0 t1, CP0_STATUS /* Do we really need this? */
+ li a3, 0xff01
+ and t1, a3
+ LONG_L a2, THREAD_STATUS(a1)
+ nor a3, $0, a3
+ and a2, a3
+ or a2, t1
+ mtc0 a2, CP0_STATUS
+ move v0, a0
+ jr ra
+ END(resume)
+
+/*
+ * void octeon_cop2_save(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ LEAF(octeon_cop2_save)
+
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ /* Save the COP2 CRC state */
+ dmfc2 t0, 0x0201
+ dmfc2 t1, 0x0202
+ dmfc2 t2, 0x0200
+ sd t0, OCTEON_CP2_CRC_IV(a0)
+ sd t1, OCTEON_CP2_CRC_LENGTH(a0)
+ sd t2, OCTEON_CP2_CRC_POLY(a0)
+ /* Skip next instructions if CvmCtl[NODFA_CP2] set */
+ bbit1 t9, 28, 1f
+
+ /* Save the LLM state */
+ dmfc2 t0, 0x0402
+ dmfc2 t1, 0x040A
+ sd t0, OCTEON_CP2_LLM_DAT(a0)
+ sd t1, OCTEON_CP2_LLM_DAT+8(a0)
+
+1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
+
+ /* Save the COP2 crypto state */
+ /* this part is mostly common to both pass 1 and later revisions */
+ dmfc2 t0, 0x0084
+ dmfc2 t1, 0x0080
+ dmfc2 t2, 0x0081
+ dmfc2 t3, 0x0082
+ sd t0, OCTEON_CP2_3DES_IV(a0)
+ dmfc2 t0, 0x0088
+ sd t1, OCTEON_CP2_3DES_KEY(a0)
+ dmfc2 t1, 0x0111 /* only necessary for pass 1 */
+ sd t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmfc2 t2, 0x0102
+ sd t3, OCTEON_CP2_3DES_KEY+16(a0)
+ dmfc2 t3, 0x0103
+ sd t0, OCTEON_CP2_3DES_RESULT(a0)
+ dmfc2 t0, 0x0104
+ sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
+ dmfc2 t1, 0x0105
+ sd t2, OCTEON_CP2_AES_IV(a0)
+ dmfc2 t2, 0x0106
+ sd t3, OCTEON_CP2_AES_IV+8(a0)
+ dmfc2 t3, 0x0107
+ sd t0, OCTEON_CP2_AES_KEY(a0)
+ dmfc2 t0, 0x0110
+ sd t1, OCTEON_CP2_AES_KEY+8(a0)
+ dmfc2 t1, 0x0100
+ sd t2, OCTEON_CP2_AES_KEY+16(a0)
+ dmfc2 t2, 0x0101
+ sd t3, OCTEON_CP2_AES_KEY+24(a0)
+ mfc0 t3, $15,0 /* Get the processor ID register */
+ sd t0, OCTEON_CP2_AES_KEYLEN(a0)
+ li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ sd t1, OCTEON_CP2_AES_RESULT(a0)
+ sd t2, OCTEON_CP2_AES_RESULT+8(a0)
+ /* Skip to the Pass1 version of the remainder of the COP2 state */
+ beq t3, t0, 2f
+
+ /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
+ dmfc2 t1, 0x0240
+ dmfc2 t2, 0x0241
+ dmfc2 t3, 0x0242
+ dmfc2 t0, 0x0243
+ sd t1, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t1, 0x0244
+ sd t2, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t2, 0x0245
+ sd t3, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t3, 0x0246
+ sd t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t0, 0x0247
+ sd t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t1, 0x0248
+ sd t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t2, 0x0249
+ sd t3, OCTEON_CP2_HSH_DATW+48(a0)
+ dmfc2 t3, 0x024A
+ sd t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmfc2 t0, 0x024B
+ sd t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmfc2 t1, 0x024C
+ sd t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmfc2 t2, 0x024D
+ sd t3, OCTEON_CP2_HSH_DATW+80(a0)
+ dmfc2 t3, 0x024E
+ sd t0, OCTEON_CP2_HSH_DATW+88(a0)
+ dmfc2 t0, 0x0250
+ sd t1, OCTEON_CP2_HSH_DATW+96(a0)
+ dmfc2 t1, 0x0251
+ sd t2, OCTEON_CP2_HSH_DATW+104(a0)
+ dmfc2 t2, 0x0252
+ sd t3, OCTEON_CP2_HSH_DATW+112(a0)
+ dmfc2 t3, 0x0253
+ sd t0, OCTEON_CP2_HSH_IVW(a0)
+ dmfc2 t0, 0x0254
+ sd t1, OCTEON_CP2_HSH_IVW+8(a0)
+ dmfc2 t1, 0x0255
+ sd t2, OCTEON_CP2_HSH_IVW+16(a0)
+ dmfc2 t2, 0x0256
+ sd t3, OCTEON_CP2_HSH_IVW+24(a0)
+ dmfc2 t3, 0x0257
+ sd t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmfc2 t0, 0x0258
+ sd t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmfc2 t1, 0x0259
+ sd t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmfc2 t2, 0x025E
+ sd t3, OCTEON_CP2_HSH_IVW+56(a0)
+ dmfc2 t3, 0x025A
+ sd t0, OCTEON_CP2_GFM_MULT(a0)
+ dmfc2 t0, 0x025B
+ sd t1, OCTEON_CP2_GFM_MULT+8(a0)
+ sd t2, OCTEON_CP2_GFM_POLY(a0)
+ sd t3, OCTEON_CP2_GFM_RESULT(a0)
+ sd t0, OCTEON_CP2_GFM_RESULT+8(a0)
+ jr ra
+
+2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
+ dmfc2 t3, 0x0040
+ dmfc2 t0, 0x0041
+ dmfc2 t1, 0x0042
+ dmfc2 t2, 0x0043
+ sd t3, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t3, 0x0044
+ sd t0, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t0, 0x0045
+ sd t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t1, 0x0046
+ sd t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t2, 0x0048
+ sd t3, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t3, 0x0049
+ sd t0, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t0, 0x004A
+ sd t1, OCTEON_CP2_HSH_DATW+48(a0)
+ sd t2, OCTEON_CP2_HSH_IVW(a0)
+ sd t3, OCTEON_CP2_HSH_IVW+8(a0)
+ sd t0, OCTEON_CP2_HSH_IVW+16(a0)
+
+3: /* pass 1 or CvmCtl[NOCRYPTO] set */
+ jr ra
+ END(octeon_cop2_save)
+
+/*
+ * void octeon_cop2_restore(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_cop2_restore)
+ /* First cache line was prefetched before the call */
+ pref 4, 128(a0)
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ pref 4, 256(a0)
+ ld t0, OCTEON_CP2_CRC_IV(a0)
+ pref 4, 384(a0)
+ ld t1, OCTEON_CP2_CRC_LENGTH(a0)
+ ld t2, OCTEON_CP2_CRC_POLY(a0)
+
+ /* Restore the COP2 CRC state */
+ dmtc2 t0, 0x0201
+ dmtc2 t1, 0x1202
+ bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */
+ dmtc2 t2, 0x4200
+
+ /* Restore the LLM state */
+ ld t0, OCTEON_CP2_LLM_DAT(a0)
+ ld t1, OCTEON_CP2_LLM_DAT+8(a0)
+ dmtc2 t0, 0x0402
+ dmtc2 t1, 0x040A
+
+2:
+ bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */
+ nop
+
+ /* Restore the COP2 crypto state common to pass 1 and pass 2 */
+ ld t0, OCTEON_CP2_3DES_IV(a0)
+ ld t1, OCTEON_CP2_3DES_KEY(a0)
+ ld t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmtc2 t0, 0x0084
+ ld t0, OCTEON_CP2_3DES_KEY+16(a0)
+ dmtc2 t1, 0x0080
+ ld t1, OCTEON_CP2_3DES_RESULT(a0)
+ dmtc2 t2, 0x0081
+ ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
+ dmtc2 t0, 0x0082
+ ld t0, OCTEON_CP2_AES_IV(a0)
+ dmtc2 t1, 0x0098
+ ld t1, OCTEON_CP2_AES_IV+8(a0)
+ dmtc2 t2, 0x010A /* only really needed for pass 1 */
+ ld t2, OCTEON_CP2_AES_KEY(a0)
+ dmtc2 t0, 0x0102
+ ld t0, OCTEON_CP2_AES_KEY+8(a0)
+ dmtc2 t1, 0x0103
+ ld t1, OCTEON_CP2_AES_KEY+16(a0)
+ dmtc2 t2, 0x0104
+ ld t2, OCTEON_CP2_AES_KEY+24(a0)
+ dmtc2 t0, 0x0105
+ ld t0, OCTEON_CP2_AES_KEYLEN(a0)
+ dmtc2 t1, 0x0106
+ ld t1, OCTEON_CP2_AES_RESULT(a0)
+ dmtc2 t2, 0x0107
+ ld t2, OCTEON_CP2_AES_RESULT+8(a0)
+ mfc0 t3, $15,0 /* Get the processor ID register */
+ dmtc2 t0, 0x0110
+ li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ dmtc2 t1, 0x0100
+ bne t0, t3, 3f /* Skip the next stuff for non-pass1 */
+ dmtc2 t2, 0x0101
+
+ /* this code is specific for pass 1 */
+ ld t0, OCTEON_CP2_HSH_DATW(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t2, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t0, 0x0040
+ ld t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t1, 0x0041
+ ld t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t2, 0x0042
+ ld t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t0, 0x0043
+ ld t0, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t1, 0x0044
+ ld t1, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t2, 0x0045
+ ld t2, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t0, 0x0046
+ ld t0, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t1, 0x0048
+ dmtc2 t2, 0x0049
+ b done_restore /* unconditional branch */
+ dmtc2 t0, 0x004A
+
+3: /* this is post-pass1 code */
+ ld t2, OCTEON_CP2_HSH_DATW(a0)
+ ld t0, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t2, 0x0240
+ ld t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t0, 0x0241
+ ld t0, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t1, 0x0242
+ ld t1, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t2, 0x0243
+ ld t2, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t0, 0x0244
+ ld t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmtc2 t1, 0x0245
+ ld t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmtc2 t2, 0x0246
+ ld t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmtc2 t0, 0x0247
+ ld t0, OCTEON_CP2_HSH_DATW+80(a0)
+ dmtc2 t1, 0x0248
+ ld t1, OCTEON_CP2_HSH_DATW+88(a0)
+ dmtc2 t2, 0x0249
+ ld t2, OCTEON_CP2_HSH_DATW+96(a0)
+ dmtc2 t0, 0x024A
+ ld t0, OCTEON_CP2_HSH_DATW+104(a0)
+ dmtc2 t1, 0x024B
+ ld t1, OCTEON_CP2_HSH_DATW+112(a0)
+ dmtc2 t2, 0x024C
+ ld t2, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t0, 0x024D
+ ld t0, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t1, 0x024E
+ ld t1, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t2, 0x0250
+ ld t2, OCTEON_CP2_HSH_IVW+24(a0)
+ dmtc2 t0, 0x0251
+ ld t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmtc2 t1, 0x0252
+ ld t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmtc2 t2, 0x0253
+ ld t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmtc2 t0, 0x0254
+ ld t0, OCTEON_CP2_HSH_IVW+56(a0)
+ dmtc2 t1, 0x0255
+ ld t1, OCTEON_CP2_GFM_MULT(a0)
+ dmtc2 t2, 0x0256
+ ld t2, OCTEON_CP2_GFM_MULT+8(a0)
+ dmtc2 t0, 0x0257
+ ld t0, OCTEON_CP2_GFM_POLY(a0)
+ dmtc2 t1, 0x0258
+ ld t1, OCTEON_CP2_GFM_RESULT(a0)
+ dmtc2 t2, 0x0259
+ ld t2, OCTEON_CP2_GFM_RESULT+8(a0)
+ dmtc2 t0, 0x025E
+ dmtc2 t1, 0x025A
+ dmtc2 t2, 0x025B
+
+done_restore:
+ jr ra
+ nop
+ END(octeon_cop2_restore)
+ .set pop
+
+/*
+ * void octeon_mult_save()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in SAVE_SOME in stackframe.h. It can only
+ * safely modify k0 and k1.
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_save)
+ dmfc0 k0, $9,7 /* CvmCtl register. */
+ bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */
+ nop
+
+ /* Save the multiplier state */
+ v3mulu k0, $0, $0
+ v3mulu k1, $0, $0
+ sd k0, PT_MTP(sp) /* PT_MTP has P0 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */
+ ori k1, $0, 1
+ v3mulu k1, k1, $0
+ sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
+ v3mulu k1, $0, $0
+ sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */
+ jr ra
+ sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */
+
+1: /* Resume here if CvmCtl[NOMUL] */
+	jr	ra
+	nop
+ END(octeon_mult_save)
+ .set pop
+
+/*
+ * void octeon_mult_restore()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in RESTORE_SOME in stackframe.h.
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_restore)
+ dmfc0 k1, $9,7 /* CvmCtl register. */
+ ld v0, PT_MPL(sp) /* MPL0 */
+ ld v1, PT_MPL+8(sp) /* MPL1 */
+ ld k0, PT_MPL+16(sp) /* MPL2 */
+ bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */
+ /* Normally falls through, so no time wasted here */
+ nop
+
+ /* Restore the multiplier state */
+ ld k1, PT_MTP+16(sp) /* P2 */
+ MTM0 v0 /* MPL0 */
+ ld v0, PT_MTP+8(sp) /* P1 */
+ MTM1 v1 /* MPL1 */
+ ld v1, PT_MTP(sp) /* P0 */
+ MTM2 k0 /* MPL2 */
+ MTP2 k1 /* P2 */
+ MTP1 v0 /* P1 */
+ jr ra
+ MTP0 v1 /* P0 */
+
+1: /* Resume here if CvmCtl[NOMUL] */
+ jr ra
+ nop
+ END(octeon_mult_restore)
+ .set pop
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 00000000000..c1cf9c6c3f7
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,69 @@
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code, and the callchain
+ * support references the MIPS stacktrace.c code.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/perf_event.h>
+
+#include <asm/stacktrace.h>
+
+/* Callchain handling code. */
+
+/*
+ * Leave userspace callchain empty for now. When we find a way to trace
+ * the user stack callchains, we will add it here.
+ */
+
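+/*
+ * Scan raw stack words upward from reg29 (the MIPS stack pointer) and
+ * record every value that looks like a kernel text address. This gives
+ * a best-effort callchain when frame-accurate unwinding is unavailable.
+ */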
+static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+ unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ perf_callchain_store(entry, addr);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ }
+ }
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(current);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_perf_callchain(entry, sp);
+ return;
+ }
+ do {
+ perf_callchain_store(entry, pc);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ pc = unwind_stack(current, &sp, pc, &ra);
+ } while (pc);
+#else
+ save_raw_perf_callchain(entry, sp);
+#endif
+}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
new file mode 100644
index 00000000000..4f2d9dece7a
--- /dev/null
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -0,0 +1,1710 @@
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Copyright (C) 2011 Cavium Networks, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code, and the callchain
+ * support references the MIPS stacktrace.c code.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/time.h> /* For perf_irq */
+
+#define MIPS_MAX_HWEVENTS 4
+#define MIPS_TCS_PER_COUNTER 2
+#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
+
+struct cpu_hw_events {
+ /* Array of events on this cpu. */
+ struct perf_event *events[MIPS_MAX_HWEVENTS];
+
+ /*
+ * Set the bit (indexed by the counter number) when the counter
+ * is used for an event.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * Software copy of the control register for each performance counter.
+	 * MIPS CPUs vary in their performance counters; they may use this
+	 * copy differently, or not at all.
+ */
+ unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .saved_ctrl = {0},
+};
+
+/* The description of MIPS performance events. */
+struct mips_perf_event {
+ unsigned int event_id;
+ /*
+ * MIPS performance counters are indexed starting from 0.
+	 * CNTR_EVEN indicates that the indexes of the counters to be
+	 * used are even numbers.
+ */
+ unsigned int cntr_mask;
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+ #define CNTR_ALL 0xffffffff
+#ifdef CONFIG_MIPS_MT_SMP
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+#else
+ #define T
+ #define V
+ #define P
+#endif
+};
+
+static struct mips_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+struct mips_pmu {
+ u64 max_period;
+ u64 valid_count;
+ u64 overflow;
+ const char *name;
+ int irq;
+ u64 (*read_counter)(unsigned int idx);
+ void (*write_counter)(unsigned int idx, u64 val);
+ const struct mips_perf_event *(*map_raw_event)(u64 config);
+ const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+ const struct mips_perf_event (*cache_event_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ unsigned int num_counters;
+};
+
+static struct mips_pmu mipspmu;
+
+#define M_CONFIG1_PC (1 << 4)
+
+#define M_PERFCTL_EXL (1 << 0)
+#define M_PERFCTL_KERNEL (1 << 1)
+#define M_PERFCTL_SUPERVISOR (1 << 2)
+#define M_PERFCTL_USER (1 << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4)
+#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
+
+#ifdef CONFIG_CPU_BMIPS5000
+#define M_PERFCTL_MT_EN(filter) 0
+#else /* !CONFIG_CPU_BMIPS5000 */
+#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
+#endif /* CONFIG_CPU_BMIPS5000 */
+
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
+#define M_PERFCTL_WIDE (1 << 30)
+#define M_PERFCTL_MORE (1 << 31)
+#define M_PERFCTL_TC (1 << 30)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
+ M_PERFCTL_KERNEL | \
+ M_PERFCTL_USER | \
+ M_PERFCTL_SUPERVISOR | \
+ M_PERFCTL_INTERRUPT_ENABLE)
+
+#ifdef CONFIG_MIPS_MT_SMP
+#define M_PERFCTL_CONFIG_MASK 0x3fff801f
+#else
+#define M_PERFCTL_CONFIG_MASK 0x1f
+#endif
+#define M_PERFCTL_EVENT_MASK 0xfe0
+
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+static int cpu_has_mipsmt_pertccounters;
+
+static DEFINE_RWLOCK(pmuint_rwlock);
+
+#if defined(CONFIG_CPU_BMIPS5000)
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
+#else
+/*
+ * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
+ * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
+ */
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : smp_processor_id())
+#endif
+
+/* Copied from op_model_mipsxx.c */
+static unsigned int vpe_shift(void)
+{
+ if (num_possible_cpus() > 1)
+ return 1;
+
+ return 0;
+}
+
+static unsigned int counters_total_to_per_cpu(unsigned int counters)
+{
+ return counters >> vpe_shift();
+}
+
+#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
+#define vpe_id() 0
+
+#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
+
+static void resume_local_counters(void);
+static void pause_local_counters(void);
+static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
+static int mipsxx_pmu_handle_shared_irq(void);
+
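+/*
+ * On cores where two VPEs share the four hardware counters, VPE 1 sees
+ * physical counters 2 and 3 as its counters 0 and 1, hence the rotation
+ * by two below.
+ */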
+static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
+{
+ if (vpe_id() == 1)
+ idx = (idx + 2) & 3;
+ return idx;
+}
+
+static u64 mipsxx_pmu_read_counter(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ /*
+		 * The counters are unsigned; we must cast to truncate
+		 * off the high bits.
+ */
+ return (u32)read_c0_perfcntr0();
+ case 1:
+ return (u32)read_c0_perfcntr1();
+ case 2:
+ return (u32)read_c0_perfcntr2();
+ case 3:
+ return (u32)read_c0_perfcntr3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ return read_c0_perfcntr0_64();
+ case 1:
+ return read_c0_perfcntr1_64();
+ case 2:
+ return read_c0_perfcntr2_64();
+ case 3:
+ return read_c0_perfcntr3_64();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfcntr0(val);
+ return;
+ case 1:
+ write_c0_perfcntr1(val);
+ return;
+ case 2:
+ write_c0_perfcntr2(val);
+ return;
+ case 3:
+ write_c0_perfcntr3(val);
+ return;
+ }
+}
+
+static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfcntr0_64(val);
+ return;
+ case 1:
+ write_c0_perfcntr1_64(val);
+ return;
+ case 2:
+ write_c0_perfcntr2_64(val);
+ return;
+ case 3:
+ write_c0_perfcntr3_64(val);
+ return;
+ }
+}
+
+static unsigned int mipsxx_pmu_read_control(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ return read_c0_perfctrl0();
+ case 1:
+ return read_c0_perfctrl1();
+ case 2:
+ return read_c0_perfctrl2();
+ case 3:
+ return read_c0_perfctrl3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfctrl0(val);
+ return;
+ case 1:
+ write_c0_perfctrl1(val);
+ return;
+ case 2:
+ write_c0_perfctrl2(val);
+ return;
+ case 3:
+ write_c0_perfctrl3(val);
+ return;
+ }
+}
+
+static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc)
+{
+ int i;
+
+ /*
+	 * We only need to care about the counter mask; the range has
+	 * already been checked.
+ */
+ unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
+
+ for (i = mipspmu.num_counters - 1; i >= 0; i--) {
+ /*
+		 * Note that some MIPS perf events can be counted by both
+		 * even and odd counters, whereas many others can only use
+		 * even _or_ odd counters. This introduces an issue: when an
+		 * event of the former kind takes the counter that an event
+		 * of the latter kind wants, counter allocation for the
+		 * latter event fails. Dynamically swapping the two would
+		 * satisfy both, but we leave this issue alone for now.
+ */
+ if (test_bit(i, &cntr_mask) &&
+ !test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+
+ return -EAGAIN;
+}
+
+static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) |
+		/* Make sure the interrupt is enabled. */
+ M_PERFCTL_INTERRUPT_ENABLE;
+ if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
+ /* enable the counter for the calling thread */
+ cpuc->saved_ctrl[idx] |=
+ (1 << (12 + vpe_id())) | M_PERFCTL_TC;
+
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
+}
+
+static void mipsxx_pmu_disable_event(int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER;
+ mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+ local_irq_restore(flags);
+}
+
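+/*
+ * Program the counter so that it overflows after "left" more events: it
+ * is preloaded with (overflow - left), where "overflow" is 2^31 or 2^63
+ * depending on the configured counter width.
+ */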
+static int mipspmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 left = local64_read(&hwc->period_left);
+ u64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely((left + period) & (1ULL << 63))) {
+ /* left underflowed by more than period. */
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ } else if (unlikely((left + period) <= period)) {
+ /* left underflowed by less than period. */
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > mipspmu.max_period) {
+ left = mipspmu.max_period;
+ local64_set(&hwc->period_left, left);
+ }
+
+ local64_set(&hwc->prev_count, mipspmu.overflow - left);
+
+ mipspmu.write_counter(idx, mipspmu.overflow - left);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
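+/*
+ * Fold the delta since the last read into the event count. The cmpxchg
+ * loop guards against prev_count being updated concurrently by the
+ * overflow interrupt handler.
+ */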
+static void mipspmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 prev_raw_count, new_raw_count;
+ u64 delta;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = mipspmu.read_counter(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = new_raw_count - prev_raw_count;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
+
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /* Set the period for the event. */
+ mipspmu_event_set_period(event, hwc, hwc->idx);
+
+ /* Enable the event. */
+ mipsxx_pmu_enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ /* We are working on a local event. */
+ mipsxx_pmu_disable_event(hwc->idx);
+ barrier();
+ mipspmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ perf_pmu_disable(event->pmu);
+
+	/* Look for a free counter for this event. */
+ idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ mipsxx_pmu_disable_event(idx);
+ cpuc->events[idx] = event;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ mipspmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static void mipspmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ mipspmu_stop(event, PERF_EF_UPDATE);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void mipspmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ mipspmu_event_update(event, hwc, hwc->idx);
+}
+
+static void mipspmu_enable(struct pmu *pmu)
+{
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ write_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+}
+
+/*
+ * MIPS performance counters can be per-TC. The control registers
+ * cannot be directly accessed across CPUs, so global control would
+ * need cross-CPU calls. on_each_cpu() could help, but we cannot
+ * guarantee this function is called with interrupts enabled. So
+ * we pause the local counters, then grab a rwlock and leave the
+ * counters on other CPUs alone. If a counter interrupt is raised
+ * while we own the write lock, the handler simply pauses the local
+ * counters on that CPU and spins. Also, we know we won't be migrated
+ * to another CPU between pausing local counters and grabbing the lock.
+ */
+static void mipspmu_disable(struct pmu *pmu)
+{
+ pause_local_counters();
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ write_lock(&pmuint_rwlock);
+#endif
+}
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+static int (*save_perf_irq)(void);
+
+static int mipspmu_get_irq(void)
+{
+ int err;
+
+ if (mipspmu.irq >= 0) {
+ /* Request my own irq handler. */
+ err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
+ IRQF_PERCPU | IRQF_NOBALANCING,
+ "mips_perf_pmu", NULL);
+ if (err) {
+ pr_warning("Unable to request IRQ%d for MIPS "
+ "performance counters!\n", mipspmu.irq);
+ }
+ } else if (cp0_perfcount_irq < 0) {
+ /*
+ * We are sharing the irq number with the timer interrupt.
+ */
+ save_perf_irq = perf_irq;
+ perf_irq = mipsxx_pmu_handle_shared_irq;
+ err = 0;
+ } else {
+ pr_warning("The platform hasn't properly defined its "
+ "interrupt controller.\n");
+ err = -ENOENT;
+ }
+
+ return err;
+}
+
+static void mipspmu_free_irq(void)
+{
+ if (mipspmu.irq >= 0)
+ free_irq(mipspmu.irq, NULL);
+ else if (cp0_perfcount_irq < 0)
+ perf_irq = save_perf_irq;
+}
+
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters; they
+ * have specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events,
+ &pmu_reserve_mutex)) {
+ /*
+ * We must not call the destroy function with interrupts
+ * disabled.
+ */
+ on_each_cpu(reset_counters,
+ (void *)(long)mipspmu.num_counters, 1);
+ mipspmu_free_irq();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ if (event->cpu >= nr_cpumask_bits ||
+ (event->cpu >= 0 && !cpu_online(event->cpu)))
+ return -ENODEV;
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0)
+ err = mipspmu_get_irq();
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ return __hw_perf_event_init(event);
+}
+
+static struct pmu pmu = {
+ .pmu_enable = mipspmu_enable,
+ .pmu_disable = mipspmu_disable,
+ .event_init = mipspmu_event_init,
+ .add = mipspmu_add,
+ .del = mipspmu_del,
+ .start = mipspmu_start,
+ .stop = mipspmu_stop,
+ .read = mipspmu_read,
+};
+
+static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
+{
+/*
+ * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
+ * event_id.
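+ * For example, a CNTR_ODD event 0x02 with range T encodes to
+ * 0x00aaaa02; mipsxx_pmu_alloc_counter() later recovers the counter
+ * mask from bits 8-23.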
+ */
+#ifdef CONFIG_MIPS_MT_SMP
+ return ((unsigned int)pev->range << 24) |
+ (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#else
+ return (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#endif
+}
+
+static const struct mips_perf_event *mipspmu_map_general_event(int idx)
+{
+ if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
+ return ERR_PTR(-EOPNOTSUPP);
+ return &(*mipspmu.general_event_map)[idx];
+}
+
+static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ const struct mips_perf_event *pev;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ pev = &((*mipspmu.cache_event_map)
+ [cache_type]
+ [cache_op]
+ [cache_result]);
+
+ if (pev->cntr_mask == 0)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return pev;
+
+}
+
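+/*
+ * Validate a group by dry-running counter allocation for the leader,
+ * every sibling, and the new event against a scratch cpu_hw_events;
+ * if they cannot all get a counter at once, the group is rejected.
+ */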
+static int validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cpu_hw_events fake_cpuc;
+
+ memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
+ return -EINVAL;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
+ return -EINVAL;
+ }
+
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* This is needed by specific irq handlers in perf_event_*.c */
+static void handle_associated_event(struct cpu_hw_events *cpuc,
+ int idx, struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu_event_update(event, hwc, idx);
+ data->period = event->hw.last_period;
+ if (!mipspmu_event_set_period(event, hwc, idx))
+ return;
+
+ if (perf_event_overflow(event, data, regs))
+ mipsxx_pmu_disable_event(idx);
+}
+
+
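+/*
+ * Probe the number of counters: Config1.PC says whether any exist at
+ * all, and the M ("more") bit in each control register says whether
+ * the next counter is implemented.
+ */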
+static int __n_counters(void)
+{
+ if (!(read_c0_config1() & M_CONFIG1_PC))
+ return 0;
+ if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
+ return 1;
+ if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
+ return 2;
+ if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
+ return 3;
+
+ return 4;
+}
+
+static int n_counters(void)
+{
+ int counters;
+
+ switch (current_cpu_type()) {
+ case CPU_R10000:
+ counters = 2;
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ counters = 4;
+ break;
+
+ default:
+ counters = __n_counters();
+ }
+
+ return counters;
+}
+
+static void reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+ switch (counters) {
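+	/* Deliberate fall-through: each case also resets all lower counters. */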
+ case 4:
+ mipsxx_pmu_write_control(3, 0);
+ mipspmu.write_counter(3, 0);
+ case 3:
+ mipsxx_pmu_write_control(2, 0);
+ mipspmu.write_counter(2, 0);
+ case 2:
+ mipsxx_pmu_write_control(1, 0);
+ mipspmu.write_counter(1, 0);
+ case 1:
+ mipsxx_pmu_write_control(0, 0);
+ mipspmu.write_counter(0, 0);
+ }
+}
+
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
+static const struct mips_perf_event mipsxxcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+};
+
+/* 74K/proAptiv core has different branch event code. */
+static const struct mips_perf_event mipsxxcore_event_map2
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
+};
+
+static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
+ [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
+};
+
+static const struct mips_perf_event bmips5000_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+};
+
+static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
+};
+
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
+ /*
+		 * Note that on MIPS only "hit" events are countable
+		 * for the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+},
+};
+
+/* 74K/proAptiv core has completely different cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map2
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
+ /*
+		 * Note that on MIPS only "hit" events are countable
+		 * for the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
+ },
+},
+/*
+ * 74K core does not have specific DTLB events. proAptiv core has
+ * "speculative" DTLB events which are numbered 0x63 (even/odd) and
+ * not included here. One can use raw events if really needed.
+ */
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+},
+};
+
+/* BMIPS5000 */
+static const struct mips_perf_event bmips5000_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T },
+ /*
+		 * Note that on MIPS only "hit" events are countable
+		 * for the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
+ [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
+ [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+},
+};
+
+
+static const struct mips_perf_event octeon_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
+ },
+},
+[C(DTLB)] = {
+ /*
+	 * Only general DTLB misses are counted; use the same event
+	 * for both read and write.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x37, CNTR_ALL },
+ },
+},
+};
+
+static const struct mips_perf_event xlp_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
+ [C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
+ [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+ [C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
+ [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
+ [C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
+ },
+},
+[C(DTLB)] = {
+ /*
+	 * Only general DTLB misses are counted; use the same event
+	 * for both read and write.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+ },
+},
+[C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x25, CNTR_ALL },
+ },
+},
+};
+
+#ifdef CONFIG_MIPS_MT_SMP
+static void check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->cpu >= 0) {
+ if (pev->range > V) {
+ /*
+ * The user selected an event that is processor
+ * wide, while expecting it to be VPE wide.
+ */
+ hwc->config_base |= M_TC_EN_ALL;
+ } else {
+ /*
+ * FIXME: cpu_data[event->cpu].vpe_id reports 0
+ * for both CPUs.
+ */
+ hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
+ hwc->config_base |= M_TC_EN_VPE;
+ }
+ } else
+ hwc->config_base |= M_TC_EN_ALL;
+}
+#else
+static void check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+}
+#endif
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ const struct mips_perf_event *pev;
+ int err;
+
+	/* Return the MIPS event descriptor for a generic perf event. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ pev = mipspmu_map_general_event(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ pev = mipspmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ /* We are working on the global raw event. */
+ mutex_lock(&raw_event_mutex);
+ pev = mipspmu.map_raw_event(event->attr.config);
+ } else {
+ /* The event type is not (yet) supported. */
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ERR(pev)) {
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+ return PTR_ERR(pev);
+ }
+
+ /*
+	 * We allow maximum flexibility in how each individual counter
+	 * shared by a single CPU operates (mode exclusion and range).
+ */
+ hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
+
+ /* Calculate range bits and validate it. */
+ if (num_possible_cpus() > 1)
+ check_and_calc_range(event, pev);
+
+ hwc->event_base = mipspmu_perf_event_encode(pev);
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+
+ if (!attr->exclude_user)
+ hwc->config_base |= M_PERFCTL_USER;
+ if (!attr->exclude_kernel) {
+ hwc->config_base |= M_PERFCTL_KERNEL;
+ /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
+ hwc->config_base |= M_PERFCTL_EXL;
+ }
+ if (!attr->exclude_hv)
+ hwc->config_base |= M_PERFCTL_SUPERVISOR;
+
+ hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+ /*
+	 * The event may belong to another CPU; we do not assign a local
+	 * counter to it for now.
+ */
+ hwc->idx = -1;
+ hwc->config = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = mipspmu.max_period;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event)
+ err = validate_group(event);
+
+ event->destroy = hw_perf_event_destroy;
+
+ if (err)
+ event->destroy(event);
+
+ return err;
+}
+
+static void pause_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int ctr = mipspmu.num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ do {
+ ctr--;
+ cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
+ mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ } while (ctr > 0);
+ local_irq_restore(flags);
+}
+
+static void resume_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int ctr = mipspmu.num_counters;
+
+ do {
+ ctr--;
+ mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
+ } while (ctr > 0);
+}
+
+static int mipsxx_pmu_handle_shared_irq(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct perf_sample_data data;
+ unsigned int counters = mipspmu.num_counters;
+ u64 counter;
+ int handled = IRQ_NONE;
+ struct pt_regs *regs;
+
+ if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
+ return handled;
+ /*
+	 * First pause the local counters, so that while we are held at
+	 * the lock here all counters are already paused. When the lock
+	 * is taken because of perf_pmu_disable(), the timer interrupt
+	 * handler is simply delayed.
+	 *
+	 * See also mipspmu_start().
+ */
+ pause_local_counters();
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ read_lock(&pmuint_rwlock);
+#endif
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0, 0);
+
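+	/*
+	 * The HANDLE_COUNTER() cases below fall through, so a PMU with N
+	 * counters checks counters N-1 down to 0 for overflow.
+	 */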
+ switch (counters) {
+#define HANDLE_COUNTER(n) \
+ case n + 1: \
+ if (test_bit(n, cpuc->used_mask)) { \
+ counter = mipspmu.read_counter(n); \
+ if (counter & mipspmu.overflow) { \
+ handle_associated_event(cpuc, n, &data, regs); \
+ handled = IRQ_HANDLED; \
+ } \
+ }
+ HANDLE_COUNTER(3)
+ HANDLE_COUNTER(2)
+ HANDLE_COUNTER(1)
+ HANDLE_COUNTER(0)
+ }
+
+ /*
+ * Do all the work for the pending perf events. We can do this
+ * in here because the performance counter interrupt is a regular
+ * interrupt, not NMI.
+ */
+ if (handled == IRQ_HANDLED)
+ irq_work_run();
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ read_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+ return handled;
+}
+
+static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
+{
+ return mipsxx_pmu_handle_shared_irq();
+}
+
+/* 24K */
+#define IS_BOTH_COUNTERS_24K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+
+/* 34K */
+#define IS_BOTH_COUNTERS_34K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_34K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
+ (r) == 176 || ((b) >= 50 && (b) <= 55) || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
+#endif
+
+/* 74K */
+#define IS_BOTH_COUNTERS_74K_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* proAptiv */
+#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* 1004K */
+#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_1004K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
+ (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
+ (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
+#endif
+
+/* interAptiv */
+#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
+#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \
+ (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \
+ (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
+#endif
+
+/* BMIPS5000 */
+#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+
+/*
+ * Users can use raw events 0-255, where 0-127 select events on even
+ * counters and 128-255 select events on odd counters; bit 7 indicates
+ * the parity. So, for example, to request Event Num 15 on an odd
+ * counter (by referring to the user manual), add 128 to 15 and use
+ * 143 (0x8F) as the event config.
+ */
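+/*
+ * As a hypothetical usage example: the perf tool takes raw events in
+ * hex, so the 0x8F encoding above would typically be requested with
+ * something like "perf stat -e r8f".
+ */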
+static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ raw_event.event_id = base_id;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ /*
+		 * This actually does nothing: non-multithreading
+		 * CPUs will not check or calculate the range.
+ */
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_34K:
+ if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_74K:
+ case CPU_1074K:
+ if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_PROAPTIV:
+ if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_1004K:
+ if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_INTERAPTIV:
+ if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_BMIPS5000:
+ if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+ }
+
+ return &raw_event;
+}
+
+static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ raw_event.cntr_mask = CNTR_ALL;
+ raw_event.event_id = base_id;
+
+ if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
+ if (base_id > 0x42)
+ return ERR_PTR(-EOPNOTSUPP);
+ } else {
+ if (base_id > 0x3a)
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ switch (base_id) {
+ case 0x00:
+ case 0x0f:
+ case 0x1e:
+ case 0x1f:
+ case 0x2f:
+ case 0x34:
+ case 0x3b ... 0x3f:
+ return ERR_PTR(-EOPNOTSUPP);
+ default:
+ break;
+ }
+
+ return &raw_event;
+}
+
+static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+
+ /* Only 1-63 are defined */
+ if ((raw_id < 0x01) || (raw_id > 0x3f))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ raw_event.cntr_mask = CNTR_ALL;
+ raw_event.event_id = raw_id;
+
+ return &raw_event;
+}
+
+static int __init
+init_hw_perf_events(void)
+{
+ int counters, irq;
+ int counter_bits;
+
+ pr_info("Performance counters: ");
+
+ counters = n_counters();
+ if (counters == 0) {
+ pr_cont("No available PMU.\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
+ if (!cpu_has_mipsmt_pertccounters)
+ counters = counters_total_to_per_cpu(counters);
+#endif
+
+#ifdef MSC01E_INT_BASE
+ if (cpu_has_veic) {
+ /*
+ * Using platform specific interrupt controller defines.
+ */
+ irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+ } else {
+#endif
+ if ((cp0_perfcount_irq >= 0) &&
+ (cp0_compare_irq != cp0_perfcount_irq))
+ irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ irq = -1;
+#ifdef MSC01E_INT_BASE
+ }
+#endif
+
+ mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ mipspmu.name = "mips/24K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_34K:
+ mipspmu.name = "mips/34K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_74K:
+ mipspmu.name = "mips/74K";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_PROAPTIV:
+ mipspmu.name = "mips/proAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_1004K:
+ mipspmu.name = "mips/1004K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_1074K:
+ mipspmu.name = "mips/1074K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_INTERAPTIV:
+ mipspmu.name = "mips/interAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_LOONGSON1:
+ mipspmu.name = "mips/loongson1";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ mipspmu.name = "octeon";
+ mipspmu.general_event_map = &octeon_event_map;
+ mipspmu.cache_event_map = &octeon_cache_map;
+ mipspmu.map_raw_event = octeon_pmu_map_raw_event;
+ break;
+ case CPU_BMIPS5000:
+ mipspmu.name = "BMIPS5000";
+ mipspmu.general_event_map = &bmips5000_event_map;
+ mipspmu.cache_event_map = &bmips5000_cache_map;
+ break;
+ case CPU_XLP:
+ mipspmu.name = "xlp";
+ mipspmu.general_event_map = &xlp_event_map;
+ mipspmu.cache_event_map = &xlp_cache_map;
+ mipspmu.map_raw_event = xlp_pmu_map_raw_event;
+ break;
+ default:
+ pr_cont("Either hardware does not support performance "
+ "counters, or not yet implemented.\n");
+ return -ENODEV;
+ }
+
+ mipspmu.num_counters = counters;
+ mipspmu.irq = irq;
+
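+	/*
+	 * M_PERFCTL_WIDE set in perfctrl0 indicates 64-bit wide counters;
+	 * pick the matching accessors and period limits.
+	 */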
+ if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
+ mipspmu.max_period = (1ULL << 63) - 1;
+ mipspmu.valid_count = (1ULL << 63) - 1;
+ mipspmu.overflow = 1ULL << 63;
+ mipspmu.read_counter = mipsxx_pmu_read_counter_64;
+ mipspmu.write_counter = mipsxx_pmu_write_counter_64;
+ counter_bits = 64;
+ } else {
+ mipspmu.max_period = (1ULL << 31) - 1;
+ mipspmu.valid_count = (1ULL << 31) - 1;
+ mipspmu.overflow = 1ULL << 31;
+ mipspmu.read_counter = mipsxx_pmu_read_counter;
+ mipspmu.write_counter = mipsxx_pmu_write_counter;
+ counter_bits = 32;
+ }
+
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+ pr_cont("%s PMU enabled, %d %d-bit counters available to each "
+ "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
+ irq < 0 ? " (share with timer interrupt)" : "");
+
+ perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
+ return 0;
+}
+early_initcall(init_hw_perf_events);
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
new file mode 100644