Diffstat (limited to 'fs/afs/vlocation.c')
 fs/afs/vlocation.c | 1219
 1 file changed, 493 insertions(+), 726 deletions(-)
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 331f730a1fb..b6df2e83809 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -1,6 +1,6 @@
-/* vlocation.c: volume location management
+/* AFS volume location management
*
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -11,132 +11,66 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-#include "volume.h"
-#include "cell.h"
-#include "cmservice.h"
-#include "fsclient.h"
-#include "vlclient.h"
-#include "kafstimod.h"
-#include <rxrpc/connection.h>
+#include <linux/init.h>
+#include <linux/sched.h>
#include "internal.h"
-#define AFS_VLDB_TIMEOUT HZ*1000
+static unsigned afs_vlocation_timeout = 10; /* volume location timeout in seconds */
+static unsigned afs_vlocation_update_timeout = 10 * 60;
-static void afs_vlocation_update_timer(struct afs_timer *timer);
-static void afs_vlocation_update_attend(struct afs_async_op *op);
-static void afs_vlocation_update_discard(struct afs_async_op *op);
-static void __afs_put_vlocation(struct afs_vlocation *vlocation);
+static void afs_vlocation_reaper(struct work_struct *);
+static void afs_vlocation_updater(struct work_struct *);
-static void __afs_vlocation_timeout(struct afs_timer *timer)
-{
- struct afs_vlocation *vlocation =
- list_entry(timer, struct afs_vlocation, timeout);
+static LIST_HEAD(afs_vlocation_updates);
+static LIST_HEAD(afs_vlocation_graveyard);
+static DEFINE_SPINLOCK(afs_vlocation_updates_lock);
+static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock);
+static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper);
+static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater);
+static struct workqueue_struct *afs_vlocation_update_worker;
- _debug("VL TIMEOUT [%s{u=%d}]",
- vlocation->vldb.name, atomic_read(&vlocation->usage));
-
- afs_vlocation_do_timeout(vlocation);
-}
-
-static const struct afs_timer_ops afs_vlocation_timer_ops = {
- .timed_out = __afs_vlocation_timeout,
-};
-
-static const struct afs_timer_ops afs_vlocation_update_timer_ops = {
- .timed_out = afs_vlocation_update_timer,
-};
-
-static const struct afs_async_op_ops afs_vlocation_update_op_ops = {
- .attend = afs_vlocation_update_attend,
- .discard = afs_vlocation_update_discard,
-};
-
-static LIST_HEAD(afs_vlocation_update_pendq); /* queue of VLs awaiting update */
-static struct afs_vlocation *afs_vlocation_update; /* VL currently being updated */
-static DEFINE_SPINLOCK(afs_vlocation_update_lock); /* lock guarding update queue */
-
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vlocation_cache_match(void *target,
- const void *entry);
-static void afs_vlocation_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_vlocation_cache_index_def = {
- .name = "vldb",
- .data_size = sizeof(struct afs_cache_vlocation),
- .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
- .match = afs_vlocation_cache_match,
- .update = afs_vlocation_cache_update,
-};
-#endif
-
-/*****************************************************************************/
/*
* iterate through the VL servers in a cell until one of them admits knowing
* about the volume in question
- * - caller must have cell->vl_sem write-locked
*/
-static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation,
- const char *name,
- unsigned namesz,
+static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl,
+ struct key *key,
struct afs_cache_vlocation *vldb)
{
- struct afs_server *server = NULL;
- struct afs_cell *cell = vlocation->cell;
+ struct afs_cell *cell = vl->cell;
+ struct in_addr addr;
int count, ret;
- _enter("%s,%*.*s,%u", cell->name, namesz, namesz, name, namesz);
+ _enter("%s,%s", cell->name, vl->vldb.name);
+ down_write(&vl->cell->vl_sem);
ret = -ENOMEDIUM;
for (count = cell->vl_naddrs; count > 0; count--) {
- _debug("CellServ[%hu]: %08x",
- cell->vl_curr_svix,
- cell->vl_addrs[cell->vl_curr_svix].s_addr);
-
- /* try and create a server */
- ret = afs_server_lookup(cell,
- &cell->vl_addrs[cell->vl_curr_svix],
- &server);
- switch (ret) {
- case 0:
- break;
- case -ENOMEM:
- case -ENONET:
- goto out;
- default:
- goto rotate;
- }
+ addr = cell->vl_addrs[cell->vl_curr_svix];
+
+ _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
/* attempt to access the VL server */
- ret = afs_rxvl_get_entry_by_name(server, name, namesz, vldb);
+ ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb,
+ &afs_sync_call);
switch (ret) {
case 0:
- afs_put_server(server);
goto out;
case -ENOMEM:
case -ENONET:
case -ENETUNREACH:
case -EHOSTUNREACH:
case -ECONNREFUSED:
- down_write(&server->sem);
- if (server->vlserver) {
- rxrpc_put_connection(server->vlserver);
- server->vlserver = NULL;
- }
- up_write(&server->sem);
- afs_put_server(server);
if (ret == -ENOMEM || ret == -ENONET)
goto out;
goto rotate;
case -ENOMEDIUM:
- afs_put_server(server);
+ case -EKEYREJECTED:
+ case -EKEYEXPIRED:
goto out;
default:
- afs_put_server(server);
- ret = -ENOMEDIUM;
+ ret = -EIO;
goto rotate;
}
@@ -146,76 +80,66 @@ static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation,
cell->vl_curr_svix %= cell->vl_naddrs;
}
- out:
+out:
+ up_write(&vl->cell->vl_sem);
_leave(" = %d", ret);
return ret;
+}
-} /* end afs_vlocation_access_vl_by_name() */
-
-/*****************************************************************************/
/*
* iterate through the VL servers in a cell until one of them admits knowing
* about the volume in question
- * - caller must have cell->vl_sem write-locked
*/
-static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation,
+static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
+ struct key *key,
afs_volid_t volid,
afs_voltype_t voltype,
struct afs_cache_vlocation *vldb)
{
- struct afs_server *server = NULL;
- struct afs_cell *cell = vlocation->cell;
+ struct afs_cell *cell = vl->cell;
+ struct in_addr addr;
int count, ret;
_enter("%s,%x,%d,", cell->name, volid, voltype);
+ down_write(&vl->cell->vl_sem);
ret = -ENOMEDIUM;
for (count = cell->vl_naddrs; count > 0; count--) {
- _debug("CellServ[%hu]: %08x",
- cell->vl_curr_svix,
- cell->vl_addrs[cell->vl_curr_svix].s_addr);
-
- /* try and create a server */
- ret = afs_server_lookup(cell,
- &cell->vl_addrs[cell->vl_curr_svix],
- &server);
- switch (ret) {
- case 0:
- break;
- case -ENOMEM:
- case -ENONET:
- goto out;
- default:
- goto rotate;
- }
+ addr = cell->vl_addrs[cell->vl_curr_svix];
+
+ _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
/* attempt to access the VL server */
- ret = afs_rxvl_get_entry_by_id(server, volid, voltype, vldb);
+ ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb,
+ &afs_sync_call);
switch (ret) {
case 0:
- afs_put_server(server);
goto out;
case -ENOMEM:
case -ENONET:
case -ENETUNREACH:
case -EHOSTUNREACH:
case -ECONNREFUSED:
- down_write(&server->sem);
- if (server->vlserver) {
- rxrpc_put_connection(server->vlserver);
- server->vlserver = NULL;
- }
- up_write(&server->sem);
- afs_put_server(server);
if (ret == -ENOMEM || ret == -ENONET)
goto out;
goto rotate;
+ case -EBUSY:
+ vl->upd_busy_cnt++;
+ if (vl->upd_busy_cnt <= 3) {
+ if (vl->upd_busy_cnt > 1) {
+ /* second+ BUSY - sleep a little bit */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ __set_current_state(TASK_RUNNING);
+ }
+ continue;
+ }
+ break;
case -ENOMEDIUM:
- afs_put_server(server);
- goto out;
+ vl->upd_rej_cnt++;
+ goto rotate;
default:
- afs_put_server(server);
- ret = -ENOMEDIUM;
+ ret = -EIO;
goto rotate;
}
@@ -223,730 +147,573 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation,
rotate:
cell->vl_curr_svix++;
cell->vl_curr_svix %= cell->vl_naddrs;
+ vl->upd_busy_cnt = 0;
+ }
+
+out:
+ if (ret < 0 && vl->upd_rej_cnt > 0) {
+ printk(KERN_NOTICE "kAFS:"
+ " Active volume no longer valid '%s'\n",
+ vl->vldb.name);
+ vl->valid = 0;
+ ret = -ENOMEDIUM;
}
- out:
+ up_write(&vl->cell->vl_sem);
_leave(" = %d", ret);
return ret;
+}
-} /* end afs_vlocation_access_vl_by_id() */
-
-/*****************************************************************************/
/*
- * lookup volume location
- * - caller must have cell->vol_sem write-locked
- * - iterate through the VL servers in a cell until one of them admits knowing
- * about the volume in question
- * - lookup in the local cache if not able to find on the VL server
- * - insert/update in the local cache if did get a VL response
+ * allocate a volume location record
*/
-int afs_vlocation_lookup(struct afs_cell *cell,
- const char *name,
- unsigned namesz,
- struct afs_vlocation **_vlocation)
+static struct afs_vlocation *afs_vlocation_alloc(struct afs_cell *cell,
+ const char *name,
+ size_t namesz)
{
- struct afs_cache_vlocation vldb;
- struct afs_vlocation *vlocation;
- afs_voltype_t voltype;
- afs_volid_t vid;
- int active = 0, ret;
-
- _enter("{%s},%*.*s,%u,", cell->name, namesz, namesz, name, namesz);
-
- if (namesz > sizeof(vlocation->vldb.name)) {
- _leave(" = -ENAMETOOLONG");
- return -ENAMETOOLONG;
- }
-
- /* search the cell's active list first */
- list_for_each_entry(vlocation, &cell->vl_list, link) {
- if (namesz < sizeof(vlocation->vldb.name) &&
- vlocation->vldb.name[namesz] != '\0')
- continue;
-
- if (memcmp(vlocation->vldb.name, name, namesz) == 0)
- goto found_in_memory;
- }
-
- /* search the cell's graveyard list second */
- spin_lock(&cell->vl_gylock);
- list_for_each_entry(vlocation, &cell->vl_graveyard, link) {
- if (namesz < sizeof(vlocation->vldb.name) &&
- vlocation->vldb.name[namesz] != '\0')
- continue;
-
- if (memcmp(vlocation->vldb.name, name, namesz) == 0)
- goto found_in_graveyard;
- }
- spin_unlock(&cell->vl_gylock);
-
- /* not in the cell's in-memory lists - create a new record */
- vlocation = kmalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
- if (!vlocation)
- return -ENOMEM;
-
- memset(vlocation, 0, sizeof(struct afs_vlocation));
- atomic_set(&vlocation->usage, 1);
- INIT_LIST_HEAD(&vlocation->link);
- rwlock_init(&vlocation->lock);
- memcpy(vlocation->vldb.name, name, namesz);
-
- afs_timer_init(&vlocation->timeout, &afs_vlocation_timer_ops);
- afs_timer_init(&vlocation->upd_timer, &afs_vlocation_update_timer_ops);
- afs_async_op_init(&vlocation->upd_op, &afs_vlocation_update_op_ops);
-
- afs_get_cell(cell);
- vlocation->cell = cell;
-
- list_add_tail(&vlocation->link, &cell->vl_list);
-
-#ifdef AFS_CACHING_SUPPORT
- /* we want to store it in the cache, plus it might already be
- * encached */
- cachefs_acquire_cookie(cell->cache,
- &afs_volume_cache_index_def,
- vlocation,
- &vlocation->cache);
-
- if (vlocation->valid)
- goto found_in_cache;
-#endif
-
- /* try to look up an unknown volume in the cell VL databases by name */
- ret = afs_vlocation_access_vl_by_name(vlocation, name, namesz, &vldb);
- if (ret < 0) {
- printk("kAFS: failed to locate '%*.*s' in cell '%s'\n",
- namesz, namesz, name, cell->name);
- goto error;
+ struct afs_vlocation *vl;
+
+ vl = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
+ if (vl) {
+ vl->cell = cell;
+ vl->state = AFS_VL_NEW;
+ atomic_set(&vl->usage, 1);
+ INIT_LIST_HEAD(&vl->link);
+ INIT_LIST_HEAD(&vl->grave);
+ INIT_LIST_HEAD(&vl->update);
+ init_waitqueue_head(&vl->waitq);
+ spin_lock_init(&vl->lock);
+ memcpy(vl->vldb.name, name, namesz);
}
- goto found_on_vlserver;
-
- found_in_graveyard:
- /* found in the graveyard - resurrect */
- _debug("found in graveyard");
- atomic_inc(&vlocation->usage);
- list_move_tail(&vlocation->link, &cell->vl_list);
- spin_unlock(&cell->vl_gylock);
-
- afs_kafstimod_del_timer(&vlocation->timeout);
- goto active;
-
- found_in_memory:
- /* found in memory - check to see if it's active */
- _debug("found in memory");
- atomic_inc(&vlocation->usage);
+ _leave(" = %p", vl);
+ return vl;
+}
- active:
- active = 1;
+/*
+ * update record if we found it in the cache
+ */
+static int afs_vlocation_update_record(struct afs_vlocation *vl,
+ struct key *key,
+ struct afs_cache_vlocation *vldb)
+{
+ afs_voltype_t voltype;
+ afs_volid_t vid;
+ int ret;
-#ifdef AFS_CACHING_SUPPORT
- found_in_cache:
-#endif
/* try to look up a cached volume in the cell VL databases by ID */
- _debug("found in cache");
-
_debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
- vlocation->vldb.name,
- vlocation->vldb.vidmask,
- ntohl(vlocation->vldb.servers[0].s_addr),
- vlocation->vldb.srvtmask[0],
- ntohl(vlocation->vldb.servers[1].s_addr),
- vlocation->vldb.srvtmask[1],
- ntohl(vlocation->vldb.servers[2].s_addr),
- vlocation->vldb.srvtmask[2]
- );
+ vl->vldb.name,
+ vl->vldb.vidmask,
+ ntohl(vl->vldb.servers[0].s_addr),
+ vl->vldb.srvtmask[0],
+ ntohl(vl->vldb.servers[1].s_addr),
+ vl->vldb.srvtmask[1],
+ ntohl(vl->vldb.servers[2].s_addr),
+ vl->vldb.srvtmask[2]);
_debug("Vids: %08x %08x %08x",
- vlocation->vldb.vid[0],
- vlocation->vldb.vid[1],
- vlocation->vldb.vid[2]);
+ vl->vldb.vid[0],
+ vl->vldb.vid[1],
+ vl->vldb.vid[2]);
- if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) {
- vid = vlocation->vldb.vid[0];
+ if (vl->vldb.vidmask & AFS_VOL_VTM_RW) {
+ vid = vl->vldb.vid[0];
voltype = AFSVL_RWVOL;
- }
- else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) {
- vid = vlocation->vldb.vid[1];
+ } else if (vl->vldb.vidmask & AFS_VOL_VTM_RO) {
+ vid = vl->vldb.vid[1];
voltype = AFSVL_ROVOL;
- }
- else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) {
- vid = vlocation->vldb.vid[2];
+ } else if (vl->vldb.vidmask & AFS_VOL_VTM_BAK) {
+ vid = vl->vldb.vid[2];
voltype = AFSVL_BACKVOL;
- }
- else {
+ } else {
BUG();
vid = 0;
voltype = 0;
}
- ret = afs_vlocation_access_vl_by_id(vlocation, vid, voltype, &vldb);
+ /* contact the server to make sure the volume is still available
+ * - TODO: need to handle disconnected operation here
+ */
+ ret = afs_vlocation_access_vl_by_id(vl, key, vid, voltype, vldb);
switch (ret) {
/* net error */
default:
- printk("kAFS: failed to volume '%*.*s' (%x) up in '%s': %d\n",
- namesz, namesz, name, vid, cell->name, ret);
- goto error;
+ printk(KERN_WARNING "kAFS:"
+ " failed to update volume '%s' (%x) up in '%s': %d\n",
+ vl->vldb.name, vid, vl->cell->name, ret);
+ _leave(" = %d", ret);
+ return ret;
/* pulled from local cache into memory */
case 0:
- goto found_on_vlserver;
+ _leave(" = 0");
+ return 0;
/* uh oh... looks like the volume got deleted */
case -ENOMEDIUM:
- printk("kAFS: volume '%*.*s' (%x) does not exist '%s'\n",
- namesz, namesz, name, vid, cell->name);
+ printk(KERN_ERR "kAFS:"
+ " volume '%s' (%x) does not exist '%s'\n",
+ vl->vldb.name, vid, vl->cell->name);
/* TODO: make existing record unavailable */
- goto error;
+ _leave(" = %d", ret);
+ return ret;
}
+}
- found_on_vlserver:
- _debug("Done VL Lookup: %*.*s %02x { %08x(%x) %08x(%x) %08x(%x) }",
- namesz, namesz, name,
- vldb.vidmask,
- ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0],
- ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1],
- ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2]
- );
-
- _debug("Vids: %08x %08x %08x", vldb.vid[0], vldb.vid[1], vldb.vid[2]);
-
- if ((namesz < sizeof(vlocation->vldb.name) &&
- vlocation->vldb.name[namesz] != '\0') ||
- memcmp(vldb.name, name, namesz) != 0)
- printk("kAFS: name of volume '%*.*s' changed to '%s' on server\n",
- namesz, namesz, name, vldb.name);
-
- memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb));
+/*
+ * apply the update to a VL record
+ */
+static void afs_vlocation_apply_update(struct afs_vlocation *vl,
+ struct afs_cache_vlocation *vldb)
+{
+ _debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
+ vldb->name, vldb->vidmask,
+ ntohl(vldb->servers[0].s_addr), vldb->srvtmask[0],
+ ntohl(vldb->servers[1].s_addr), vldb->srvtmask[1],
+ ntohl(vldb->servers[2].s_addr), vldb->srvtmask[2]);
- afs_kafstimod_add_timer(&vlocation->upd_timer, 10 * HZ);
+ _debug("Vids: %08x %08x %08x",
+ vldb->vid[0], vldb->vid[1], vldb->vid[2]);
-#ifdef AFS_CACHING_SUPPORT
- /* update volume entry in local cache */
- cachefs_update_cookie(vlocation->cache);
-#endif
+ if (strcmp(vldb->name, vl->vldb.name) != 0)
+ printk(KERN_NOTICE "kAFS:"
+ " name of volume '%s' changed to '%s' on server\n",
+ vl->vldb.name, vldb->name);
- *_vlocation = vlocation;
- _leave(" = 0 (%p)",vlocation);
- return 0;
+ vl->vldb = *vldb;
- error:
- if (vlocation) {
- if (active) {
- __afs_put_vlocation(vlocation);
- }
- else {
- list_del(&vlocation->link);
-#ifdef AFS_CACHING_SUPPORT
- cachefs_relinquish_cookie(vlocation->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_update_cookie(vl->cache);
#endif
- afs_put_cell(vlocation->cell);
- kfree(vlocation);
- }
- }
-
- _leave(" = %d", ret);
- return ret;
-} /* end afs_vlocation_lookup() */
+}
-/*****************************************************************************/
/*
- * finish using a volume location record
- * - caller must have cell->vol_sem write-locked
+ * fill in a volume location record, consulting the cache and the VL server
+ * both
*/
-static void __afs_put_vlocation(struct afs_vlocation *vlocation)
+static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
+ struct key *key)
{
- struct afs_cell *cell;
+ struct afs_cache_vlocation vldb;
+ int ret;
- if (!vlocation)
- return;
+ _enter("");
- _enter("%s", vlocation->vldb.name);
+ ASSERTCMP(vl->valid, ==, 0);
- cell = vlocation->cell;
+ memset(&vldb, 0, sizeof(vldb));
- /* sanity check */
- BUG_ON(atomic_read(&vlocation->usage) <= 0);
+ /* see if we have an in-cache copy (will set vl->valid if there is) */
+#ifdef CONFIG_AFS_FSCACHE
+ vl->cache = fscache_acquire_cookie(vl->cell->cache,
+ &afs_vlocation_cache_index_def, vl,
+ true);
+#endif
- spin_lock(&cell->vl_gylock);
- if (likely(!atomic_dec_and_test(&vlocation->usage))) {
- spin_unlock(&cell->vl_gylock);
- _leave("");
- return;
+ if (vl->valid) {
+ /* try to update a known volume in the cell VL databases by
+ * ID as the name may have changed */
+ _debug("found in cache");
+ ret = afs_vlocation_update_record(vl, key, &vldb);
+ } else {
+ /* try to look up an unknown volume in the cell VL databases by
+ * name */
+ ret = afs_vlocation_access_vl_by_name(vl, key, &vldb);
+ if (ret < 0) {
+ printk("kAFS: failed to locate '%s' in cell '%s'\n",
+ vl->vldb.name, vl->cell->name);
+ return ret;
+ }
}
- /* move to graveyard queue */
- list_move_tail(&vlocation->link,&cell->vl_graveyard);
-
- /* remove from pending timeout queue (refcounted if actually being
- * updated) */
- list_del_init(&vlocation->upd_op.link);
-
- /* time out in 10 secs */
- afs_kafstimod_del_timer(&vlocation->upd_timer);
- afs_kafstimod_add_timer(&vlocation->timeout, 10 * HZ);
-
- spin_unlock(&cell->vl_gylock);
-
- _leave(" [killed]");
-} /* end __afs_put_vlocation() */
+ afs_vlocation_apply_update(vl, &vldb);
+ _leave(" = 0");
+ return 0;
+}
-/*****************************************************************************/
/*
- * finish using a volume location record
+ * queue a vlocation record for updates
*/
-void afs_put_vlocation(struct afs_vlocation *vlocation)
+static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
{
- if (vlocation) {
- struct afs_cell *cell = vlocation->cell;
+ struct afs_vlocation *xvl;
- down_write(&cell->vl_sem);
- __afs_put_vlocation(vlocation);
- up_write(&cell->vl_sem);
+ /* wait at least 10 minutes before updating... */
+ vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+
+ spin_lock(&afs_vlocation_updates_lock);
+
+ if (!list_empty(&afs_vlocation_updates)) {
+ /* ... but wait at least 1 second more than the newest record
+ * already queued so that we don't spam the VL server suddenly
+ * with lots of requests
+ */
+ xvl = list_entry(afs_vlocation_updates.prev,
+ struct afs_vlocation, update);
+ if (vl->update_at <= xvl->update_at)
+ vl->update_at = xvl->update_at + 1;
+ } else {
+ queue_delayed_work(afs_vlocation_update_worker,
+ &afs_vlocation_update,
+ afs_vlocation_update_timeout * HZ);
}
-} /* end afs_put_vlocation() */
-/*****************************************************************************/
+ list_add_tail(&vl->update, &afs_vlocation_updates);
+ spin_unlock(&afs_vlocation_updates_lock);
+}
+
/*
- * timeout vlocation record
- * - removes from the cell's graveyard if the usage count is zero
+ * lookup volume location
+ * - iterate through the VL servers in a cell until one of them admits knowing
+ * about the volume in question
+ * - lookup in the local cache if not able to find on the VL server
+ * - insert/update in the local cache if did get a VL response
*/
-void afs_vlocation_do_timeout(struct afs_vlocation *vlocation)
+struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
+ struct key *key,
+ const char *name,
+ size_t namesz)
{
- struct afs_cell *cell;
-
- _enter("%s", vlocation->vldb.name);
+ struct afs_vlocation *vl;
+ int ret;
- cell = vlocation->cell;
+ _enter("{%s},{%x},%*.*s,%zu",
+ cell->name, key_serial(key),
+ (int) namesz, (int) namesz, name, namesz);
- BUG_ON(atomic_read(&vlocation->usage) < 0);
+ if (namesz >= sizeof(vl->vldb.name)) {
+ _leave(" = -ENAMETOOLONG");
+ return ERR_PTR(-ENAMETOOLONG);
+ }
- /* remove from graveyard if still dead */
- spin_lock(&cell->vl_gylock);
- if (atomic_read(&vlocation->usage) == 0)
- list_del_init(&vlocation->link);
- else
- vlocation = NULL;
- spin_unlock(&cell->vl_gylock);
+ /* see if we have an in-memory copy first */
+ down_write(&cell->vl_sem);
+ spin_lock(&cell->vl_lock);
+ list_for_each_entry(vl, &cell->vl_list, link) {
+ if (vl->vldb.name[namesz] != '\0')
+ continue;
+ if (memcmp(vl->vldb.name, name, namesz) == 0)
+ goto found_in_memory;
+ }
+ spin_unlock(&cell->vl_lock);
- if (!vlocation) {
- _leave("");
- return; /* resurrected */
+ /* not in the cell's in-memory lists - create a new record */
+ vl = afs_vlocation_alloc(cell, name, namesz);
+ if (!vl) {
+ up_write(&cell->vl_sem);
+ return ERR_PTR(-ENOMEM);
}
- /* we can now destroy it properly */
-#ifdef AFS_CACHING_SUPPORT
- cachefs_relinquish_cookie(vlocation->cache, 0);
-#endif
- afs_put_cell(cell);
+ afs_get_cell(cell);
- kfree(vlocation);
+ list_add_tail(&vl->link, &cell->vl_list);
+ vl->state = AFS_VL_CREATING;
+ up_write(&cell->vl_sem);
- _leave(" [destroyed]");
-} /* end afs_vlocation_do_timeout() */
+fill_in_record:
+ ret = afs_vlocation_fill_in_record(vl, key);
+ if (ret < 0)
+ goto error_abandon;
+ spin_lock(&vl->lock);
+ vl->state = AFS_VL_VALID;
+ spin_unlock(&vl->lock);
+ wake_up(&vl->waitq);
-/*****************************************************************************/
-/*
- * send an update operation to the currently selected server
- */
-static int afs_vlocation_update_begin(struct afs_vlocation *vlocation)
-{
- afs_voltype_t voltype;
- afs_volid_t vid;
- int ret;
+ /* update volume entry in local cache */
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_update_cookie(vl->cache);
+#endif
- _enter("%s{ufs=%u ucs=%u}",
- vlocation->vldb.name,
- vlocation->upd_first_svix,
- vlocation->upd_curr_svix);
+ /* schedule for regular updates */
+ afs_vlocation_queue_for_updates(vl);
+ goto success;
- /* try to look up a cached volume in the cell VL databases by ID */
- if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) {
- vid = vlocation->vldb.vid[0];
- voltype = AFSVL_RWVOL;
- }
- else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) {
- vid = vlocation->vldb.vid[1];
- voltype = AFSVL_ROVOL;
- }
- else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) {
- vid = vlocation->vldb.vid[2];
- voltype = AFSVL_BACKVOL;
- }
- else {
- BUG();
- vid = 0;
- voltype = 0;
+found_in_memory:
+ /* found in memory */
+ _debug("found in memory");
+ atomic_inc(&vl->usage);
+ spin_unlock(&cell->vl_lock);
+ if (!list_empty(&vl->grave)) {
+ spin_lock(&afs_vlocation_graveyard_lock);
+ list_del_init(&vl->grave);
+ spin_unlock(&afs_vlocation_graveyard_lock);
}
+ up_write(&cell->vl_sem);
- /* contact the chosen server */
- ret = afs_server_lookup(
- vlocation->cell,
- &vlocation->cell->vl_addrs[vlocation->upd_curr_svix],
- &vlocation->upd_op.server);
+ /* see if it was an abandoned record that we might try filling in */
+ spin_lock(&vl->lock);
+ while (vl->state != AFS_VL_VALID) {
+ afs_vlocation_state_t state = vl->state;
- switch (ret) {
- case 0:
- break;
- case -ENOMEM:
- case -ENONET:
- default:
- _leave(" = %d", ret);
- return ret;
- }
+ _debug("invalid [state %d]", state);
- /* initiate the update operation */
- ret = afs_rxvl_get_entry_by_id_async(&vlocation->upd_op, vid, voltype);
- if (ret < 0) {
- _leave(" = %d", ret);
- return ret;
- }
+ if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
+ vl->state = AFS_VL_CREATING;
+ spin_unlock(&vl->lock);
+ goto fill_in_record;
+ }
+
+ /* must now wait for creation or update by someone else to
+ * complete */
+ _debug("wait");
+ spin_unlock(&vl->lock);
+ ret = wait_event_interruptible(vl->waitq,
+ vl->state == AFS_VL_NEW ||
+ vl->state == AFS_VL_VALID ||
+ vl->state == AFS_VL_NO_VOLUME);
+ if (ret < 0)
+ goto error;
+ spin_lock(&vl->lock);
+ }
+ spin_unlock(&vl->lock);
+
+success:
+ _leave(" = %p", vl);
+ return vl;
+
+error_abandon:
+ spin_lock(&vl->lock);
+ vl->state = AFS_VL_NEW;
+ spin_unlock(&vl->lock);
+ wake_up(&vl->waitq);
+error:
+ ASSERT(vl != NULL);
+ afs_put_vlocation(vl);
_leave(" = %d", ret);
- return ret;
-} /* end afs_vlocation_update_begin() */
+ return ERR_PTR(ret);
+}
-/*****************************************************************************/
/*
- * abandon updating a VL record
- * - does not restart the update timer
+ * finish using a volume location record
*/
-static void afs_vlocation_update_abandon(struct afs_vlocation *vlocation,
- afs_vlocation_upd_t state,
- int ret)
+void afs_put_vlocation(struct afs_vlocation *vl)
{
- _enter("%s,%u", vlocation->vldb.name, state);
+ if (!vl)
+ return;
- if (ret < 0)
- printk("kAFS: Abandoning VL update '%s': %d\n",
- vlocation->vldb.name, ret);
+ _enter("%s", vl->vldb.name);
- /* discard the server record */
- afs_put_server(vlocation->upd_op.server);
- vlocation->upd_op.server = NULL;
+ ASSERTCMP(atomic_read(&vl->usage), >, 0);
- spin_lock(&afs_vlocation_update_lock);
- afs_vlocation_update = NULL;
- vlocation->upd_state = state;
+ if (likely(!atomic_dec_and_test(&vl->usage))) {
+ _leave("");
+ return;
+ }
- /* TODO: start updating next VL record on pending list */
+ spin_lock(&afs_vlocation_graveyard_lock);
+ if (atomic_read(&vl->usage) == 0) {
+ _debug("buried");
+ list_move_tail(&vl->grave, &afs_vlocation_graveyard);
+ vl->time_of_death = get_seconds();
+ queue_delayed_work(afs_wq, &afs_vlocation_reap,
+ afs_vlocation_timeout * HZ);
+
+ /* suspend updates on this record */
+ if (!list_empty(&vl->update)) {
+ spin_lock(&afs_vlocation_updates_lock);
+ list_del_init(&vl->update);
+ spin_unlock(&afs_vlocation_updates_lock);
+ }
+ }
+ spin_unlock(&afs_vlocation_graveyard_lock);
+ _leave(" [killed?]");
+}
- spin_unlock(&afs_vlocation_update_lock);
+/*
+ * destroy a dead volume location record
+ */
+static void afs_vlocation_destroy(struct afs_vlocation *vl)
+{
+ _enter("%p", vl);
- _leave("");
-} /* end afs_vlocation_update_abandon() */
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_relinquish_cookie(vl->cache, 0);
+#endif
+ afs_put_cell(vl->cell);
+ kfree(vl);
+}
-/*****************************************************************************/
/*
- * handle periodic update timeouts and busy retry timeouts
- * - called from kafstimod
+ * reap dead volume location records
*/
-static void afs_vlocation_update_timer(struct afs_timer *timer)
+static void afs_vlocation_reaper(struct work_struct *work)
{
- struct afs_vlocation *vlocation =
- list_entry(timer, struct afs_vlocation, upd_timer);
- int ret;
+ LIST_HEAD(corpses);
+ struct afs_vlocation *vl;
+ unsigned long delay, expiry;
+ time_t now;
- _enter("%s", vlocation->vldb.name);
+ _enter("");
- /* only update if not in the graveyard (defend against putting too) */
- spin_lock(&vlocation->cell->vl_gylock);
+ now = get_seconds();
+ spin_lock(&afs_vlocation_graveyard_lock);
- if (!atomic_read(&vlocation->usage))
- goto out_unlock1;
+ while (!list_empty(&afs_vlocation_graveyard)) {
+ vl = list_entry(afs_vlocation_graveyard.next,
+ struct afs_vlocation, grave);
- spin_lock(&afs_vlocation_update_lock);
+ _debug("check %p", vl);
- /* if we were woken up due to EBUSY sleep then restart immediately if
- * possible or else jump to front of pending queue */
- if (vlocation->upd_state == AFS_VLUPD_BUSYSLEEP) {
- if (afs_vlocation_update) {
- list_add(&vlocation->upd_op.link,
- &afs_vlocation_update_pendq);
+ /* the queue is ordered most dead first */
+ expiry = vl->time_of_death + afs_vlocation_timeout;
+ if (expiry > now) {
+ delay = (expiry - now) * HZ;
+ _debug("delay %lu", delay);
+ mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
+ break;
}
- else {
- afs_get_vlocation(vlocation);
- afs_vlocation_update = vlocation;
- vlocation->upd_state = AFS_VLUPD_INPROGRESS;
+
+ spin_lock(&vl->cell->vl_lock);
+ if (atomic_read(&vl->usage) > 0) {
+ _debug("no reap");
+ list_del_init(&vl->grave);
+ } else {
+ _debug("reap");
+ list_move_tail(&vl->grave, &corpses);
+ list_del_init(&vl->link);
}
- goto out_unlock2;
+ spin_unlock(&vl->cell->vl_lock);
}
- /* put on pending queue if there's already another update in progress */
- if (afs_vlocation_update) {
- vlocation->upd_state = AFS_VLUPD_PENDING;
- list_add_tail(&vlocation->upd_op.link,
- &afs_vlocation_update_pendq);
- goto out_unlock2;
- }
+ spin_unlock(&afs_vlocation_graveyard_lock);
- /* hold a ref on it while actually updating */
- afs_get_vlocation(vlocation);
- afs_vlocation_update = vlocation;
- vlocation->upd_state = AFS_VLUPD_INPROGRESS;
-
- spin_unlock(&afs_vlocation_update_lock);
- spin_unlock(&vlocation->cell->vl_gylock);
-
- /* okay... we can start the update */
- _debug("BEGIN VL UPDATE [%s]", vlocation->vldb.name);
- vlocation->upd_first_svix = vlocation->cell->vl_curr_svix;
- vlocation->upd_curr_svix = vlocation->upd_first_svix;
- vlocation->upd_rej_cnt = 0;
- vlocation->upd_busy_cnt = 0;
-
- ret = afs_vlocation_update_begin(vlocation);
- if (ret < 0) {
- afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret);
- afs_kafstimod_add_timer(&vlocation->upd_timer,
- AFS_VLDB_TIMEOUT);
- afs_put_vlocation(vlocation);
+ /* now reap the corpses we've extracted */
+ while (!list_empty(&corpses)) {
+ vl = list_entry(corpses.next, struct afs_vlocation, grave);
+ list_del(&vl->grave);
+ afs_vlocation_destroy(vl);
}
_leave("");
- return;
-
- out_unlock2:
- spin_unlock(&afs_vlocation_update_lock);
- out_unlock1:
- spin_unlock(&vlocation->cell->vl_gylock);
- _leave("");
- return;
-
-} /* end afs_vlocation_update_timer() */
+}
-/*****************************************************************************/
/*
- * attend to an update operation upon which an event happened
- * - called in kafsasyncd context
+ * initialise the VL update process
*/
-static void afs_vlocation_update_attend(struct afs_async_op *op)
+int __init afs_vlocation_update_init(void)
{
- struct afs_cache_vlocation vldb;
- struct afs_vlocation *vlocation =
- list_entry(op, struct afs_vlocation, upd_op);
- unsigned tmp;
- int ret;
-
- _enter("%s", vlocation->vldb.name);
-
- ret = afs_rxvl_get_entry_by_id_async2(op, &vldb);
- switch (ret) {
- case -EAGAIN:
- _leave(" [unfinished]");
- return;
-
- case 0:
- _debug("END VL UPDATE: %d\n", ret);
- vlocation->valid = 1;
-
- _debug("Done VL Lookup: %02x { %08x(%x) %08x(%x) %08x(%x) }",
- vldb.vidmask,
- ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0],
- ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1],
- ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2]
- );
-
- _debug("Vids: %08x %08x %08x",
- vldb.vid[0], vldb.vid[1], vldb.vid[2]);
-
- afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0);
-
- down_write(&vlocation->cell->vl_sem);
-
- /* actually update the cache */
- if (strncmp(vldb.name, vlocation->vldb.name,
- sizeof(vlocation->vldb.name)) != 0)
- printk("kAFS: name of volume '%s'"
- " changed to '%s' on server\n",
- vlocation->vldb.name, vldb.name);
-
- memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb));
-
-#if 0
- /* TODO update volume entry in local cache */
-#endif
-
- up_write(&vlocation->cell->vl_sem);
-
- if (ret < 0)
- printk("kAFS: failed to update local cache: %d\n", ret);
-
- afs_kafstimod_add_timer(&vlocation->upd_timer,
- AFS_VLDB_TIMEOUT);
- afs_put_vlocation(vlocation);
- _leave(" [found]");
- return;
-
- case -ENOMEDIUM:
- vlocation->upd_rej_cnt++;
- goto try_next;
-
- /* the server is locked - retry in a very short while */
- case -EBUSY:
- vlocation->upd_busy_cnt++;
- if (vlocation->upd_busy_cnt > 3)
- goto try_next; /* too many retries */
-
- afs_vlocation_update_abandon(vlocation,
- AFS_VLUPD_BUSYSLEEP, 0);
- afs_kafstimod_add_timer(&vlocation->upd_timer, HZ / 2);
- afs_put_vlocation(vlocation);
- _leave(" [busy]");
- return;
+ afs_vlocation_update_worker =
+ create_singlethread_workqueue("kafs_vlupdated");
+ return afs_vlocation_update_worker ? 0 : -ENOMEM;
+}
- case -ENETUNREACH:
- case -EHOSTUNREACH:
- case -ECONNREFUSED:
- case -EREMOTEIO:
- /* record bad vlserver info in the cell too
- * - TODO: use down_write_trylock() if available
- */
- if (vlocation->upd_curr_svix == vlocation->cell->vl_curr_svix)
- vlocation->cell->vl_curr_svix =
- vlocation->cell->vl_curr_svix %
- vlocation->cell->vl_naddrs;
+/*
+ * discard all the volume location records for rmmod
+ */
+void afs_vlocation_purge(void)
+{
+ afs_vlocation_timeout = 0;
- case -EBADRQC:
- case -EINVAL:
- case -EACCES:
- case -EBADMSG:
- goto try_next;
+ spin_lock(&afs_vlocation_updates_lock);
+ list_del_init(&afs_vlocation_updates);
+ spin_unlock(&afs_vlocation_updates_lock);
+ mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
+ destroy_workqueue(afs_vlocation_update_worker);
- default:
- goto abandon;
- }
+ mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
+}
- /* try contacting the next server */
- try_next:
- vlocation->upd_busy_cnt = 0;
+/*
+ * update a volume location
+ */
+static void afs_vlocation_updater(struct work_struct *work)
+{
+ struct afs_cache_vlocation vldb;
+ struct afs_vlocation *vl, *xvl;
+ time_t now;
+ long timeout;
+ int ret;
- /* discard the server record */
- afs_put_server(vlocation->upd_op.server);
- vlocation->upd_op.server = NULL;
+ _enter("");
- tmp = vlocation->cell->vl_naddrs;
- if (tmp == 0)
- goto abandon;
+ now = get_seconds();
- vlocation->upd_curr_svix++;
- if (vlocation->upd_curr_svix >= tmp)
- vlocation->upd_curr_svix = 0;
- if (vlocation->upd_first_svix >= tmp)
- vlocation->upd_first_svix = tmp - 1;
+ /* find a record to update */
+ spin_lock(&afs_vlocation_updates_lock);
+ for (;;) {
+ if (list_empty(&afs_vlocation_updates)) {
+ spin_unlock(&afs_vlocation_updates_lock);
+ _leave(" [nothing]");
+ return;
+ }
- /* move to the next server */
- if (vlocation->upd_curr_svix != vlocation->upd_first_svix) {
- afs_vlocation_update_begin(vlocation);
- _leave(" [next]");
- return;
+ vl = list_entry(afs_vlocation_updates.next,
+ struct afs_vlocation, update);
+ if (atomic_read(&vl->usage) > 0)
+ break;
+ list_del_init(&vl->update);
}
- /* run out of servers to try - was the volume rejected? */
- if (vlocation->upd_rej_cnt > 0) {
- printk("kAFS: Active volume no longer valid '%s'\n",
- vlocation->vldb.name);
- vlocation->valid = 0;
- afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0);
- afs_kafstimod_add_timer(&vlocation->upd_timer,
- AFS_VLDB_TIMEOUT);
- afs_put_vlocation(vlocation);
- _leave(" [invalidated]");
+ timeout = vl->update_at - now;
+ if (timeout > 0) {
+ queue_delayed_work(afs_vlocation_update_worker,
+ &afs_vlocation_update, timeout * HZ);
+ spin_unlock(&afs_vlocation_updates_lock);
+ _leave(" [nothing]");
return;
}
- /* abandon the update */
- abandon:
- afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret);
- afs_kafstimod_add_timer(&vlocation->upd_timer, HZ * 10);
- afs_put_vlocation(vlocation);
- _leave(" [abandoned]");
+ list_del_init(&vl->update);
+ atomic_inc(&vl->usage);
+ spin_unlock(&afs_vlocation_updates_lock);
-} /* end afs_vlocation_update_attend() */
+ /* we can now perform the update */
+ _debug("update %s", vl->vldb.name);
+ vl->state = AFS_VL_UPDATING;
+ vl->upd_rej_cnt = 0;
+ vl->upd_busy_cnt = 0;
-/*****************************************************************************/
-/*
- * deal with an update operation being discarded
- * - called in kafsasyncd context when it's dying due to rmmod
- * - the call has already been aborted and put()'d
- */
-static void afs_vlocation_update_discard(struct afs_async_op *op)
-{
- struct afs_vlocation *vlocation =
- list_entry(op, struct afs_vlocation, upd_op);
-
- _enter("%s", vlocation->vldb.name);
-
- afs_put_server(op->server);
- op->server = NULL;
+ ret = afs_vlocation_update_record(vl, NULL, &vldb);
+ spin_lock(&vl->lock);
+ switch (ret) {
+ case 0:
+ afs_vlocation_apply_update(vl, &vldb);
+ vl->state = AFS_VL_VALID;
+ break;
+ case -ENOMEDIUM:
+ vl->state = AFS_VL_VOLUME_DELETED;
+ break;
+ default:
+ vl->state = AFS_VL_UNCERTAIN;
+ break;
+ }
+ spin_unlock(&vl->lock);
+ wake_up(&vl->waitq);
- afs_put_vlocation(vlocation);
+ /* and then reschedule */
+ _debug("reschedule");
+ vl->update_at = get_seconds() + afs_vlocation_update_timeout;
- _leave("");
-} /* end afs_vlocation_update_discard() */
+ spin_lock(&afs_vlocation_updates_lock);
-/*****************************************************************************/
-/*
- * match a VLDB record stored in the cache
- * - may also load target from entry
- */
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vlocation_cache_match(void *target,
- const void *entry)
-{
- const struct afs_cache_vlocation *vldb = entry;
- struct afs_vlocation *vlocation = target;
-
- _enter("{%s},{%s}", vlocation->vldb.name, vldb->name);
-
- if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0
- ) {
- if (!vlocation->valid ||
- vlocation->vldb.rtime == vldb->rtime
- ) {
- vlocation->vldb = *vldb;
- vlocation->valid = 1;
- _leave(" = SUCCESS [c->m]");
- return CACHEFS_MATCH_SUCCESS;
- }
- /* need to update cache if cached info differs */
- else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) {
- /* delete if VIDs for this name differ */
- if (memcmp(&vlocation->vldb.vid,
- &vldb->vid,
- sizeof(vldb->vid)) != 0) {
- _leave(" = DELETE");
- return CACHEFS_MATCH_SUCCESS_DELETE;
- }
-
- _leave(" = UPDATE");
- return CACHEFS_MATCH_SUCCESS_UPDATE;
- }
- else {
- _leave(" = SUCCESS");
- return CACHEFS_MATCH_SUCCESS;
- }
+ if (!list_empty(&afs_vlocation_updates)) {
+ /* next update in 10 minutes, but wait at least 1 second more
+ * than the newest record already queued so that we don't spam
+ * the VL server suddenly with lots of requests
+ */
+ xvl = list_entry(afs_vlocation_updates.prev,
+ struct afs_vlocation, update);
+ if (vl->update_at <= xvl->update_at)
+ vl->update_at = xvl->update_at + 1;
+ xvl = list_entry(afs_vlocation_updates.next,
+ struct afs_vlocation, update);
+ timeout = xvl->update_at - now;
+ if (timeout < 0)
+ timeout = 0;
+ } else {
+ timeout = afs_vlocation_update_timeout;
}
- _leave(" = FAILED");
- return CACHEFS_MATCH_FAILED;
-} /* end afs_vlocation_cache_match() */
-#endif
+ ASSERT(list_empty(&vl->update));
-/*****************************************************************************/
-/*
- * update a VLDB record stored in the cache
- */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_vlocation_cache_update(void *source, void *entry)
-{
- struct afs_cache_vlocation *vldb = entry;
- struct afs_vlocation *vlocation = source;
-
- _enter("");
-
- *vldb = vlocation->vldb;
+ list_add_tail(&vl->update, &afs_vlocation_updates);
-} /* end afs_vlocation_cache_update() */
-#endif
+ _debug("timeout %ld", timeout);
+ queue_delayed_work(afs_vlocation_update_worker,
+ &afs_vlocation_update, timeout * HZ);
+ spin_unlock(&afs_vlocation_updates_lock);
+ afs_put_vlocation(vl);
+}
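The rewritten code above replaces the old kafstimod timers and kafsasyncd operations with delayed work items: a record whose usage count reaches zero is moved to a global graveyard list with a time of death, and a single delayed work item (afs_vlocation_reap) destroys whichever records have been dead longer than afs_vlocation_timeout, re-arming itself for the remainder otherwise. The following is a stripped-down sketch of that graveyard/reaper pattern, using hypothetical example_* names and the stock workqueue API rather than the AFS structures themselves; it is illustrative only, not the patch's code.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/workqueue.h>

#define EXAMPLE_TIMEOUT 10	/* seconds a dead record lingers (cf. afs_vlocation_timeout) */

struct example_rec {
	struct list_head	grave;
	unsigned long		time_of_death;
};

static LIST_HEAD(example_graveyard);
static DEFINE_SPINLOCK(example_graveyard_lock);
static void example_reaper(struct work_struct *);
static DECLARE_DELAYED_WORK(example_reap, example_reaper);

/* park a record whose refcount just hit zero and arm the reaper */
static void example_bury(struct example_rec *rec)
{
	spin_lock(&example_graveyard_lock);
	rec->time_of_death = get_seconds();
	list_add_tail(&rec->grave, &example_graveyard);
	spin_unlock(&example_graveyard_lock);
	queue_delayed_work(system_wq, &example_reap, EXAMPLE_TIMEOUT * HZ);
}

/* destroy whatever has been dead long enough; re-arm for the rest */
static void example_reaper(struct work_struct *work)
{
	struct example_rec *rec, *next;
	unsigned long now = get_seconds();

	spin_lock(&example_graveyard_lock);
	list_for_each_entry_safe(rec, next, &example_graveyard, grave) {
		unsigned long expiry = rec->time_of_death + EXAMPLE_TIMEOUT;

		if (expiry > now) {
			/* queue is ordered most-dead first, so stop here */
			mod_delayed_work(system_wq, &example_reap,
					 (expiry - now) * HZ);
			break;
		}
		list_del(&rec->grave);
		kfree(rec);
	}
	spin_unlock(&example_graveyard_lock);
}

The real afs_vlocation_reaper() additionally moves expired corpses to a local list and frees them outside the cell lock, but the arming and re-arming logic follows the same shape.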
Diffstat:
 arch/sparc/kernel/pcic.c | 121
 arch/sparc/kernel/perf_event.c | 23
 arch/sparc/kernel/pmc.c | 5
 arch/sparc/kernel/power.c | 4
 arch/sparc/kernel/process_32.c | 218
 arch/sparc/kernel/process_64.c | 225
 arch/sparc/kernel/prom.h | 2
 arch/sparc/kernel/prom_64.c | 61
 arch/sparc/kernel/prom_common.c | 5
 arch/sparc/kernel/psycho_common.h | 22
 arch/sparc/kernel/ptrace_32.c | 2
 arch/sparc/kernel/ptrace_64.c | 14
 arch/sparc/kernel/rtrap_64.S | 14
 arch/sparc/kernel/sbus.c | 6
 arch/sparc/kernel/setup_32.c | 6
 arch/sparc/kernel/setup_64.c | 16
 arch/sparc/kernel/signal32.c | 264
 arch/sparc/kernel/signal_32.c | 186
 arch/sparc/kernel/signal_64.c | 159
 arch/sparc/kernel/smp_32.c | 101
 arch/sparc/kernel/smp_64.c | 92
 arch/sparc/kernel/sparc_ksyms_32.c | 1
 arch/sparc/kernel/sparc_ksyms_64.c | 1
 arch/sparc/kernel/sun4d_irq.c | 18
 arch/sparc/kernel/sun4d_smp.c | 31
 arch/sparc/kernel/sun4m_irq.c | 2
 arch/sparc/kernel/sun4m_smp.c | 35
 arch/sparc/kernel/sun4v_tlb_miss.S | 2
 arch/sparc/kernel/sys32.S | 102
 arch/sparc/kernel/sys_sparc32.c | 322
 arch/sparc/kernel/sys_sparc_32.c | 109
 arch/sparc/kernel/sys_sparc_64.c | 187
 arch/sparc/kernel/syscalls.S | 74
 arch/sparc/kernel/sysfs.c | 12
 arch/sparc/kernel/systbls.h | 126
 arch/sparc/kernel/systbls_32.S | 5
 arch/sparc/kernel/systbls_64.S | 96
 arch/sparc/kernel/tadpole.c | 126
 arch/sparc/kernel/time_32.c | 10
 arch/sparc/kernel/time_64.c | 15
 arch/sparc/kernel/trampoline_32.S | 21
 arch/sparc/kernel/trampoline_64.S | 8
 arch/sparc/kernel/traps_32.c | 6
 arch/sparc/kernel/traps_64.c | 109
 arch/sparc/kernel/tsb.S | 41
 arch/sparc/kernel/unaligned_32.c | 4
 arch/sparc/kernel/unaligned_64.c | 30
 arch/sparc/kernel/us2e_cpufreq.c | 413
 arch/sparc/kernel/us3_cpufreq.c | 274
 arch/sparc/kernel/vio.c | 1
 arch/sparc/kernel/vmlinux.lds.S | 5
 arch/sparc/kernel/windows.c | 3
 arch/sparc/lib/Makefile | 3
 arch/sparc/lib/NG2memcpy.S | 1
 arch/sparc/lib/bitext.c | 6
 arch/sparc/lib/clear_page.S | 4
 arch/sparc/lib/copy_page.S | 4
 arch/sparc/lib/ksyms.c | 9
 arch/sparc/lib/usercopy.c | 9
 arch/sparc/math-emu/sfp-util_32.h | 20
 arch/sparc/math-emu/sfp-util_64.h | 12
 arch/sparc/mm/fault_32.c | 21
 arch/sparc/mm/fault_64.c | 135
 arch/sparc/mm/gup.c | 58
 arch/sparc/mm/hugetlbpage.c | 127
 arch/sparc/mm/hypersparc.S | 8
 arch/sparc/mm/init_32.c | 89
 arch/sparc/mm/init_64.c | 487
 arch/sparc/mm/init_64.h | 8
 arch/sparc/mm/io-unit.c | 21
 arch/sparc/mm/iommu.c | 27
 arch/sparc/mm/leon_mm.c | 4
 arch/sparc/mm/mm_32.h | 24
 arch/sparc/mm/srmmu.c | 36
 arch/sparc/mm/srmmu.h | 4
 arch/sparc/mm/swift.S | 8
 arch/sparc/mm/tlb.c | 103
 arch/sparc/mm/tsb.c | 85
 arch/sparc/mm/tsunami.S | 6
 arch/sparc/mm/ultra.S | 131
 arch/sparc/mm/viking.S | 10
 arch/sparc/net/bpf_jit_comp.c | 216
 arch/sparc/power/Makefile | 3
 arch/sparc/power/hibernate.c | 42
 arch/sparc/power/hibernate_asm.S | 131
 arch/sparc/prom/bootstr_32.c | 12
 arch/sparc/prom/misc_64.c | 5
 arch/sparc/prom/p1275.c | 1
 arch/sparc/prom/tree_64.c | 16
 271 files changed, 5066 insertions(+), 5668 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9f2edb5c555..407c87d9879 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -12,6 +12,8 @@ config 64BIT
config SPARC
bool
default y
+ select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
+ select ARCH_MIGHT_HAVE_PC_SERIO
select OF
select OF_PROMTREE
select HAVE_IDE
@@ -23,14 +25,11 @@ config SPARC
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select RTC_CLASS
select RTC_DRV_M48T59
- select HAVE_IRQ_WORK
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_GENERIC_HARDIRQS
+ select HAVE_ARCH_JUMP_LABEL if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
- select USE_GENERIC_SMP_HELPERS if SMP
select GENERIC_PCI_IOMAP
select HAVE_NMI_WATCHDOG if SPARC64
select HAVE_BPF_JIT
@@ -41,16 +40,18 @@ config SPARC
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select MODULES_USE_ELF_RELA
+ select ODD_RT_SIGACTION
+ select OLD_SIGSUSPEND
config SPARC32
def_bool !64BIT
select GENERIC_ATOMIC64
select CLZ_TAB
select HAVE_UID16
+ select OLD_SIGACTION
config SPARC64
def_bool 64BIT
- select ARCH_SUPPORTS_MSI
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_FP_TEST
@@ -60,10 +61,11 @@ config SPARC64
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select HAVE_SYSCALL_WRAPPERS
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_CONTEXT_TRACKING
select HAVE_DEBUG_KMEMLEAK
select RTC_DRV_CMOS
select RTC_DRV_BQ4802
@@ -75,18 +77,14 @@ config SPARC64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_C_RECORDMCOUNT
select NO_BOOTMEM
+ select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_SUPPORTS_ATOMIC_RMW
config ARCH_DEFCONFIG
string
default "arch/sparc/configs/sparc32_defconfig" if SPARC32
default "arch/sparc/configs/sparc64_defconfig" if SPARC64
-# CONFIG_BITS can be used at source level to get 32/64 bits
-config BITS
- int
- default 32 if SPARC32
- default 64 if SPARC64
-
config IOMMU_HELPER
bool
default y if SPARC64
@@ -103,6 +101,9 @@ config HAVE_LATENCYTOP_SUPPORT
bool
default y if SPARC64
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool y if SPARC64
+
config AUDIT_ARCH
bool
default y
@@ -138,14 +139,6 @@ config GENERIC_ISA_DMA
bool
default y if SPARC32
-config GENERIC_GPIO
- bool
- help
- Generic GPIO API support
-
-config ARCH_NO_VIRT_TO_BUS
- def_bool y
-
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y if SPARC64
@@ -162,10 +155,10 @@ config SMP
a system with only one CPU, say N. If you have a system with more
than one CPU, say Y.
- If you say N here, the kernel will run on single and multiprocessor
+ If you say N here, the kernel will run on uni- and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on many, but not all,
- singleprocessor machines. On a singleprocessor machine, the kernel
+ uniprocessor machines. On a uniprocessor machine, the kernel
will run faster if you say N here.
People using multiprocessor machines who say Y here should also say
@@ -197,7 +190,7 @@ config RWSEM_XCHGADD_ALGORITHM
config GENERIC_HWEIGHT
bool
- default y if !ULTRA_HAS_POPULATION_COUNT
+ default y
config GENERIC_CALIBRATE_DELAY
bool
@@ -252,7 +245,6 @@ config SECCOMP
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SPARC64 && SMP
- select HOTPLUG
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu/cpu#.
@@ -260,29 +252,6 @@ config HOTPLUG_CPU
if SPARC64
source "drivers/cpufreq/Kconfig"
-
-config US3_FREQ
- tristate "UltraSPARC-III CPU Frequency driver"
- depends on CPU_FREQ
- select CPU_FREQ_TABLE
- help
- This adds the CPUFreq driver for UltraSPARC-III processors.
-
- For details, take a look at <file:Documentation/cpu-freq>.
-
- If in doubt, say N.
-
-config US2E_FREQ
- tristate "UltraSPARC-IIe CPU Frequency driver"
- depends on CPU_FREQ
- select CPU_FREQ_TABLE
- help
- This adds the CPUFreq driver for UltraSPARC-IIe processors.
-
- For details, take a look at <file:Documentation/cpu-freq>.
-
- If in doubt, say N.
-
endif
config US3_MC
@@ -333,6 +302,10 @@ config ARCH_SPARSEMEM_DEFAULT
source "mm/Kconfig"
+if SPARC64
+source "kernel/power/Kconfig"
+endif
+
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on SPARC64 && SMP
@@ -413,6 +386,8 @@ config SERIAL_CONSOLE
config SPARC_LEON
bool "Sparc Leon processor family"
depends on SPARC32
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
---help---
If you say Y here if you are running on a SPARC-LEON processor.
The LEON processor is a synthesizable VHDL model of the
@@ -500,7 +475,18 @@ config LEON_PCI
depends on PCI && SPARC_LEON
default y
-config GRPCI2
+config SPARC_GRPCI1
+ bool "GRPCI Host Bridge Support"
+ depends on LEON_PCI
+ default y
+ help
+ Say Y here to include the GRPCI Host Bridge Driver. The GRPCI
+ PCI host controller is typically found in GRLIB SPARC32/LEON
+ systems. The driver has one property (all_pci_errors) controlled
+ from the bootloader that makes the GRPCI to generate interrupts
+ on detected PCI Parity and System errors.
+
+config SPARC_GRPCI2
bool "GRPCI2 Host Bridge Support"
depends on LEON_PCI
default y
@@ -524,12 +510,17 @@ config SUN_OPENPROMFS
Only choose N if you know in advance that you will not need to modify
OpenPROM settings on the running system.
-# Makefile helper
+# Makefile helpers
config SPARC64_PCI
bool
default y
depends on SPARC64 && PCI
+config SPARC64_PCI_MSI
+ bool
+ default y
+ depends on SPARC64_PCI && PCI_MSI
+
endmenu
menu "Executable file formats"
@@ -543,6 +534,7 @@ config COMPAT
select COMPAT_BINFMT_ELF
select HAVE_UID16
select ARCH_WANT_OLD_COMPAT_IPC
+ select COMPAT_OLD_SIGACTION
config SYSVIPC_COMPAT
bool
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 541b8b075c7..9ff423678cb 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -57,6 +57,7 @@ core-y += arch/sparc/
libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
+drivers-$(CONFIG_PM) += arch/sparc/power/
drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
boot := arch/sparc/boot
diff --git a/arch/sparc/boot/piggyback.c b/arch/sparc/boot/piggyback.c
index c0a798fcf03..bb7c95161d7 100644
--- a/arch/sparc/boot/piggyback.c
+++ b/arch/sparc/boot/piggyback.c
@@ -81,18 +81,18 @@ static void usage(void)
static int start_line(const char *line)
{
- if (strcmp(line + 8, " T _start\n") == 0)
+ if (strcmp(line + 10, " _start\n") == 0)
return 1;
- else if (strcmp(line + 16, " T _start\n") == 0)
+ else if (strcmp(line + 18, " _start\n") == 0)
return 1;
return 0;
}
static int end_line(const char *line)
{
- if (strcmp(line + 8, " A _end\n") == 0)
+ if (strcmp(line + 10, " _end\n") == 0)
return 1;
- else if (strcmp (line + 16, " A _end\n") == 0)
+ else if (strcmp (line + 18, " _end\n") == 0)
return 1;
return 0;
}
@@ -100,8 +100,8 @@ static int end_line(const char *line)
/*
* Find address for start and end in System.map.
* The file looks like this:
- * f0004000 T _start
- * f0379f79 A _end
+ * f0004000 ... _start
+ * f0379f79 ... _end
* 1234567890123456
* ^coloumn 1
* There is support for 64 bit addresses too.
diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S
index 23f6cbb910d..1cda8aa7cb8 100644
--- a/arch/sparc/crypto/aes_asm.S
+++ b/arch/sparc/crypto/aes_asm.S
@@ -1024,7 +1024,11 @@ ENTRY(aes_sparc64_ecb_encrypt_256)
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
-10: ldx [%o1 + 0x00], %g3
+10: ldd [%o0 + 0xd0], %f56
+ ldd [%o0 + 0xd8], %f58
+ ldd [%o0 + 0xe0], %f60
+ ldd [%o0 + 0xe8], %f62
+ ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
@@ -1128,9 +1132,9 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
ldx [%o0 - 0x10], %g1
subcc %o3, 0x10, %o3
+ ldx [%o0 - 0x08], %g2
be 10f
- ldx [%o0 - 0x08], %g2
- sub %o0, 0xf0, %o0
+ sub %o0, 0xf0, %o0
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
ldx [%o1 + 0x10], %o4
@@ -1154,7 +1158,11 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
-10: ldx [%o1 + 0x00], %g3
+10: ldd [%o0 + 0x18], %f56
+ ldd [%o0 + 0x10], %f58
+ ldd [%o0 + 0x08], %f60
+ ldd [%o0 + 0x00], %f62
+ ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
@@ -1511,11 +1519,11 @@ ENTRY(aes_sparc64_ctr_crypt_256)
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
- ldd [%o0 + 0xd0], %f56
+10: ldd [%o0 + 0xd0], %f56
ldd [%o0 + 0xd8], %f58
ldd [%o0 + 0xe0], %f60
ldd [%o0 + 0xe8], %f62
-10: xor %g1, %g3, %o5
+ xor %g1, %g3, %o5
MOVXTOD_O5_F0
xor %g2, %g7, %o5
MOVXTOD_O5_F2
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 3965d1d36df..df922f52d76 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -124,7 +124,7 @@ extern void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
-struct aes_ops aes128_ops = {
+static struct aes_ops aes128_ops = {
.encrypt = aes_sparc64_encrypt_128,
.decrypt = aes_sparc64_decrypt_128,
.load_encrypt_keys = aes_sparc64_load_encrypt_keys_128,
@@ -136,7 +136,7 @@ struct aes_ops aes128_ops = {
.ctr_crypt = aes_sparc64_ctr_crypt_128,
};
-struct aes_ops aes192_ops = {
+static struct aes_ops aes192_ops = {
.encrypt = aes_sparc64_encrypt_192,
.decrypt = aes_sparc64_decrypt_192,
.load_encrypt_keys = aes_sparc64_load_encrypt_keys_192,
@@ -148,7 +148,7 @@ struct aes_ops aes192_ops = {
.ctr_crypt = aes_sparc64_ctr_crypt_192,
};
-struct aes_ops aes256_ops = {
+static struct aes_ops aes256_ops = {
.encrypt = aes_sparc64_encrypt_256,
.decrypt = aes_sparc64_decrypt_256,
.load_encrypt_keys = aes_sparc64_load_encrypt_keys_256,
@@ -222,6 +222,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_encrypt_keys(&ctx->key[0]);
while ((nbytes = walk.nbytes)) {
@@ -251,6 +252,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_decrypt_keys(&ctx->key[0]);
key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
@@ -280,6 +282,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_encrypt_keys(&ctx->key[0]);
while ((nbytes = walk.nbytes)) {
@@ -309,6 +312,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_decrypt_keys(&ctx->key[0]);
key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
@@ -329,6 +333,22 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
return err;
}
+static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
+ struct blkcipher_walk *walk)
+{
+ u8 *ctrblk = walk->iv;
+ u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ unsigned int nbytes = walk->nbytes;
+
+ ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk,
+ keystream, AES_BLOCK_SIZE);
+ crypto_xor((u8 *) keystream, src, nbytes);
+ memcpy(dst, keystream, nbytes);
+ crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
static int ctr_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
@@ -338,10 +358,11 @@ static int ctr_crypt(struct blkcipher_desc *desc,
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_encrypt_keys(&ctx->key[0]);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
unsigned int block_len = nbytes & AES_BLOCK_MASK;
if (likely(block_len)) {
@@ -353,6 +374,10 @@ static int ctr_crypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ if (walk.nbytes) {
+ ctr_crypt_final(ctx, &walk);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
fprs_write(0);
return err;
}
@@ -418,7 +443,7 @@ static struct crypto_alg algs[] = { {
.cra_driver_name = "ctr-aes-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_blkcipher_type,
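The aes_glue.c changes above turn CTR mode into a true stream cipher: full blocks go through the existing bulk routine, and the new ctr_crypt_final() handles a trailing partial block by encrypting the counter into a keystream buffer, XORing only the bytes that remain, and then incrementing the counter. Below is a minimal user-space sketch of that final-block idea; the names (toy_block_encrypt, ctr_final_block) and the placeholder cipher are illustrative assumptions, not the kernel crypto API.

#include <stddef.h>
#include <stdint.h>

/* Placeholder single-block "cipher" so the sketch is self-contained; a real
 * implementation would run AES on the 16-byte counter block instead. */
static void toy_block_encrypt(const uint8_t key[16], const uint8_t in[16],
			      uint8_t out[16])
{
	for (size_t i = 0; i < 16; i++)
		out[i] = in[i] ^ key[i];
}

/* CTR final partial block: derive one keystream block from the counter, XOR
 * only the nbytes that remain (nbytes < 16), then bump the big-endian
 * counter, mirroring what crypto_inc() does in the patch above. */
static void ctr_final_block(const uint8_t key[16], uint8_t counter[16],
			    const uint8_t *src, uint8_t *dst, size_t nbytes)
{
	uint8_t keystream[16];

	toy_block_encrypt(key, counter, keystream);
	for (size_t i = 0; i < nbytes; i++)
		dst[i] = src[i] ^ keystream[i];

	for (int i = 15; i >= 0; i--)
		if (++counter[i] != 0)
			break;
}

Because the partial block never feeds plaintext into the block cipher itself, the algorithm's cra_blocksize can drop to 1, which is exactly the change made to the ctr-aes-sparc64 definition above.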
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 62c89af3fd3..888f6260b4e 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -98,6 +98,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
if (encrypt)
key = &ctx->encrypt_key[0];
@@ -160,6 +161,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
key = &ctx->encrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
@@ -198,6 +200,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
key = &ctx->decrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S
index 30b6e90b28b..b5c8fc269b5 100644
--- a/arch/sparc/crypto/des_asm.S
+++ b/arch/sparc/crypto/des_asm.S
@@ -376,6 +376,7 @@ ENTRY(des3_ede_sparc64_ecb_crypt)
1: ldd [%o1 + 0x00], %f60
DES3_LOOP_BODY(60)
std %f60, [%o2 + 0x00]
+ add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
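
The one-line des_asm.S fix above is easy to miss: the 8-byte-at-a-time loop in des3_ede_sparc64_ecb_crypt() advanced the output pointer (%o2, in the branch delay slot) and the byte count (%o3), but never the input pointer (%o1), so every remaining block was read from the same source data. The added "add %o1, 0x08, %o1" restores the expected stride. A rough C equivalent of the corrected loop, with a dummy transform standing in for the DES3-EDE rounds:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for one 64-bit DES3-EDE block operation. */
static uint64_t crypt_block(uint64_t in)
{
        return in ^ 0x0123456789abcdefULL;      /* placeholder transform */
}

/* ECB over 'len' bytes (a multiple of 8): both src and dst must advance
 * each iteration; the missing src advance was the bug being fixed. */
static void ecb_crypt(const uint64_t *src, uint64_t *dst, size_t len)
{
        while (len) {
                *dst = crypt_block(*src);
                src++;                  /* the added "add %o1, 0x08, %o1" */
                dst++;
                len -= 8;
        }
}

int main(void)
{
        uint64_t in[3] = { 1, 2, 3 }, out[3];

        ecb_crypt(in, out, sizeof(in));
        for (int i = 0; i < 3; i++)
                printf("%llx\n", (unsigned long long)out[i]);
        return 0;
}
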
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 41524cebcc4..3065bc61f9d 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -100,6 +100,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
if (encrypt)
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
@@ -147,6 +148,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
while ((nbytes = walk.nbytes)) {
@@ -177,6 +179,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
while ((nbytes = walk.nbytes)) {
@@ -266,6 +269,7 @@ static int __ecb3_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
if (encrypt)
K = &ctx->encrypt_expkey[0];
@@ -317,6 +321,7 @@ static int cbc3_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
K = &ctx->encrypt_expkey[0];
des3_ede_sparc64_load_keys(K);
@@ -352,6 +357,7 @@ static int cbc3_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
K = &ctx->decrypt_expkey[0];
des3_ede_sparc64_load_keys(K);
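
A pattern repeated throughout the aes, camellia and des glue changes above is clearing CRYPTO_TFM_REQ_MAY_SLEEP right after blkcipher_walk_virt(). The likely motivation is that blkcipher_walk_done() calls crypto_yield(), which may reschedule only when that flag is set; these drivers preload the expanded keys into FPU registers once and reuse them across walk iterations, so sleeping mid-loop could let another task clobber that state. A tiny user-space model of the gating follows; the flag value and function name are stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

#define CRYPTO_TFM_REQ_MAY_SLEEP 0x200  /* stand-in for the kernel flag bit */

/* Model of crypto_yield() as invoked from blkcipher_walk_done(): it only
 * reschedules when MAY_SLEEP is set.  Rescheduling here would allow the
 * FPU registers holding the expanded keys to be clobbered. */
static void crypto_yield_model(unsigned int flags)
{
        if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                printf("cond_resched(): preloaded key registers may be lost\n");
        else
                printf("no resched: preloaded keys stay valid\n");
}

int main(void)
{
        unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;     /* what the glue code now does */
        crypto_yield_model(flags);
        return 0;
}
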
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 645a58da0e8..a4582181800 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -2,10 +2,20 @@
generic-y += clkdev.h
+generic-y += cputime.h
generic-y += div64.h
+generic-y += emergency-restart.h
generic-y += exec.h
-generic-y += local64.h
+generic-y += hash.h
generic-y += irq_regs.h
+generic-y += linkage.h
generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += module.h
+generic-y += mutex.h
+generic-y += preempt.h
+generic-y += serial.h
+generic-y += trace_clock.h
+generic-y += types.h
generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 905832aa9e9..7aed2be45b4 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -14,16 +14,17 @@
#include <linux/types.h>
#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
#include <asm-generic/atomic64.h>
#define ATOMIC_INIT(i) { (i) }
-extern int __atomic_add_return(int, atomic_t *);
-extern int atomic_cmpxchg(atomic_t *, int, int);
+int __atomic_add_return(int, atomic_t *);
+int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-extern int __atomic_add_unless(atomic_t *, int, int);
-extern void atomic_set(atomic_t *, int);
+int __atomic_add_unless(atomic_t *, int, int);
+void atomic_set(atomic_t *, int);
#define atomic_read(v) (*(volatile int *)&(v)->counter)
@@ -52,10 +53,4 @@ extern void atomic_set(atomic_t *, int);
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index be56a244c9c..bb894c8bec5 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -19,15 +20,15 @@
#define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i)
-extern void atomic_add(int, atomic_t *);
-extern void atomic64_add(long, atomic64_t *);
-extern void atomic_sub(int, atomic_t *);
-extern void atomic64_sub(long, atomic64_t *);
+void atomic_add(int, atomic_t *);
+void atomic64_add(long, atomic64_t *);
+void atomic_sub(int, atomic_t *);
+void atomic64_sub(long, atomic64_t *);
-extern int atomic_add_ret(int, atomic_t *);
-extern long atomic64_add_ret(long, atomic64_t *);
-extern int atomic_sub_ret(int, atomic_t *);
-extern long atomic64_sub_ret(long, atomic64_t *);
+int atomic_add_ret(int, atomic_t *);
+long atomic64_add_ret(long, atomic64_t *);
+int atomic_sub_ret(int, atomic_t *);
+long atomic64_sub_ret(long, atomic64_t *);
#define atomic_dec_return(v) atomic_sub_ret(1, v)
#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
@@ -106,12 +107,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-extern long atomic64_dec_if_positive(atomic64_t *v);
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+long atomic64_dec_if_positive(atomic64_t *v);
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/auxio.h b/arch/sparc/include/asm/auxio.h
index 13dc67f0301..3e09a07b77e 100644
--- a/arch/sparc/include/asm/auxio.h
+++ b/arch/sparc/include/asm/auxio.h
@@ -1,5 +1,12 @@
#ifndef ___ASM_SPARC_AUXIO_H
#define ___ASM_SPARC_AUXIO_H
+
+#ifndef __ASSEMBLY__
+
+extern void __iomem *auxio_register;
+
+#endif /* ifndef __ASSEMBLY__ */
+
#if defined(__sparc__) && defined(__arch64__)
#include <asm/auxio_64.h>
#else
diff --git a/arch/sparc/include/asm/auxio_32.h b/arch/sparc/include/asm/auxio_32.h
index 3a319775ae3..5d685df427b 100644
--- a/arch/sparc/include/asm/auxio_32.h
+++ b/arch/sparc/include/asm/auxio_32.h
@@ -34,8 +34,8 @@
* NOTE: these routines are implementation dependent--
* understand the hardware you are querying!
*/
-extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
-extern unsigned char get_auxio(void); /* .../asm/floppy.h */
+void set_auxio(unsigned char bits_on, unsigned char bits_off);
+unsigned char get_auxio(void); /* .../asm/floppy.h */
/*
* The following routines are provided for driver-compatibility
@@ -78,7 +78,7 @@ do { \
/* AUXIO2 (Power Off Control) */
-extern __volatile__ unsigned char * auxio_power_register;
+extern volatile u8 __iomem *auxio_power_register;
#define AUXIO_POWER_DETECT_FAILURE 32
#define AUXIO_POWER_CLEAR_FAILURE 2
diff --git a/arch/sparc/include/asm/auxio_64.h b/arch/sparc/include/asm/auxio_64.h
index f61cd1e3e39..6079e59a7ad 100644
--- a/arch/sparc/include/asm/auxio_64.h
+++ b/arch/sparc/include/asm/auxio_64.h
@@ -75,8 +75,6 @@
#ifndef __ASSEMBLY__
-extern void __iomem *auxio_register;
-
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
@@ -84,7 +82,7 @@ extern void __iomem *auxio_register;
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
-extern void auxio_set_lte(int on);
+void auxio_set_lte(int on);
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
@@ -93,7 +91,7 @@ extern void auxio_set_lte(int on);
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
-extern void auxio_set_led(int on);
+void auxio_set_led(int on);
#endif /* ifndef __ASSEMBLY__ */
diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
index c1b76654ee7..ae69eda288f 100644
--- a/arch/sparc/include/asm/barrier_32.h
+++ b/arch/sparc/include/asm/barrier_32.h
@@ -1,15 +1,7 @@
#ifndef __SPARC_BARRIER_H
#define __SPARC_BARRIER_H
-/* XXX Change this if we ever use a PSO mode kernel. */
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
-#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
-#define smp_mb() __asm__ __volatile__("":::"memory")
-#define smp_rmb() __asm__ __volatile__("":::"memory")
-#define smp_wmb() __asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends() do { } while(0)
+#include <asm/processor.h> /* for nop() */
+#include <asm-generic/barrier.h>
#endif /* !(__SPARC_BARRIER_H) */
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 95d45986f90..305dcc3dc72 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -53,4 +53,22 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define smp_read_barrier_depends() do { } while(0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
#endif /* !(__SPARC64_BARRIER_H) */
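
The smp_store_release()/smp_load_acquire() definitions added above reduce to a compiler barrier plus a plain ACCESS_ONCE() because sparc64 runs in TSO mode, where the hardware already orders loads and stores strongly enough for acquire/release semantics. Their intended use is the usual publish/consume pairing; here is a stand-alone sketch using C11 atomics in place of the kernel macros (an illustration of the semantics, not kernel code; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;             /* ordinary data being published */
static atomic_int ready;        /* flag written with release, read with acquire */

static void *producer(void *arg)
{
        payload = 42;                                   /* write the data first */
        atomic_store_explicit(&ready, 1, memory_order_release);
        return NULL;                                    /* ~ smp_store_release(&ready, 1) */
}

static void *consumer(void *arg)
{
        while (!atomic_load_explicit(&ready, memory_order_acquire))
                ;                                       /* ~ smp_load_acquire(&ready) */
        printf("payload = %d\n", payload);              /* guaranteed to see 42 */
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}
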
diff --git a/arch/sparc/include/asm/bitext.h b/arch/sparc/include/asm/bitext.h
index 297b2f2fcb4..9c988bf3adb 100644
--- a/arch/sparc/include/asm/bitext.h
+++ b/arch/sparc/include/asm/bitext.h
@@ -20,8 +20,8 @@ struct bit_map {
int num_colors;
};
-extern int bit_map_string_get(struct bit_map *t, int len, int align);
-extern void bit_map_clear(struct bit_map *t, int offset, int len);
-extern void bit_map_init(struct bit_map *t, unsigned long *map, int size);
+int bit_map_string_get(struct bit_map *t, int len, int align);
+void bit_map_clear(struct bit_map *t, int offset, int len);
+void bit_map_init(struct bit_map *t, unsigned long *map, int size);
#endif /* defined(_SPARC_BITEXT_H) */
diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h
index 25a676653d4..600ed1d9c8c 100644
--- a/arch/sparc/include/asm/bitops_32.h
+++ b/arch/sparc/include/asm/bitops_32.h
@@ -18,9 +18,9 @@
#error only <linux/bitops.h> can be included directly
#endif
-extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
-extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
-extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
+unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
+unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
@@ -90,9 +90,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
#include <asm-generic/bitops/non-atomic.h>
-#define smp_mb__before_clear_bit() do { } while(0)
-#define smp_mb__after_clear_bit() do { } while(0)
-
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index 29011cc0e4b..2d522402a93 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -13,27 +13,25 @@
#include <linux/compiler.h>
#include <asm/byteorder.h>
+#include <asm/barrier.h>
-extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
-extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
-extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
-extern void set_bit(unsigned long nr, volatile unsigned long *addr);
-extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
-extern void change_bit(unsigned long nr, volatile unsigned long *addr);
+int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
+int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
+int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
+void set_bit(unsigned long nr, volatile unsigned long *addr);
+void clear_bit(unsigned long nr, volatile unsigned long *addr);
+void change_bit(unsigned long nr, volatile unsigned long *addr);
#include <asm-generic/bitops/non-atomic.h>
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
-extern int ffs(int x);
-extern unsigned long __ffs(unsigned long);
+int ffs(int x);
+unsigned long __ffs(unsigned long);
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/sched.h>
@@ -43,10 +41,10 @@ extern unsigned long __ffs(unsigned long);
* of bits set) of a N-bit word
*/
-extern unsigned long __arch_hweight64(__u64 w);
-extern unsigned int __arch_hweight32(unsigned int w);
-extern unsigned int __arch_hweight16(unsigned int w);
-extern unsigned int __arch_hweight8(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight8(unsigned int w);
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
diff --git a/arch/sparc/include/asm/btext.h b/arch/sparc/include/asm/btext.h
index 9b2bc6b6ed0..75a32b109e1 100644
--- a/arch/sparc/include/asm/btext.h
+++ b/arch/sparc/include/asm/btext.h
@@ -1,6 +1,6 @@
#ifndef _SPARC_BTEXT_H
#define _SPARC_BTEXT_H
-extern int btext_find_display(void);
+int btext_find_display(void);
#endif /* _SPARC_BTEXT_H */
diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h
index 6bd9f43cb5a..eaa8f8d3812 100644
--- a/arch/sparc/include/asm/bug.h
+++ b/arch/sparc/include/asm/bug.h
@@ -5,7 +5,7 @@
#include <linux/compiler.h>
#ifdef CONFIG_DEBUG_BUGVERBOSE
-extern void do_BUG(const char *file, int line);
+void do_BUG(const char *file, int line);
#define BUG() do { \
do_BUG(__FILE__, __LINE__); \
__builtin_trap(); \
@@ -20,6 +20,6 @@ extern void do_BUG(const char *file, int line);
#include <asm-generic/bug.h>
struct pt_regs;
-extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
+void __noreturn die_if_kernel(char *str, struct pt_regs *regs);
#endif
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index bb014c24f31..12164006181 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -36,7 +36,7 @@
#define flush_page_for_dma(addr) \
sparc32_cachetlb_ops->page_for_dma(addr)
-extern void sparc_flush_page_to_ram(struct page *page);
+void sparc_flush_page_to_ram(struct page *page);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
@@ -51,8 +51,8 @@ extern void sparc_flush_page_to_ram(struct page *page);
* way the windows are all clean for the next process and the stack
* frames are up to date.
*/
-extern void flush_user_windows(void);
-extern void kill_user_windows(void);
-extern void flushw_all(void);
+void flush_user_windows(void);
+void kill_user_windows(void);
+void flushw_all(void);
#endif /* _SPARC_CACHEFLUSH_H */
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index 301736d9e7a..38965379e35 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -10,7 +10,7 @@
/* Cache flush operations. */
#define flushw_all() __asm__ __volatile__("flushw")
-extern void __flushw_user(void);
+void __flushw_user(void);
#define flushw_user() __flushw_user()
#define flush_user_windows flushw_user
@@ -30,29 +30,29 @@ extern void __flushw_user(void);
* use block commit stores (which invalidate icache lines) during
* module load, so we need this.
*/
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void __flush_icache_page(unsigned long);
+void flush_icache_range(unsigned long start, unsigned long end);
+void __flush_icache_page(unsigned long);
-extern void __flush_dcache_page(void *addr, int flush_icache);
-extern void flush_dcache_page_impl(struct page *page);
+void __flush_dcache_page(void *addr, int flush_icache);
+void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
-extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
-extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
+void smp_flush_dcache_page_impl(struct page *page, int cpu);
+void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif
-extern void __flush_dcache_range(unsigned long start, unsigned long end);
+void __flush_dcache_range(unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *page);
+void flush_dcache_page(struct page *page);
#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
- unsigned long uaddr, void *kaddr,
- unsigned long len, int write);
+void flush_ptrace_access(struct vm_area_struct *, struct page *,
+ unsigned long uaddr, void *kaddr,
+ unsigned long len, int write);
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h
index bdbda1453aa..426b2389a1c 100644
--- a/arch/sparc/include/asm/checksum_32.h
+++ b/arch/sparc/include/asm/checksum_32.h
@@ -29,7 +29,7 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
-extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it
* checksums
@@ -38,7 +38,7 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
+unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
@@ -238,4 +238,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
return csum_fold(csum_partial(buff, len, 0));
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ __asm__ __volatile__(
+ "addcc %0, %1, %0\n"
+ "addx %0, %%g0, %0"
+ : "=r" (csum)
+ : "r" (addend), "0" (csum));
+
+ return csum;
+}
+
#endif /* !(__SPARC_CHECKSUM_H) */
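
The csum_add() helpers added to both checksum headers perform a 32-bit ones'-complement addition: addcc produces the carry out of bit 31 and addx folds it back into the low end. The same end-around-carry operation in portable C, roughly what the generic fallback in include/net/checksum.h computes, for comparison:

#include <stdint.h>
#include <stdio.h>

/* Ones'-complement add: any carry out of bit 31 is added back into the
 * low end, matching the addcc/addx pair in the sparc asm. */
static uint32_t csum_add32(uint32_t csum, uint32_t addend)
{
        uint32_t res = csum + addend;

        return res + (res < addend);    /* end-around carry */
}

int main(void)
{
        /* 0xffffffff + 1 wraps to 0 with a carry; folding it back gives 1. */
        printf("0x%08x\n", csum_add32(0xffffffffu, 1u));
        return 0;
}
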
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 019b9615e43..b8779a6a591 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -29,7 +29,7 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
-extern __wsum csum_partial(const void * buff, int len, __wsum sum);
+__wsum csum_partial(const void * buff, int len, __wsum sum);
/* the same as csum_partial, but copies from user space while it
* checksums
@@ -37,12 +37,12 @@ extern __wsum csum_partial(const void * buff, int len, __wsum sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum);
-extern long __csum_partial_copy_from_user(const void __user *src,
- void *dst, int len,
- __wsum sum);
+long __csum_partial_copy_from_user(const void __user *src,
+ void *dst, int len,
+ __wsum sum);
static inline __wsum
csum_partial_copy_from_user(const void __user *src,
@@ -59,9 +59,9 @@ csum_partial_copy_from_user(const void __user *src,
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
-extern long __csum_partial_copy_to_user(const void *src,
- void __user *dst, int len,
- __wsum sum);
+long __csum_partial_copy_to_user(const void *src,
+ void __user *dst, int len,
+ __wsum sum);
static inline __wsum
csum_and_copy_to_user(const void *src,
@@ -77,7 +77,7 @@ csum_and_copy_to_user(const void *src,
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
@@ -96,9 +96,9 @@ static inline __sum16 csum_fold(__wsum sum)
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned int len,
- unsigned short proto,
- __wsum sum)
+ unsigned int len,
+ unsigned short proto,
+ __wsum sum)
{
__asm__ __volatile__(
" addcc %1, %0, %0\n"
@@ -116,9 +116,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
@@ -164,4 +164,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
return csum_fold(csum_partial(buff, len, 0));
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ __asm__ __volatile__(
+ "addcc %0, %1, %0\n"
+ "addx %0, %%g0, %0"
+ : "=r" (csum)
+ : "r" (addend), "0" (csum));
+
+ return csum;
+}
+
#endif /* !(__SPARC64_CHECKSUM_H) */
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 1fae1a02e3c..32c29a133f9 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -20,7 +20,7 @@ static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned lon
return val;
}
-extern void __xchg_called_with_bad_pointer(void);
+void __xchg_called_with_bad_pointer(void);
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
@@ -45,9 +45,9 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
#define __HAVE_ARCH_CMPXCHG 1
/* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
/* we only need to support cmpxchg of a u32 on sparc */
-extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index b30eb37294c..0e1ed6cfbf6 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -42,7 +42,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-extern void __xchg_called_with_bad_pointer(void);
+void __xchg_called_with_bad_pointer(void);
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
int size)
@@ -91,7 +91,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
/* This function doesn't exist, so you'll get a linker error
if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
@@ -141,5 +141,6 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC64_CMPXCHG__ */
diff --git a/arch/sparc/include/asm/compat_signal.h b/arch/sparc/include/asm/compat_signal.h
index b759eab9b51..9ed1f128b4d 100644
--- a/arch/sparc/include/asm/compat_signal.h
+++ b/arch/sparc/include/asm/compat_signal.h
@@ -18,12 +18,6 @@ struct __old_sigaction32 {
unsigned int sa_flags;
unsigned sa_restorer; /* not used by Linux/SPARC yet */
};
-
-typedef struct sigaltstack32 {
- u32 ss_sp;
- int ss_flags;
- compat_size_t ss_size;
-} stack_t32;
#endif
#endif /* !(_COMPAT_SIGNAL_H) */
diff --git a/arch/sparc/include/asm/cpudata.h b/arch/sparc/include/asm/cpudata.h
index b5976de7cac..128b56b0867 100644
--- a/arch/sparc/include/asm/cpudata.h
+++ b/arch/sparc/include/asm/cpudata.h
@@ -1,5 +1,15 @@
#ifndef ___ASM_SPARC_CPUDATA_H
#define ___ASM_SPARC_CPUDATA_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/threads.h>
+#include <linux/percpu.h>
+
+extern const struct seq_operations cpuinfo_op;
+
+#endif /* !(__ASSEMBLY__) */
+
#if defined(__sparc__) && defined(__arch64__)
#include <asm/cpudata_64.h>
#else
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 050ef35b9dc..0e594076912 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -8,9 +8,6 @@
#ifndef __ASSEMBLY__
-#include <linux/percpu.h>
-#include <linux/threads.h>
-
typedef struct {
/* Dcache line 1 */
unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
@@ -35,8 +32,6 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() __get_cpu_var(__cpu_data)
-extern const struct seq_operations cpuinfo_op;
-
#endif /* !(__ASSEMBLY__) */
#include <asm/trap_block.h>
diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h
deleted file mode 100644
index 1a642b81e01..00000000000
--- a/arch/sparc/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_CPUTIME_H
-#define __SPARC_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SPARC_CPUTIME_H */
diff --git a/arch/sparc/include/asm/delay_32.h b/arch/sparc/include/asm/delay_32.h
index bc9aba2bead..3fb8ca144b4 100644
--- a/arch/sparc/include/asm/delay_32.h
+++ b/arch/sparc/include/asm/delay_32.h
@@ -20,8 +20,8 @@ static inline void __delay(unsigned long loops)
}
/* This is too messy with inline asm on the Sparc. */
-extern void __udelay(unsigned long usecs, unsigned long lpj);
-extern void __ndelay(unsigned long nsecs, unsigned long lpj);
+void __udelay(unsigned long usecs, unsigned long lpj);
+void __ndelay(unsigned long nsecs, unsigned long lpj);
#ifdef CONFIG_SMP
#define __udelay_val cpu_data(smp_processor_id()).udelay_val
diff --git a/arch/sparc/include/asm/delay_64.h b/arch/sparc/include/asm/delay_64.h
index a77aa622d76..0ba5424856d 100644
--- a/arch/sparc/include/asm/delay_64.h
+++ b/arch/sparc/include/asm/delay_64.h
@@ -8,8 +8,8 @@
#ifndef __ASSEMBLY__
-extern void __delay(unsigned long loops);
-extern void udelay(unsigned long usecs);
+void __delay(unsigned long loops);
+void udelay(unsigned long usecs);
#define mdelay(n) udelay((n) * 1000)
#endif /* !__ASSEMBLY__ */
diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h
index daa6a8a5e9c..bb3f0b0c675 100644
--- a/arch/sparc/include/asm/device.h
+++ b/arch/sparc/include/asm/device.h
@@ -19,7 +19,7 @@ struct dev_archdata {
int numa_node;
};
-extern void of_propagate_archdata(struct platform_device *bus);
+void of_propagate_archdata(struct platform_device *bus);
struct pdev_archdata {
struct resource resource[PROMREG_MAX];
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 8493fd3c7ba..1ee02710b2d 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -7,7 +7,7 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-extern int dma_supported(struct device *dev, u64 mask);
+int dma_supported(struct device *dev, u64 mask);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -59,6 +59,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
+ debug_dma_mapping_error(dev, dma_addr);
return (dma_addr == DMA_ERROR_CODE);
}
diff --git a/arch/sparc/include/asm/ebus_dma.h b/arch/sparc/include/asm/ebus_dma.h
index f07a5b541c9..fcfb4948147 100644
--- a/arch/sparc/include/asm/ebus_dma.h
+++ b/arch/sparc/include/asm/ebus_dma.h
@@ -22,14 +22,14 @@ struct ebus_dma_info {
unsigned char name[64];
};
-extern int ebus_dma_register(struct ebus_dma_info *p);
-extern int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
-extern void ebus_dma_unregister(struct ebus_dma_info *p);
-extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
+int ebus_dma_register(struct ebus_dma_info *p);
+int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
+void ebus_dma_unregister(struct ebus_dma_info *p);
+int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
size_t len);
-extern void ebus_dma_prepare(struct ebus_dma_info *p, int write);
-extern unsigned int ebus_dma_residue(struct ebus_dma_info *p);
-extern unsigned int ebus_dma_addr(struct ebus_dma_info *p);
-extern void ebus_dma_enable(struct ebus_dma_info *p, int on);
+void ebus_dma_prepare(struct ebus_dma_info *p, int write);
+unsigned int ebus_dma_residue(struct ebus_dma_info *p);
+unsigned int ebus_dma_addr(struct ebus_dma_info *p);
+void ebus_dma_enable(struct ebus_dma_info *p, int on);
#endif /* __ASM_SPARC_EBUS_DMA_H */
diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
index ac74a2c98e6..a24e41fcdde 100644
--- a/arch/sparc/include/asm/elf_32.h
+++ b/arch/sparc/include/asm/elf_32.h
@@ -128,7 +128,4 @@ typedef struct {
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif /* !(__ASMSPARC_ELF_H) */
diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42..00000000000
--- a/arch/sparc/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index fb3f16954c6..071b83e52f1 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -9,11 +9,12 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
+#include <asm/setup.h>
+#include <asm/page.h>
#include <asm/irq.h>
/* We don't need no stinkin' I/O port allocation crap. */
@@ -49,7 +50,6 @@ struct sun_flpy_controller {
/* You'll only ever find one controller on a SparcStation anyways. */
static struct sun_flpy_controller *sun_fdc = NULL;
-extern volatile unsigned char *fdc_status;
struct sun_floppy_ops {
unsigned char (*fd_inb)(int port);
@@ -212,13 +212,6 @@ static void sun_82077_fd_outb(unsigned char value, int port)
* underruns. If non-zero, doing_pdma encodes the direction of
* the transfer for debugging. 1=read 2=write
*/
-extern char *pdma_vaddr;
-extern unsigned long pdma_size;
-extern volatile int doing_pdma;
-
-/* This is software state */
-extern char *pdma_base;
-extern unsigned long pdma_areasize;
/* Common routines to all controller types on the Sparc. */
static inline void virtual_dma_init(void)
@@ -263,8 +256,7 @@ static inline void sun_fd_enable_dma(void)
pdma_areasize = pdma_size;
}
-extern int sparc_floppy_request_irq(unsigned int irq,
- irq_handler_t irq_handler);
+int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler);
static int sun_fd_request_irq(void)
{
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index e204f902e6c..625756406a7 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -254,7 +254,7 @@ static int sun_fd_request_irq(void)
once = 1;
error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
- IRQF_DISABLED, "floppy", NULL);
+ 0, "floppy", NULL);
return ((error == 0) ? 0 : -1);
}
@@ -296,7 +296,7 @@ struct sun_pci_dma_op {
static struct sun_pci_dma_op sun_pci_dma_current = { -1U, 0, 0, NULL};
static struct sun_pci_dma_op sun_pci_dma_pending = { -1U, 0, 0, NULL};
-extern irqreturn_t floppy_interrupt(int irq, void *dev_id);
+irqreturn_t floppy_interrupt(int irq, void *dev_id);
static unsigned char sun_pci_fd_inb(unsigned long port)
{
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index b0f18e9893d..9ec94ad116f 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -6,7 +6,7 @@
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
-extern void _mcount(void);
+void _mcount(void);
#endif
#endif
@@ -22,4 +22,8 @@ struct dyn_arch_ftrace {
};
#endif /* CONFIG_DYNAMIC_FTRACE */
+unsigned long prepare_ftrace_return(unsigned long parent,
+ unsigned long self_addr,
+ unsigned long frame_pointer);
+
#endif /* _ASM_SPARC64_FTRACE */
diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h
index 162007643cd..ee93923b7f8 100644
--- a/arch/sparc/include/asm/hardirq_32.h
+++ b/arch/sparc/include/asm/hardirq_32.h
@@ -7,7 +7,6 @@
#ifndef __SPARC_HARDIRQ_H
#define __SPARC_HARDIRQ_H
-#define HARDIRQ_BITS 8
#include <asm-generic/hardirq.h>
#endif /* __SPARC_HARDIRQ_H */
diff --git a/arch/sparc/include/asm/hardirq_64.h b/arch/sparc/include/asm/hardirq_64.h
index 7c29fd1a87a..f478ff1ddd0 100644
--- a/arch/sparc/include/asm/hardirq_64.h
+++ b/arch/sparc/include/asm/hardirq_64.h
@@ -14,6 +14,4 @@
void ack_bad_irq(unsigned int irq);
-#define HARDIRQ_BITS 8
-
#endif /* !(__SPARC64_HARDIRQ_H) */
diff --git a/arch/sparc/include/asm/head_32.h b/arch/sparc/include/asm/head_32.h
index a76874838f6..5f1dbe315bc 100644
--- a/arch/sparc/include/asm/head_32.h
+++ b/arch/sparc/include/asm/head_32.h
@@ -55,15 +55,15 @@
/* The Get Condition Codes software trap for userland. */
#define GETCC_TRAP \
- b getcc_trap_handler; mov %psr, %l0; nop; nop;
+ b getcc_trap_handler; rd %psr, %l0; nop; nop;
/* The Set Condition Codes software trap for userland. */
#define SETCC_TRAP \
- b setcc_trap_handler; mov %psr, %l0; nop; nop;
+ b setcc_trap_handler; rd %psr, %l0; nop; nop;
/* The Get PSR software trap for userland. */
#define GETPSR_TRAP \
- mov %psr, %i0; jmp %l2; rett %l2 + 4; nop;
+ rd %psr, %i0; jmp %l2; rett %l2 + 4; nop;
/* This is for hard interrupts from level 1-14, 15 is non-maskable (nmi) and
* gets handled with another macro.
diff --git a/arch/sparc/include/asm/hibernate.h b/arch/sparc/include/asm/hibernate.h
new file mode 100644
index 00000000000..2ec34f84224
--- /dev/null
+++ b/arch/sparc/include/asm/hibernate.h
@@ -0,0 +1,23 @@
+/*
+ * hibernate.h: Hibernation support specific for sparc64.
+ *
+ * Copyright (C) 2013 Kirill V Tkhai (tkhai@yandex.ru)
+ */
+
+#ifndef ___SPARC_HIBERNATE_H
+#define ___SPARC_HIBERNATE_H
+
+struct saved_context {
+ unsigned long fp;
+ unsigned long cwp;
+ unsigned long wstate;
+
+ unsigned long tick;
+ unsigned long pstate;
+
+ unsigned long g4;
+ unsigned long g5;
+ unsigned long g6;
+};
+
+#endif
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 4f9e15c757e..92ded294a4e 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -31,7 +31,7 @@ extern unsigned long highstart_pfn, highend_pfn;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
-extern void kmap_init(void) __init;
+void kmap_init(void) __init;
/*
* Right now we initialize only a single pte table. It can be extended
@@ -49,8 +49,8 @@ extern void kmap_init(void) __init;
#define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
@@ -68,8 +68,8 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
#define flush_cache_kmaps() flush_cache_all()
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 8c5eed6d267..e4cab465b81 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
#define _ASM_SPARC64_HUGETLB_H
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -12,7 +13,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
- hugetlb_setup(mm);
}
static inline int is_hugepage_only_range(struct mm_struct *mm,
@@ -61,14 +61,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- ptep_set_wrprotect(mm, addr, ptep);
+ pte_t old_pte = *ptep;
+ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+ int changed = !pte_same(*ptep, pte);
+ if (changed) {
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ flush_tlb_page(vma, addr);
+ }
+ return changed;
}
static inline pte_t huge_ptep_get(pte_t *ptep)
diff --git a/arch/sparc/include/asm/hvtramp.h b/arch/sparc/include/asm/hvtramp.h
index b2b9b947b3a..04b56f862bb 100644
--- a/arch/sparc/include/asm/hvtramp.h
+++ b/arch/sparc/include/asm/hvtramp.h
@@ -19,7 +19,7 @@ struct hvtramp_descr {
struct hvtramp_mapping maps[1];
};
-extern void hv_cpu_startup(unsigned long hvdescr_pa);
+void hv_cpu_startup(unsigned long hvdescr_pa);
#endif
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index ca121f0fa3e..94b39caea3e 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -98,7 +98,7 @@
#define HV_FAST_MACH_EXIT 0x00
#ifndef __ASSEMBLY__
-extern void sun4v_mach_exit(unsigned long exit_code);
+void sun4v_mach_exit(unsigned long exit_code);
#endif
/* Domain services. */
@@ -127,9 +127,9 @@ extern void sun4v_mach_exit(unsigned long exit_code);
#define HV_FAST_MACH_DESC 0x01
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
- unsigned long buf_len,
- unsigned long *real_buf_len);
+unsigned long sun4v_mach_desc(unsigned long buffer_pa,
+ unsigned long buf_len,
+ unsigned long *real_buf_len);
#endif
/* mach_sir()
@@ -148,7 +148,7 @@ extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
#define HV_FAST_MACH_SIR 0x02
#ifndef __ASSEMBLY__
-extern void sun4v_mach_sir(void);
+void sun4v_mach_sir(void);
#endif
/* mach_set_watchdog()
@@ -204,8 +204,8 @@ extern void sun4v_mach_sir(void);
#define HV_FAST_MACH_SET_WATCHDOG 0x05
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
- unsigned long *orig_timeout);
+unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
+ unsigned long *orig_timeout);
#endif
/* CPU services.
@@ -250,10 +250,10 @@ extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
#define HV_FAST_CPU_START 0x10
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_start(unsigned long cpuid,
- unsigned long pc,
- unsigned long rtba,
- unsigned long arg0);
+unsigned long sun4v_cpu_start(unsigned long cpuid,
+ unsigned long pc,
+ unsigned long rtba,
+ unsigned long arg0);
#endif
/* cpu_stop()
@@ -278,7 +278,7 @@ extern unsigned long sun4v_cpu_start(unsigned long cpuid,
#define HV_FAST_CPU_STOP 0x11
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
+unsigned long sun4v_cpu_stop(unsigned long cpuid);
#endif
/* cpu_yield()
@@ -295,7 +295,7 @@ extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
#define HV_FAST_CPU_YIELD 0x12
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_yield(void);
+unsigned long sun4v_cpu_yield(void);
#endif
/* cpu_qconf()
@@ -341,9 +341,9 @@ extern unsigned long sun4v_cpu_yield(void);
#define HV_CPU_QUEUE_NONRES_ERROR 0x3f
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_qconf(unsigned long type,
- unsigned long queue_paddr,
- unsigned long num_queue_entries);
+unsigned long sun4v_cpu_qconf(unsigned long type,
+ unsigned long queue_paddr,
+ unsigned long num_queue_entries);
#endif
/* cpu_qinfo()
@@ -394,7 +394,9 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type,
#define HV_FAST_CPU_MONDO_SEND 0x42
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa);
+unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
+ unsigned long cpu_list_pa,
+ unsigned long mondo_block_pa);
#endif
/* cpu_myid()
@@ -425,7 +427,7 @@ extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long
#define HV_CPU_STATE_ERROR 0x03
#ifndef __ASSEMBLY__
-extern long sun4v_cpu_state(unsigned long cpuid);
+long sun4v_cpu_state(unsigned long cpuid);
#endif
/* cpu_set_rtba()
@@ -625,8 +627,8 @@ struct hv_fault_status {
#define HV_FAST_MMU_TSB_CTX0 0x20
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
- unsigned long tsb_desc_ra);
+unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
+ unsigned long tsb_desc_ra);
#endif
/* mmu_tsb_ctxnon0()
@@ -710,7 +712,7 @@ extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
#define HV_FAST_MMU_DEMAP_ALL 0x24
#ifndef __ASSEMBLY__
-extern void sun4v_mmu_demap_all(void);
+void sun4v_mmu_demap_all(void);
#endif
/* mmu_map_perm_addr()
@@ -740,10 +742,10 @@ extern void sun4v_mmu_demap_all(void);
#define HV_FAST_MMU_MAP_PERM_ADDR 0x25
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
- unsigned long set_to_zero,
- unsigned long tte,
- unsigned long flags);
+unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
+ unsigned long set_to_zero,
+ unsigned long tte,
+ unsigned long flags);
#endif
/* mmu_fault_area_conf()
@@ -945,7 +947,7 @@ extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
#define HV_FAST_TOD_GET 0x50
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_tod_get(unsigned long *time);
+unsigned long sun4v_tod_get(unsigned long *time);
#endif
/* tod_set()
@@ -962,7 +964,7 @@ extern unsigned long sun4v_tod_get(unsigned long *time);
#define HV_FAST_TOD_SET 0x51
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_tod_set(unsigned long time);
+unsigned long sun4v_tod_set(unsigned long time);
#endif
/* Console services */
@@ -1038,14 +1040,14 @@ extern unsigned long sun4v_tod_set(unsigned long time);
#define HV_FAST_CONS_WRITE 0x63
#ifndef __ASSEMBLY__
-extern long sun4v_con_getchar(long *status);
-extern long sun4v_con_putchar(long c);
-extern long sun4v_con_read(unsigned long buffer,
- unsigned long size,
- unsigned long *bytes_read);
-extern unsigned long sun4v_con_write(unsigned long buffer,
- unsigned long size,
- unsigned long *bytes_written);
+long sun4v_con_getchar(long *status);
+long sun4v_con_putchar(long c);
+long sun4v_con_read(unsigned long buffer,
+ unsigned long size,
+ unsigned long *bytes_read);
+unsigned long sun4v_con_write(unsigned long buffer,
+ unsigned long size,
+ unsigned long *bytes_written);
#endif
/* mach_set_soft_state()
@@ -1080,8 +1082,8 @@ extern unsigned long sun4v_con_write(unsigned long buffer,
#define HV_SOFT_STATE_TRANSITION 0x02
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
- unsigned long msg_string_ra);
+unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
+ unsigned long msg_string_ra);
#endif
/* mach_get_soft_state()
@@ -1159,20 +1161,20 @@ extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
#define HV_FAST_SVC_CLRSTATUS 0x84
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_svc_send(unsigned long svc_id,
- unsigned long buffer,
- unsigned long buffer_size,
- unsigned long *sent_bytes);
-extern unsigned long sun4v_svc_recv(unsigned long svc_id,
- unsigned long buffer,
- unsigned long buffer_size,
- unsigned long *recv_bytes);
-extern unsigned long sun4v_svc_getstatus(unsigned long svc_id,
- unsigned long *status_bits);
-extern unsigned long sun4v_svc_setstatus(unsigned long svc_id,
- unsigned long status_bits);
-extern unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
- unsigned long status_bits);
+unsigned long sun4v_svc_send(unsigned long svc_id,
+ unsigned long buffer,
+ unsigned long buffer_size,
+ unsigned long *sent_bytes);
+unsigned long sun4v_svc_recv(unsigned long svc_id,
+ unsigned long buffer,
+ unsigned long buffer_size,
+ unsigned long *recv_bytes);
+unsigned long sun4v_svc_getstatus(unsigned long svc_id,
+ unsigned long *status_bits);
+unsigned long sun4v_svc_setstatus(unsigned long svc_id,
+ unsigned long status_bits);
+unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
+ unsigned long status_bits);
#endif
/* Trap trace services.
@@ -1458,8 +1460,8 @@ struct hv_trap_trace_entry {
#define HV_FAST_INTR_DEVINO2SYSINO 0xa0
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
- unsigned long devino);
+unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
+ unsigned long devino);
#endif
/* intr_getenabled()
@@ -1476,7 +1478,7 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
#define HV_FAST_INTR_GETENABLED 0xa1
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
+unsigned long sun4v_intr_getenabled(unsigned long sysino);
#endif
/* intr_setenabled()
@@ -1492,7 +1494,8 @@ extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
#define HV_FAST_INTR_SETENABLED 0xa2
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
+unsigned long sun4v_intr_setenabled(unsigned long sysino,
+ unsigned long intr_enabled);
#endif
/* intr_getstate()
@@ -1508,7 +1511,7 @@ extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long i
#define HV_FAST_INTR_GETSTATE 0xa3
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_getstate(unsigned long sysino);
+unsigned long sun4v_intr_getstate(unsigned long sysino);
#endif
/* intr_setstate()
@@ -1528,7 +1531,7 @@ extern unsigned long sun4v_intr_getstate(unsigned long sysino);
#define HV_FAST_INTR_SETSTATE 0xa4
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
+unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
#endif
/* intr_gettarget()
@@ -1546,7 +1549,7 @@ extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long int
#define HV_FAST_INTR_GETTARGET 0xa5
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
+unsigned long sun4v_intr_gettarget(unsigned long sysino);
#endif
/* intr_settarget()
@@ -1563,7 +1566,7 @@ extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
#define HV_FAST_INTR_SETTARGET 0xa6
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
+unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
#endif
/* vintr_get_cookie()
@@ -1647,30 +1650,30 @@ extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cp
#define HV_FAST_VINTR_SET_TARGET 0xae
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
- unsigned long dev_ino,
- unsigned long *cookie);
-extern unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
- unsigned long dev_ino,
- unsigned long cookie);
-extern unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
- unsigned long dev_ino,
- unsigned long *valid);
-extern unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
- unsigned long dev_ino,
- unsigned long valid);
-extern unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
- unsigned long dev_ino,
- unsigned long *state);
-extern unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
- unsigned long dev_ino,
- unsigned long state);
-extern unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
- unsigned long dev_ino,
- unsigned long *cpuid);
-extern unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
- unsigned long dev_ino,
- unsigned long cpuid);
+unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long *cookie);
+unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long cookie);
+unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long *valid);
+unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long valid);
+unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long *state);
+unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long state);
+unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long *cpuid);
+unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long cpuid);
#endif
/* PCI IO services.
@@ -2627,50 +2630,50 @@ struct ldc_mtable_entry {
#define HV_FAST_LDC_REVOKE 0xef
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
- unsigned long ra,
- unsigned long num_entries);
-extern unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
- unsigned long *ra,
- unsigned long *num_entries);
-extern unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
- unsigned long *head_off,
- unsigned long *tail_off,
- unsigned long *chan_state);
-extern unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
- unsigned long tail_off);
-extern unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
- unsigned long ra,
- unsigned long num_entries);
-extern unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
- unsigned long *ra,
- unsigned long *num_entries);
-extern unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
- unsigned long *head_off,
- unsigned long *tail_off,
- unsigned long *chan_state);
-extern unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
- unsigned long head_off);
-extern unsigned long sun4v_ldc_set_map_table(unsigned long channel,
- unsigned long ra,
- unsigned long num_entries);
-extern unsigned long sun4v_ldc_get_map_table(unsigned long channel,
- unsigned long *ra,
- unsigned long *num_entries);
-extern unsigned long sun4v_ldc_copy(unsigned long channel,
- unsigned long dir_code,
- unsigned long tgt_raddr,
- unsigned long lcl_raddr,
- unsigned long len,
- unsigned long *actual_len);
-extern unsigned long sun4v_ldc_mapin(unsigned long channel,
- unsigned long cookie,
- unsigned long *ra,
- unsigned long *perm);
-extern unsigned long sun4v_ldc_unmap(unsigned long ra);
-extern unsigned long sun4v_ldc_revoke(unsigned long channel,
- unsigned long cookie,
- unsigned long mte_cookie);
+unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
+ unsigned long ra,
+ unsigned long num_entries);
+unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
+ unsigned long *ra,
+ unsigned long *num_entries);
+unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
+ unsigned long *head_off,
+ unsigned long *tail_off,
+ unsigned long *chan_state);
+unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
+ unsigned long tail_off);
+unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
+ unsigned long ra,
+ unsigned long num_entries);
+unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
+ unsigned long *ra,
+ unsigned long *num_entries);
+unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
+ unsigned long *head_off,
+ unsigned long *tail_off,
+ unsigned long *chan_state);
+unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
+ unsigned long head_off);
+unsigned long sun4v_ldc_set_map_table(unsigned long channel,
+ unsigned long ra,
+ unsigned long num_entries);
+unsigned long sun4v_ldc_get_map_table(unsigned long channel,
+ unsigned long *ra,
+ unsigned long *num_entries);
+unsigned long sun4v_ldc_copy(unsigned long channel,
+ unsigned long dir_code,
+ unsigned long tgt_raddr,
+ unsigned long lcl_raddr,
+ unsigned long len,
+ unsigned long *actual_len);
+unsigned long sun4v_ldc_mapin(unsigned long channel,
+ unsigned long cookie,
+ unsigned long *ra,
+ unsigned long *perm);
+unsigned long sun4v_ldc_unmap(unsigned long ra);
+unsigned long sun4v_ldc_revoke(unsigned long channel,
+ unsigned long cookie,
+ unsigned long mte_cookie);
#endif
/* Performance counter services. */
@@ -2727,14 +2730,14 @@ extern unsigned long sun4v_ldc_revoke(unsigned long channel,
#define HV_FAST_N2_SET_PERFREG 0x105
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_niagara_getperf(unsigned long reg,
- unsigned long *val);
-extern unsigned long sun4v_niagara_setperf(unsigned long reg,
- unsigned long val);
-extern unsigned long sun4v_niagara2_getperf(unsigned long reg,
- unsigned long *val);
-extern unsigned long sun4v_niagara2_setperf(unsigned long reg,
- unsigned long val);
+unsigned long sun4v_niagara_getperf(unsigned long reg,
+ unsigned long *val);
+unsigned long sun4v_niagara_setperf(unsigned long reg,
+ unsigned long val);
+unsigned long sun4v_niagara2_getperf(unsigned long reg,
+ unsigned long *val);
+unsigned long sun4v_niagara2_setperf(unsigned long reg,
+ unsigned long val);
#endif
/* MMU statistics services.
@@ -2829,8 +2832,8 @@ struct hv_mmu_statistics {
#define HV_FAST_MMUSTAT_INFO 0x103
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
-extern unsigned long sun4v_mmustat_info(unsigned long *ra);
+unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
+unsigned long sun4v_mmustat_info(unsigned long *ra);
#endif
/* NCS crypto services */
@@ -2919,9 +2922,9 @@ struct hv_ncs_qtail_update_arg {
#define HV_FAST_NCS_REQUEST 0x110
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_ncs_request(unsigned long request,
- unsigned long arg_ra,
- unsigned long arg_size);
+unsigned long sun4v_ncs_request(unsigned long request,
+ unsigned long arg_ra,
+ unsigned long arg_size);
#endif
#define HV_FAST_FIRE_GET_PERFREG 0x120
@@ -2930,18 +2933,18 @@ extern unsigned long sun4v_ncs_request(unsigned long request,
#define HV_FAST_REBOOT_DATA_SET 0x172
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_reboot_data_set(unsigned long ra,
- unsigned long len);
+unsigned long sun4v_reboot_data_set(unsigned long ra,
+ unsigned long len);
#endif
#define HV_FAST_VT_GET_PERFREG 0x184
#define HV_FAST_VT_SET_PERFREG 0x185
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
- unsigned long *reg_val);
-extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
- unsigned long reg_val);
+unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
+ unsigned long *reg_val);
+unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+ unsigned long reg_val);
#endif
/* Function numbers for HV_CORE_TRAP. */
@@ -2978,21 +2981,21 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
#define HV_GRP_DIAG 0x0300
#ifndef __ASSEMBLY__
-extern unsigned long sun4v_get_version(unsigned long group,
- unsigned long *major,
- unsigned long *minor);
-extern unsigned long sun4v_set_version(unsigned long group,
- unsigned long major,
- unsigned long minor,
- unsigned long *actual_minor);
-
-extern int sun4v_hvapi_register(unsigned long group, unsigned long major,
- unsigned long *minor);
-extern void sun4v_hvapi_unregister(unsigned long group);
-extern int sun4v_hvapi_get(unsigned long group,
- unsigned long *major,
- unsigned long *minor);
-extern void sun4v_hvapi_init(void);
+unsigned long sun4v_get_version(unsigned long group,
+ unsigned long *major,
+ unsigned long *minor);
+unsigned long sun4v_set_version(unsigned long group,
+ unsigned long major,
+ unsigned long minor,
+ unsigned long *actual_minor);
+
+int sun4v_hvapi_register(unsigned long group, unsigned long major,
+ unsigned long *minor);
+void sun4v_hvapi_unregister(unsigned long group);
+int sun4v_hvapi_get(unsigned long group,
+ unsigned long *major,
+ unsigned long *minor);
+void sun4v_hvapi_init(void);
#endif
#endif /* !(_SPARC64_HYPERVISOR_H) */
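
The hypervisor API declarations above only lose their extern qualifiers; the calling convention is unchanged. As a reminder of how a driver consumes them, here is a minimal sketch of negotiating an API group version before issuing calls from that group (the group constant comes from this header, the version numbers are illustrative only):

	/* Sketch: register for major version 1 of the DIAG hypervisor group. */
	static int example_negotiate_hvapi(void)
	{
		unsigned long major = 1, minor;
		int err;

		err = sun4v_hvapi_register(HV_GRP_DIAG, major, &minor);
		if (err)
			return err;	/* group or major version not supported */

		/* ... hypervisor calls belonging to HV_GRP_DIAG may be issued ... */

		sun4v_hvapi_unregister(HV_GRP_DIAG);
		return 0;
	}
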
diff --git a/arch/sparc/include/asm/idprom.h b/arch/sparc/include/asm/idprom.h
index 6976aa2439c..3793f7f91c4 100644
--- a/arch/sparc/include/asm/idprom.h
+++ b/arch/sparc/include/asm/idprom.h
@@ -20,6 +20,6 @@ struct idprom {
};
extern struct idprom *idprom;
-extern void idprom_init(void);
+void idprom_init(void);
#endif /* !(_SPARC_IDPROM_H) */
diff --git a/arch/sparc/include/asm/io-unit.h b/arch/sparc/include/asm/io-unit.h
index 01ab2f613e9..04a9701e720 100644
--- a/arch/sparc/include/asm/io-unit.h
+++ b/arch/sparc/include/asm/io-unit.h
@@ -43,7 +43,7 @@
struct iounit_struct {
unsigned long bmap[(IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 3)) / sizeof(unsigned long)];
spinlock_t lock;
- iopte_t *page_table;
+ iopte_t __iomem *page_table;
unsigned long rotor[3];
unsigned long limit[4];
};
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index c1acbd891cb..9f532902627 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -2,191 +2,94 @@
#define __SPARC_IO_H
#include <linux/kernel.h>
-#include <linux/types.h>
#include <linux/ioport.h> /* struct resource */
-#include <asm/page.h> /* IO address mapping routines need this */
-#include <asm-generic/pci_iomap.h>
-
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
-static inline u32 flip_dword (u32 l)
-{
- return ((l&0xff)<<24) | (((l>>8)&0xff)<<16) | (((l>>16)&0xff)<<8)| ((l>>24)&0xff);
-}
-
-static inline u16 flip_word (u16 w)
-{
- return ((w&0xff) << 8) | ((w>>8)&0xff);
-}
-
-#define mmiowb()
-
-/*
- * Memory mapped I/O to PCI
- */
-
-static inline u8 __raw_readb(const volatile void __iomem *addr)
-{
- return *(__force volatile u8 *)addr;
-}
-
-static inline u16 __raw_readw(const volatile void __iomem *addr)
-{
- return *(__force volatile u16 *)addr;
-}
-
-static inline u32 __raw_readl(const volatile void __iomem *addr)
-{
- return *(__force volatile u32 *)addr;
-}
+#define readb_relaxed(__addr) readb(__addr)
+#define readw_relaxed(__addr) readw(__addr)
+#define readl_relaxed(__addr) readl(__addr)
-static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
-{
- *(__force volatile u8 *)addr = b;
-}
+#define IO_SPACE_LIMIT 0xffffffff
-static inline void __raw_writew(u16 w, volatile void __iomem *addr)
-{
- *(__force volatile u16 *)addr = w;
-}
+#define memset_io(d,c,sz) _memset_io(d,c,sz)
+#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
+#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
-static inline void __raw_writel(u32 l, volatile void __iomem *addr)
-{
- *(__force volatile u32 *)addr = l;
-}
+#include <asm-generic/io.h>
-static inline u8 __readb(const volatile void __iomem *addr)
+static inline void _memset_io(volatile void __iomem *dst,
+ int c, __kernel_size_t n)
{
- return *(__force volatile u8 *)addr;
-}
+ volatile void __iomem *d = dst;
-static inline u16 __readw(const volatile void __iomem *addr)
-{
- return flip_word(*(__force volatile u16 *)addr);
+ while (n--) {
+ writeb(c, d);
+ d++;
+ }
}
-static inline u32 __readl(const volatile void __iomem *addr)
+static inline void _memcpy_fromio(void *dst, const volatile void __iomem *src,
+ __kernel_size_t n)
{
- return flip_dword(*(__force volatile u32 *)addr);
-}
+ char *d = dst;
-static inline void __writeb(u8 b, volatile void __iomem *addr)
-{
- *(__force volatile u8 *)addr = b;
+ while (n--) {
+ char tmp = readb(src);
+ *d++ = tmp;
+ src++;
+ }
}
-static inline void __writew(u16 w, volatile void __iomem *addr)
+static inline void _memcpy_toio(volatile void __iomem *dst, const void *src,
+ __kernel_size_t n)
{
- *(__force volatile u16 *)addr = flip_word(w);
-}
+ const char *s = src;
+ volatile void __iomem *d = dst;
-static inline void __writel(u32 l, volatile void __iomem *addr)
-{
- *(__force volatile u32 *)addr = flip_dword(l);
+ while (n--) {
+ char tmp = *s++;
+ writeb(tmp, d);
+ d++;
+ }
}
-#define readb(__addr) __readb(__addr)
-#define readw(__addr) __readw(__addr)
-#define readl(__addr) __readl(__addr)
-#define readb_relaxed(__addr) readb(__addr)
-#define readw_relaxed(__addr) readw(__addr)
-#define readl_relaxed(__addr) readl(__addr)
-
-#define writeb(__b, __addr) __writeb((__b),(__addr))
-#define writew(__w, __addr) __writew((__w),(__addr))
-#define writel(__l, __addr) __writel((__l),(__addr))
-
-/*
- * I/O space operations
- *
- * Arrangement on a Sun is somewhat complicated.
- *
- * First of all, we want to use standard Linux drivers
- * for keyboard, PC serial, etc. These drivers think
- * they access I/O space and use inb/outb.
- * On the other hand, EBus bridge accepts PCI *memory*
- * cycles and converts them into ISA *I/O* cycles.
- * Ergo, we want inb & outb to generate PCI memory cycles.
- *
- * If we want to issue PCI *I/O* cycles, we do this
- * with a low 64K fixed window in PCIC. This window gets
- * mapped somewhere into virtual kernel space and we
- * can use inb/outb again.
- */
-#define inb_local(__addr) __readb((void __iomem *)(unsigned long)(__addr))
-#define inb(__addr) __readb((void __iomem *)(unsigned long)(__addr))
-#define inw(__addr) __readw((void __iomem *)(unsigned long)(__addr))
-#define inl(__addr) __readl((void __iomem *)(unsigned long)(__addr))
-
-#define outb_local(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
-#define outb(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
-#define outw(__w, __addr) __writew(__w, (void __iomem *)(unsigned long)(__addr))
-#define outl(__l, __addr) __writel(__l, (void __iomem *)(unsigned long)(__addr))
-
-#define inb_p(__addr) inb(__addr)
-#define outb_p(__b, __addr) outb(__b, __addr)
-#define inw_p(__addr) inw(__addr)
-#define outw_p(__w, __addr) outw(__w, __addr)
-#define inl_p(__addr) inl(__addr)
-#define outl_p(__l, __addr) outl(__l, __addr)
-
-void outsb(unsigned long addr, const void *src, unsigned long cnt);
-void outsw(unsigned long addr, const void *src, unsigned long cnt);
-void outsl(unsigned long addr, const void *src, unsigned long cnt);
-void insb(unsigned long addr, void *dst, unsigned long count);
-void insw(unsigned long addr, void *dst, unsigned long count);
-void insl(unsigned long addr, void *dst, unsigned long count);
-
-#define IO_SPACE_LIMIT 0xffffffff
-
/*
* SBus accessors.
*
* SBus has only one, memory mapped, I/O space.
* We do not need to flip bytes for SBus of course.
*/
-static inline u8 _sbus_readb(const volatile void __iomem *addr)
+static inline u8 sbus_readb(const volatile void __iomem *addr)
{
return *(__force volatile u8 *)addr;
}
-static inline u16 _sbus_readw(const volatile void __iomem *addr)
+static inline u16 sbus_readw(const volatile void __iomem *addr)
{
return *(__force volatile u16 *)addr;
}
-static inline u32 _sbus_readl(const volatile void __iomem *addr)
+static inline u32 sbus_readl(const volatile void __iomem *addr)
{
return *(__force volatile u32 *)addr;
}
-static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
+static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
{
*(__force volatile u8 *)addr = b;
}
-static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
+static inline void sbus_writew(u16 w, volatile void __iomem *addr)
{
*(__force volatile u16 *)addr = w;
}
-static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
+static inline void sbus_writel(u32 l, volatile void __iomem *addr)
{
*(__force volatile u32 *)addr = l;
}
-/*
- * The only reason for #define's is to hide casts to unsigned long.
- */
-#define sbus_readb(__addr) _sbus_readb(__addr)
-#define sbus_readw(__addr) _sbus_readw(__addr)
-#define sbus_readl(__addr) _sbus_readl(__addr)
-#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr)
-#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr)
-#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr)
-
-static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_size_t n)
+static inline void sbus_memset_io(volatile void __iomem *__dst, int c,
+ __kernel_size_t n)
{
while(n--) {
sbus_writeb(c, __dst);
@@ -194,22 +97,9 @@ static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_
}
}
-static inline void
-_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
-{
- volatile void __iomem *d = dst;
-
- while (n--) {
- writeb(c, d);
- d++;
- }
-}
-
-#define memset_io(d,c,sz) _memset_io(d,c,sz)
-
-static inline void
-_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
- __kernel_size_t n)
+static inline void sbus_memcpy_fromio(void *dst,
+ const volatile void __iomem *src,
+ __kernel_size_t n)
{
char *d = dst;
@@ -220,25 +110,9 @@ _sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
}
}
-#define sbus_memcpy_fromio(d, s, sz) _sbus_memcpy_fromio(d, s, sz)
-
-static inline void
-_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
-{
- char *d = dst;
-
- while (n--) {
- char tmp = readb(src);
- *d++ = tmp;
- src++;
- }
-}
-
-#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
-
-static inline void
-_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
- __kernel_size_t n)
+static inline void sbus_memcpy_toio(volatile void __iomem *dst,
+ const void *src,
+ __kernel_size_t n)
{
const char *s = src;
volatile void __iomem *d = dst;
@@ -250,81 +124,26 @@ _sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
}
}
-#define sbus_memcpy_toio(d, s, sz) _sbus_memcpy_toio(d, s, sz)
-
-static inline void
-_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
-{
- const char *s = src;
- volatile void __iomem *d = dst;
-
- while (n--) {
- char tmp = *s++;
- writeb(tmp, d);
- d++;
- }
-}
-
-#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
-
#ifdef __KERNEL__
/*
* Bus number may be embedded in the higher bits of the physical address.
* This is why we have no bus number argument to ioremap().
*/
-extern void __iomem *ioremap(unsigned long offset, unsigned long size);
+void __iomem *ioremap(unsigned long offset, unsigned long size);
#define ioremap_nocache(X,Y) ioremap((X),(Y))
#define ioremap_wc(X,Y) ioremap((X),(Y))
-extern void iounmap(volatile void __iomem *addr);
-
-#define ioread8(X) readb(X)
-#define ioread16(X) readw(X)
-#define ioread16be(X) __raw_readw(X)
-#define ioread32(X) readl(X)
-#define ioread32be(X) __raw_readl(X)
-#define iowrite8(val,X) writeb(val,X)
-#define iowrite16(val,X) writew(val,X)
-#define iowrite16be(val,X) __raw_writew(val,X)
-#define iowrite32(val,X) writel(val,X)
-#define iowrite32be(val,X) __raw_writel(val,X)
-
-static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
-{
- insb((unsigned long __force)port, buf, count);
-}
-static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
-{
- insw((unsigned long __force)port, buf, count);
-}
-
-static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
-{
- insl((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
-{
- outsb((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
-{
- outsw((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
-{
- outsl((unsigned long __force)port, buf, count);
-}
+void iounmap(volatile void __iomem *addr);
/* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
+void __iomem *ioport_map(unsigned long port, unsigned int nr);
+void ioport_unmap(void __iomem *);
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
+
/*
* At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
@@ -343,21 +162,11 @@ static inline int sbus_can_burst64(void)
return 0; /* actually, sparc_cpu_model==sun4d */
}
struct device;
-extern void sbus_set_sbus64(struct device *, int);
+void sbus_set_sbus64(struct device *, int);
#endif
#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
#endif /* !(__SPARC_IO_H) */
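
With the rewrite above, sparc32 now takes the bulk of its MMIO helpers from asm-generic/io.h and only supplies the byte-wise memset_io()/memcpy_fromio()/memcpy_toio() wrappers and the SBus accessors locally; driver-side usage is unchanged. A minimal sketch, with the device offset, window size and error code as placeholders:

	static int example_push_block(const void *buf, unsigned int len)
	{
		void __iomem *regs;

		regs = ioremap(0xf0000000, 0x100);	/* placeholder offset/size */
		if (!regs)
			return -ENOMEM;

		memcpy_toio(regs, buf, len);		/* byte-wise writeb() loop above */
		iounmap(regs);
		return 0;
	}
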
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 09b0b88aeb2..05381c3a422 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -15,7 +15,6 @@
/* BIO layer definitions. */
extern unsigned long kern_base, kern_size;
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
static inline u8 _inb(unsigned long addr)
{
@@ -91,12 +90,12 @@ static inline void _outl(u32 l, unsigned long addr)
#define inl_p(__addr) inl(__addr)
#define outl_p(__l, __addr) outl(__l, __addr)
-extern void outsb(unsigned long, const void *, unsigned long);
-extern void outsw(unsigned long, const void *, unsigned long);
-extern void outsl(unsigned long, const void *, unsigned long);
-extern void insb(unsigned long, void *, unsigned long);
-extern void insw(unsigned long, void *, unsigned long);
-extern void insl(unsigned long, void *, unsigned long);
+void outsb(unsigned long, const void *, unsigned long);
+void outsw(unsigned long, const void *, unsigned long);
+void outsl(unsigned long, const void *, unsigned long);
+void insb(unsigned long, void *, unsigned long);
+void insw(unsigned long, void *, unsigned long);
+void insl(unsigned long, void *, unsigned long);
static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
{
@@ -509,12 +508,12 @@ static inline void iounmap(volatile void __iomem *addr)
#define iowrite32be(val,X) __raw_writel(val,X)
/* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
+void __iomem *ioport_map(unsigned long port, unsigned int nr);
+void ioport_unmap(void __iomem *);
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+void pci_iounmap(struct pci_dev *dev, void __iomem *);
static inline int sbus_can_dma_64bit(void)
{
@@ -525,7 +524,7 @@ static inline int sbus_can_burst64(void)
return 1;
}
struct device;
-extern void sbus_set_sbus64(struct device *, int);
+void sbus_set_sbus64(struct device *, int);
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/arch/sparc/include/asm/iommu_32.h b/arch/sparc/include/asm/iommu_32.h
index 70c589c05a1..f6c066b52fd 100644
--- a/arch/sparc/include/asm/iommu_32.h
+++ b/arch/sparc/include/asm/iommu_32.h
@@ -99,7 +99,7 @@ struct iommu_regs {
#define IOPTE_WAZ 0x00000001 /* Write as zeros */
struct iommu_struct {
- struct iommu_regs *regs;
+ struct iommu_regs __iomem *regs;
iopte_t *page_table;
/* For convenience */
unsigned long start; /* First managed virtual address */
@@ -108,14 +108,14 @@ struct iommu_struct {
struct bit_map usemap;
};
-static inline void iommu_invalidate(struct iommu_regs *regs)
+static inline void iommu_invalidate(struct iommu_regs __iomem *regs)
{
- regs->tlbflush = 0;
+ sbus_writel(0, &regs->tlbflush);
}
-static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
+static inline void iommu_invalidate_page(struct iommu_regs __iomem *regs, unsigned long ba)
{
- regs->pageflush = (ba & PAGE_MASK);
+ sbus_writel(ba & PAGE_MASK, &regs->pageflush);
}
#endif /* !(_SPARC_IOMMU_H) */
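
The register block is now __iomem-annotated, so sparse flags direct dereferences; the accessor form used in the helpers above is the expected pattern. A one-function sketch from the caller's side, assuming the iommu pointer is already set up:

	static void example_flush_iommu_tlb(struct iommu_struct *iommu)
	{
		/* A direct store ("iommu->regs->tlbflush = 0") would now draw a
		 * sparse __iomem warning; go through the SBus accessor instead.
		 */
		sbus_writel(0, &iommu->regs->tlbflush);
	}
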
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index caf798b5619..2b9321ab064 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -58,8 +58,8 @@ struct strbuf {
volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)];
};
-extern int iommu_table_init(struct iommu *iommu, int tsbsize,
- u32 dma_offset, u32 dma_addr_mask,
- int numa_node);
+int iommu_table_init(struct iommu *iommu, int tsbsize,
+ u32 dma_offset, u32 dma_addr_mask,
+ int numa_node);
#endif /* !(_SPARC64_IOMMU_H) */
diff --git a/arch/sparc/include/asm/irq_32.h b/arch/sparc/include/asm/irq_32.h
index 2ae3acaeb1b..eecd3d8442c 100644
--- a/arch/sparc/include/asm/irq_32.h
+++ b/arch/sparc/include/asm/irq_32.h
@@ -16,7 +16,8 @@
#define irq_canonicalize(irq) (irq)
-extern void __init init_IRQ(void);
+void __init init_IRQ(void);
+void __init sun4d_init_sbi_irq(void);
#define NO_IRQ 0xffffffff
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index abf6afe82ca..91d21938130 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -39,32 +39,32 @@
*/
#define NR_IRQS 255
-extern void irq_install_pre_handler(int irq,
- void (*func)(unsigned int, void *, void *),
- void *arg1, void *arg2);
+void irq_install_pre_handler(int irq,
+ void (*func)(unsigned int, void *, void *),
+ void *arg1, void *arg2);
#define irq_canonicalize(irq) (irq)
-extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
-extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
- unsigned int msi_devino_start,
- unsigned int msi_devino_end);
-extern void sun4v_destroy_msi(unsigned int irq);
-extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
- unsigned int msi_devino_start,
- unsigned int msi_devino_end,
- unsigned long imap_base,
- unsigned long iclr_base);
-extern void sun4u_destroy_msi(unsigned int irq);
-
-extern unsigned char irq_alloc(unsigned int dev_handle,
- unsigned int dev_ino);
+unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
+unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
+unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
+ unsigned int msi_devino_start,
+ unsigned int msi_devino_end);
+void sun4v_destroy_msi(unsigned int irq);
+unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
+ unsigned int msi_devino_start,
+ unsigned int msi_devino_end,
+ unsigned long imap_base,
+ unsigned long iclr_base);
+void sun4u_destroy_msi(unsigned int irq);
+
+unsigned char irq_alloc(unsigned int dev_handle,
+ unsigned int dev_ino);
#ifdef CONFIG_PCI_MSI
-extern void irq_free(unsigned int irq);
+void irq_free(unsigned int irq);
#endif
-extern void __init init_IRQ(void);
-extern void fixup_irqs(void);
+void __init init_IRQ(void);
+void fixup_irqs(void);
static inline void set_softint(unsigned long bits)
{
@@ -89,7 +89,7 @@ static inline unsigned long get_softint(void)
return retval;
}
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
extern void *hardirq_stack[NR_CPUS];
diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h
index e414c06615c..71cc284f55c 100644
--- a/arch/sparc/include/asm/irqflags_32.h
+++ b/arch/sparc/include/asm/irqflags_32.h
@@ -15,9 +15,9 @@
#include <linux/types.h>
#include <asm/psr.h>
-extern void arch_local_irq_restore(unsigned long);
-extern unsigned long arch_local_irq_save(void);
-extern void arch_local_irq_enable(void);
+void arch_local_irq_restore(unsigned long);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_enable(void);
static inline notrace unsigned long arch_local_save_flags(void)
{
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 5080d16a832..ec2e2e2aba7 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\n\t"
+ asm_volatile_goto("1:\n\t"
"nop\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
diff --git a/arch/sparc/include/asm/kdebug_64.h b/arch/sparc/include/asm/kdebug_64.h
index feb3578e12c..04465de8f3b 100644
--- a/arch/sparc/include/asm/kdebug_64.h
+++ b/arch/sparc/include/asm/kdebug_64.h
@@ -3,7 +3,7 @@
struct pt_regs;
-extern void bad_trap(struct pt_regs *, long);
+void bad_trap(struct pt_regs *, long);
/* Grossly misnamed. */
enum die_val {
diff --git a/arch/sparc/include/asm/kgdb.h b/arch/sparc/include/asm/kgdb.h
index b6ef301d05b..47366af7a58 100644
--- a/arch/sparc/include/asm/kgdb.h
+++ b/arch/sparc/include/asm/kgdb.h
@@ -28,9 +28,12 @@ enum regnames {
#define NUMREGBYTES ((GDB_CSR + 1) * 4)
#else
#define NUMREGBYTES ((GDB_Y + 1) * 8)
+
+struct pt_regs;
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs);
#endif
-extern void arch_kgdb_breakpoint(void);
+void arch_kgdb_breakpoint(void);
#define BREAK_INSTR_SIZE 4
#define CACHE_FLUSH_IS_SAFE 1
diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h
index 5879d71afda..a145d798e11 100644
--- a/arch/sparc/include/asm/kprobes.h
+++ b/arch/sparc/include/asm/kprobes.h
@@ -43,7 +43,9 @@ struct kprobe_ctlblk {
struct prev_kprobe prev_kprobe;
};
-extern int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
-extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
+ struct pt_regs *regs);
#endif /* _SPARC64_KPROBES_H */
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index bdb524a7b81..c8c67f621f4 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -4,9 +4,9 @@
#include <asm/hypervisor.h>
extern int ldom_domaining_enabled;
-extern void ldom_set_var(const char *var, const char *value);
-extern void ldom_reboot(const char *boot_command);
-extern void ldom_power_off(void);
+void ldom_set_var(const char *var, const char *value);
+void ldom_reboot(const char *boot_command);
+void ldom_power_off(void);
/* The event handler will be invoked when link state changes

* or data becomes available on the receive side.
@@ -51,30 +51,30 @@ struct ldc_channel_config {
struct ldc_channel;
/* Allocate state for a channel. */
-extern struct ldc_channel *ldc_alloc(unsigned long id,
- const struct ldc_channel_config *cfgp,
- void *event_arg);
+struct ldc_channel *ldc_alloc(unsigned long id,
+ const struct ldc_channel_config *cfgp,
+ void *event_arg);
/* Shut down and free state for a channel. */
-extern void ldc_free(struct ldc_channel *lp);
+void ldc_free(struct ldc_channel *lp);
/* Register TX and RX queues of the link with the hypervisor. */
-extern int ldc_bind(struct ldc_channel *lp, const char *name);
+int ldc_bind(struct ldc_channel *lp, const char *name);
/* For non-RAW protocols we need to complete a handshake before
* communication can proceed. ldc_connect() does that, if the
* handshake completes successfully, an LDC_EVENT_UP event will
* be sent up to the driver.
*/
-extern int ldc_connect(struct ldc_channel *lp);
-extern int ldc_disconnect(struct ldc_channel *lp);
+int ldc_connect(struct ldc_channel *lp);
+int ldc_disconnect(struct ldc_channel *lp);
-extern int ldc_state(struct ldc_channel *lp);
+int ldc_state(struct ldc_channel *lp);
/* Read and write operations. Only valid when the link is up. */
-extern int ldc_write(struct ldc_channel *lp, const void *buf,
- unsigned int size);
-extern int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);
+int ldc_write(struct ldc_channel *lp, const void *buf,
+ unsigned int size);
+int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);
#define LDC_MAP_SHADOW 0x01
#define LDC_MAP_DIRECT 0x02
@@ -92,22 +92,22 @@ struct ldc_trans_cookie {
};
struct scatterlist;
-extern int ldc_map_sg(struct ldc_channel *lp,
- struct scatterlist *sg, int num_sg,
- struct ldc_trans_cookie *cookies, int ncookies,
- unsigned int map_perm);
+int ldc_map_sg(struct ldc_channel *lp,
+ struct scatterlist *sg, int num_sg,
+ struct ldc_trans_cookie *cookies, int ncookies,
+ unsigned int map_perm);
-extern int ldc_map_single(struct ldc_channel *lp,
- void *buf, unsigned int len,
- struct ldc_trans_cookie *cookies, int ncookies,
- unsigned int map_perm);
+int ldc_map_single(struct ldc_channel *lp,
+ void *buf, unsigned int len,
+ struct ldc_trans_cookie *cookies, int ncookies,
+ unsigned int map_perm);
-extern void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
- int ncookies);
+void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
+ int ncookies);
-extern int ldc_copy(struct ldc_channel *lp, int copy_dir,
- void *buf, unsigned int len, unsigned long offset,
- struct ldc_trans_cookie *cookies, int ncookies);
+int ldc_copy(struct ldc_channel *lp, int copy_dir,
+ void *buf, unsigned int len, unsigned long offset,
+ struct ldc_trans_cookie *cookies, int ncookies);
static inline int ldc_get_dring_entry(struct ldc_channel *lp,
void *buf, unsigned int len,
@@ -127,12 +127,12 @@ static inline int ldc_put_dring_entry(struct ldc_channel *lp,
return ldc_copy(lp, LDC_COPY_OUT, buf, len, offset, cookies, ncookies);
}
-extern void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
- struct ldc_trans_cookie *cookies,
- int *ncookies, unsigned int map_perm);
+void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
+ struct ldc_trans_cookie *cookies,
+ int *ncookies, unsigned int map_perm);
-extern void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
- unsigned int len,
- struct ldc_trans_cookie *cookies, int ncookies);
+void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
+ unsigned int len,
+ struct ldc_trans_cookie *cookies, int ncookies);
#endif /* _SPARC64_LDC_H */
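
Taken together, the comments above describe the channel lifecycle: allocate, bind, connect (for non-RAW protocols the handshake completes asynchronously and the driver is notified via LDC_EVENT_UP), then read/write. A hedged sketch, assuming ERR_PTR-style error returns from ldc_alloc() and leaving the channel id, config and event handling to the caller:

	static struct ldc_channel *example_open_channel(unsigned long id,
					const struct ldc_channel_config *cfg,
					void *arg)
	{
		struct ldc_channel *lp;
		int err;

		lp = ldc_alloc(id, cfg, arg);		/* allocate channel state */
		if (IS_ERR(lp))
			return lp;

		err = ldc_bind(lp, "example");		/* register queues with the HV */
		if (!err)
			err = ldc_connect(lp);		/* start handshake; LDC_EVENT_UP follows */
		if (err) {
			ldc_free(lp);
			return ERR_PTR(err);
		}
		return lp;
	}
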
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 15a716934e4..204771cd74a 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -82,8 +82,8 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
-extern void leon_switch_mm(void);
-extern void leon_init_IRQ(void);
+void leon_switch_mm(void);
+void leon_init_IRQ(void);
static inline unsigned long sparc_leon3_get_dcachecfg(void)
{
@@ -108,7 +108,7 @@ static inline int sparc_leon3_snooping_enabled(void)
{
u32 cctrl;
__asm__ __volatile__("lda [%%g0] 2, %0\n\t" : "=r"(cctrl));
- return (cctrl >> 23) & 1;
+ return ((cctrl >> 23) & 1) && ((cctrl >> 17) & 1);
};
static inline void sparc_leon3_disable_cache(void)
@@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void)
#ifdef CONFIG_SMP
# define LEON3_IRQ_IPI_DEFAULT 13
-# define LEON3_IRQ_TICKER (leon3_ticker_irq)
+# define LEON3_IRQ_TICKER (leon3_gptimer_irq)
# define LEON3_IRQ_CROSS_CALL 15
#endif
@@ -196,14 +196,14 @@ static inline int sparc_leon3_cpuid(void)
#ifndef __ASSEMBLY__
struct vm_area_struct;
-extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
-extern void leon_flush_icache_all(void);
-extern void leon_flush_dcache_all(void);
-extern void leon_flush_cache_all(void);
-extern void leon_flush_tlb_all(void);
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
+void leon_flush_icache_all(void);
+void leon_flush_dcache_all(void);
+void leon_flush_cache_all(void);
+void leon_flush_tlb_all(void);
extern int leon_flush_during_switch;
-extern int leon_flush_needed(void);
-extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
+int leon_flush_needed(void);
+void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
/* struct that hold LEON3 cache configuration registers */
struct leon3_cacheregs {
@@ -217,29 +217,29 @@ struct leon3_cacheregs {
struct device_node;
struct task_struct;
-extern unsigned int leon_build_device_irq(unsigned int real_irq,
- irq_flow_handler_t flow_handler,
- const char *name, int do_ack);
-extern void leon_update_virq_handling(unsigned int virq,
- irq_flow_handler_t flow_handler,
- const char *name, int do_ack);
-extern void leon_init_timers(void);
-extern void leon_trans_init(struct device_node *dp);
-extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
-extern void init_leon(void);
-extern void poke_leonsparc(void);
-extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
+unsigned int leon_build_device_irq(unsigned int real_irq,
+ irq_flow_handler_t flow_handler,
+ const char *name, int do_ack);
+void leon_update_virq_handling(unsigned int virq,
+ irq_flow_handler_t flow_handler,
+ const char *name, int do_ack);
+void leon_init_timers(void);
+void leon_trans_init(struct device_node *dp);
+void leon_node_init(struct device_node *dp, struct device_node ***nextp);
+void init_leon(void);
+void poke_leonsparc(void);
+void leon3_getCacheRegs(struct leon3_cacheregs *regs);
extern int leon3_ticker_irq;
#ifdef CONFIG_SMP
-extern int leon_smp_nrcpus(void);
-extern void leon_clear_profile_irq(int cpu);
-extern void leon_smp_done(void);
-extern void leon_boot_cpus(void);
-extern int leon_boot_one_cpu(int i, struct task_struct *);
+int leon_smp_nrcpus(void);
+void leon_clear_profile_irq(int cpu);
+void leon_smp_done(void);
+void leon_boot_cpus(void);
+int leon_boot_one_cpu(int i, struct task_struct *);
void leon_init_smp(void);
void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
-extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
+irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
extern unsigned int smpleon_ipi[];
extern unsigned int linux_trap_ipi15_leon[];
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index f3034eddf46..24ec48c3ff9 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -47,6 +47,7 @@ struct amba_prom_registers {
#define LEON3_GPTIMER_LD 4
#define LEON3_GPTIMER_IRQEN 8
#define LEON3_GPTIMER_SEPIRQ 8
+#define LEON3_GPTIMER_TIMERS 0x7
#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */
/* 0 = hold scalar and counter */
diff --git a/arch/sparc/include/asm/leon_pci.h b/arch/sparc/include/asm/leon_pci.h
index f48527ebdd8..049d067ed8b 100644
--- a/arch/sparc/include/asm/leon_pci.h
+++ b/arch/sparc/include/asm/leon_pci.h
@@ -12,10 +12,11 @@ struct leon_pci_info {
struct pci_ops *ops;
struct resource io_space;
struct resource mem_space;
+ struct resource busn;
int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
};
-extern void leon_pci_init(struct platform_device *ofdev,
- struct leon_pci_info *info);
+void leon_pci_init(struct platform_device *ofdev,
+ struct leon_pci_info *info);
#endif /* _ASM_LEON_PCI_H_ */
diff --git a/arch/sparc/include/asm/linkage.h b/arch/sparc/include/asm/linkage.h
deleted file mode 100644
index 291c2d01c44..00000000000
--- a/arch/sparc/include/asm/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_LINKAGE_H
-#define __ASM_LINKAGE_H
-
-/* Nothing to see here... */
-
-#endif
diff --git a/arch/sparc/include/asm/mc146818rtc.h b/arch/sparc/include/asm/mc146818rtc.h
index 67ed9e3a023..d8e72f37dc4 100644
--- a/arch/sparc/include/asm/mc146818rtc.h
+++ b/arch/sparc/include/asm/mc146818rtc.h
@@ -1,5 +1,10 @@
#ifndef ___ASM_SPARC_MC146818RTC_H
#define ___ASM_SPARC_MC146818RTC_H
+
+#include <linux/spinlock.h>
+
+extern spinlock_t rtc_lock;
+
#if defined(__sparc__) && defined(__arch64__)
#include <asm/mc146818rtc_64.h>
#else
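
The newly exported rtc_lock is the usual spinlock guarding CMOS/RTC register access; callers are expected to hold it across paired index/data accesses. A minimal sketch (the CMOS_READ usage is illustrative only):

	static unsigned char example_read_rtc_seconds(void)
	{
		unsigned long flags;
		unsigned char sec;

		spin_lock_irqsave(&rtc_lock, flags);
		sec = CMOS_READ(RTC_SECONDS);	/* illustrative register read */
		spin_unlock_irqrestore(&rtc_lock, flags);

		return sec;
	}
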
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index 139097f3a67..aebeb88f70d 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -12,13 +12,13 @@ struct mdesc_handle;
* the first argument to all of the operational calls that work
* on mdescs.
*/
-extern struct mdesc_handle *mdesc_grab(void);
-extern void mdesc_release(struct mdesc_handle *);
+struct mdesc_handle *mdesc_grab(void);
+void mdesc_release(struct mdesc_handle *);
#define MDESC_NODE_NULL (~(u64)0)
-extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
- u64 from_node, const char *name);
+u64 mdesc_node_by_name(struct mdesc_handle *handle,
+ u64 from_node, const char *name);
#define mdesc_for_each_node_by_name(__hdl, __node, __name) \
for (__node = mdesc_node_by_name(__hdl, MDESC_NODE_NULL, __name); \
(__node) != MDESC_NODE_NULL; \
@@ -34,9 +34,9 @@ extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
*
* These same rules apply to mdesc_node_name().
*/
-extern const void *mdesc_get_property(struct mdesc_handle *handle,
- u64 node, const char *name, int *lenp);
-extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
+const void *mdesc_get_property(struct mdesc_handle *handle,
+ u64 node, const char *name, int *lenp);
+const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
/* MD arc iteration, the standard sequence is:
*
@@ -50,16 +50,16 @@ extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
#define MDESC_ARC_TYPE_FWD "fwd"
#define MDESC_ARC_TYPE_BACK "back"
-extern u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
- const char *arc_type);
+u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
+ const char *arc_type);
#define mdesc_for_each_arc(__arc, __hdl, __node, __type) \
for (__arc = mdesc_next_arc(__hdl, __node, __type); \
(__arc) != MDESC_NODE_NULL; \
__arc = mdesc_next_arc(__hdl, __arc, __type))
-extern u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
+u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
-extern void mdesc_update(void);
+void mdesc_update(void);
struct mdesc_notifier_client {
void (*add)(struct mdesc_handle *handle, u64 node);
@@ -69,12 +69,12 @@ struct mdesc_notifier_client {
struct mdesc_notifier_client *next;
};
-extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
+void mdesc_register_notifier(struct mdesc_notifier_client *client);
-extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
-extern void mdesc_populate_present_mask(cpumask_t *mask);
-extern void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
+void mdesc_fill_in_cpu_data(cpumask_t *mask);
+void mdesc_populate_present_mask(cpumask_t *mask);
+void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
-extern void sun4v_mdesc_init(void);
+void sun4v_mdesc_init(void);
#endif
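
The iteration macros kept above give the standard pattern for walking the machine description: grab a handle, iterate nodes by name, read properties, release. A small sketch; the node and property names ("cpu", "clock-frequency") are used for illustration:

	static void example_list_cpu_clocks(void)
	{
		struct mdesc_handle *hp = mdesc_grab();
		u64 node;

		if (!hp)
			return;

		mdesc_for_each_node_by_name(hp, node, "cpu") {
			const u64 *freq = mdesc_get_property(hp, node,
							     "clock-frequency", NULL);
			if (freq)
				pr_info("cpu node clock %llu Hz\n",
					(unsigned long long)*freq);
		}

		mdesc_release(hp);
	}
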
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 76092c4dd27..70067ce184b 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -67,9 +67,9 @@ struct tsb {
unsigned long pte;
} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
-extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
-extern void tsb_flush(unsigned long ent, unsigned long tag);
-extern void tsb_init(struct tsb *tsb, unsigned long size);
+void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
+void tsb_flush(unsigned long ent, unsigned long tag);
+void tsb_init(struct tsb *tsb, unsigned long size);
struct tsb_config {
struct tsb *tsb;
@@ -93,7 +93,6 @@ typedef struct {
spinlock_t lock;
unsigned long sparc64_ctx_val;
unsigned long huge_pte_count;
- struct page *pgtable_page;
struct tsb_config tsb_block[MM_NUM_TSBS];
struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
} mm_context_t;
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 9191ca62ed9..b84be675e50 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -17,20 +17,20 @@ extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
-extern void get_new_mmu_context(struct mm_struct *mm);
+void get_new_mmu_context(struct mm_struct *mm);
#ifdef CONFIG_SMP
-extern void smp_new_mmu_context_version(void);
+void smp_new_mmu_context_version(void);
#else
#define smp_new_mmu_context_version() do { } while (0)
#endif
-extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-extern void destroy_context(struct mm_struct *mm);
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
-extern void __tsb_context_switch(unsigned long pgd_pa,
- struct tsb_config *tsb_base,
- struct tsb_config *tsb_huge,
- unsigned long tsb_descr_pa);
+void __tsb_context_switch(unsigned long pgd_pa,
+ struct tsb_config *tsb_base,
+ struct tsb_config *tsb_huge,
+ unsigned long tsb_descr_pa);
static inline void tsb_context_switch(struct mm_struct *mm)
{
@@ -46,9 +46,11 @@ static inline void tsb_context_switch(struct mm_struct *mm)
, __pa(&mm->context.tsb_descr[0]));
}
-extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
+void tsb_grow(struct mm_struct *mm,
+ unsigned long tsb_index,
+ unsigned long mm_rss);
#ifdef CONFIG_SMP
-extern void smp_tsb_sync(struct mm_struct *mm);
+void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif
@@ -66,9 +68,9 @@ extern void smp_tsb_sync(struct mm_struct *mm);
: "r" (CTX_HWBITS((__mm)->context)), \
"r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
-extern void __flush_tlb_mm(unsigned long, unsigned long);
+void __flush_tlb_mm(unsigned long, unsigned long);
-/* Switch the current MM context. Interrupts are disabled. */
+/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long ctx_valid, flags;
diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc1..00000000000
--- a/arch/sparc/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
index 72e6500e7ab..26ad2b2607c 100644
--- a/arch/sparc/include/asm/nmi.h
+++ b/arch/sparc/include/asm/nmi.h
@@ -1,13 +1,13 @@
#ifndef __NMI_H
#define __NMI_H
-extern int __init nmi_init(void);
-extern void perfctr_irq(int irq, struct pt_regs *regs);
-extern void nmi_adjust_hz(unsigned int new_hz);
+int __init nmi_init(void);
+void perfctr_irq(int irq, struct pt_regs *regs);
+void nmi_adjust_hz(unsigned int new_hz);
extern atomic_t nmi_active;
-extern void start_nmi_watchdog(void *unused);
-extern void stop_nmi_watchdog(void *unused);
+void start_nmi_watchdog(void *unused);
+void stop_nmi_watchdog(void *unused);
#endif /* __NMI_H */
diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
index c72f3045820..56a09b9d7b1 100644
--- a/arch/sparc/include/asm/oplib_32.h
+++ b/arch/sparc/include/asm/oplib_32.h
@@ -43,28 +43,28 @@ extern struct linux_nodeops *prom_nodeops;
/* You must call prom_init() before using any of the library services,
* preferably as early as possible. Pass it the romvec pointer.
*/
-extern void prom_init(struct linux_romvec *rom_ptr);
+void prom_init(struct linux_romvec *rom_ptr);
/* Boot argument acquisition, returns the boot command line string. */
-extern char *prom_getbootargs(void);
+char *prom_getbootargs(void);
/* Miscellaneous routines, don't really fit in any category per se. */
/* Reboot the machine with the command line passed. */
-extern void prom_reboot(char *boot_command);
+void prom_reboot(char *boot_command);
/* Evaluate the forth string passed. */
-extern void prom_feval(char *forth_string);
+void prom_feval(char *forth_string);
/* Enter the prom, with possibility of continuation with the 'go'
* command in newer proms.
*/
-extern void prom_cmdline(void);
+void prom_cmdline(void);
/* Enter the prom, with no chance of continuation for the stand-alone
* which calls this.
*/
-extern void __noreturn prom_halt(void);
+void __noreturn prom_halt(void);
/* Set the PROM 'sync' callback function to the passed function pointer.
* When the user gives the 'sync' command at the prom prompt while the
@@ -73,37 +73,37 @@ extern void __noreturn prom_halt(void);
* XXX The arguments are different on V0 vs. V2->higher proms, grrr! XXX
*/
typedef void (*sync_func_t)(void);
-extern void prom_setsync(sync_func_t func_ptr);
+void prom_setsync(sync_func_t func_ptr);
/* Acquire the IDPROM of the root node in the prom device tree. This
* gets passed a buffer where you would like it stuffed. The return value
* is the format type of this idprom or 0xff on error.
*/
-extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
+unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
/* Get the prom major version. */
-extern int prom_version(void);
+int prom_version(void);
/* Get the prom plugin revision. */
-extern int prom_getrev(void);
+int prom_getrev(void);
/* Get the prom firmware revision. */
-extern int prom_getprev(void);
+int prom_getprev(void);
/* Write a buffer of characters to the console. */
-extern void prom_console_write_buf(const char *buf, int len);
+void prom_console_write_buf(const char *buf, int len);
/* Prom's internal routines, don't use in kernel/boot code. */
-extern __printf(1, 2) void prom_printf(const char *fmt, ...);
-extern void prom_write(const char *buf, unsigned int len);
+__printf(1, 2) void prom_printf(const char *fmt, ...);
+void prom_write(const char *buf, unsigned int len);
/* Multiprocessor operations... */
/* Start the CPU with the given device tree node, context table, and context
* at the passed program counter.
*/
-extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
- int context, char *program_counter);
+int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
+ int context, char *program_counter);
/* Initialize the memory lists based upon the prom version. */
void prom_meminit(void);
@@ -111,65 +111,65 @@ void prom_meminit(void);
/* PROM device tree traversal functions... */
/* Get the child node of the given node, or zero if no child exists. */
-extern phandle prom_getchild(phandle parent_node);
+phandle prom_getchild(phandle parent_node);
/* Get the next sibling node of the given node, or zero if no further
* siblings exist.
*/
-extern phandle prom_getsibling(phandle node);
+phandle prom_getsibling(phandle node);
/* Get the length, at the passed node, of the given property type.
* Returns -1 on error (ie. no such property at this node).
*/
-extern int prom_getproplen(phandle thisnode, const char *property);
+int prom_getproplen(phandle thisnode, const char *property);
/* Fetch the requested property using the given buffer. Returns
* the number of bytes the prom put into your buffer or -1 on error.
*/
-extern int __must_check prom_getproperty(phandle thisnode, const char *property,
- char *prop_buffer, int propbuf_size);
+int __must_check prom_getproperty(phandle thisnode, const char *property,
+ char *prop_buffer, int propbuf_size);
/* Acquire an integer property. */
-extern int prom_getint(phandle node, char *property);
+int prom_getint(phandle node, char *property);
/* Acquire an integer property, with a default value. */
-extern int prom_getintdefault(phandle node, char *property, int defval);
+int prom_getintdefault(phandle node, char *property, int defval);
/* Acquire a boolean property, 0=FALSE 1=TRUE. */
-extern int prom_getbool(phandle node, char *prop);
+int prom_getbool(phandle node, char *prop);
/* Acquire a string property, null string on error. */
-extern void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
+void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
/* Search all siblings starting at the passed node for "name" matching
* the given string. Returns the node on success, zero on failure.
*/
-extern phandle prom_searchsiblings(phandle node_start, char *name);
+phandle prom_searchsiblings(phandle node_start, char *name);
/* Returns the next property after the passed property for the given
* node. Returns null string on failure.
*/
-extern char *prom_nextprop(phandle node, char *prev_property, char *buffer);
+char *prom_nextprop(phandle node, char *prev_property, char *buffer);
/* Returns phandle of the path specified */
-extern phandle prom_finddevice(char *name);
+phandle prom_finddevice(char *name);
/* Set the indicated property at the given node with the passed value.
* Returns the number of bytes of your value that the prom took.
*/
-extern int prom_setprop(phandle node, const char *prop_name, char *prop_value,
- int value_size);
+int prom_setprop(phandle node, const char *prop_name, char *prop_value,
+ int value_size);
-extern phandle prom_inst2pkg(int);
+phandle prom_inst2pkg(int);
/* Dorking with Bus ranges... */
/* Apply promlib probes OBIO ranges to registers. */
-extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
+void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
/* Apply ranges of any prom node (and optionally parent node as well) to registers. */
-extern void prom_apply_generic_ranges(phandle node, phandle parent,
- struct linux_prom_registers *sbusregs, int nregs);
+void prom_apply_generic_ranges(phandle node, phandle parent,
+ struct linux_prom_registers *sbusregs, int nregs);
void prom_ranges_init(void);
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
index a12dbe3b776..f34682430fc 100644
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -62,100 +62,100 @@ struct linux_mem_p1275 {
/* You must call prom_init() before using any of the library services,
* preferably as early as possible. Pass it the romvec pointer.
*/
-extern void prom_init(void *cif_handler, void *cif_stack);
+void prom_init(void *cif_handler, void *cif_stack);
/* Boot argument acquisition, returns the boot command line string. */
-extern char *prom_getbootargs(void);
+char *prom_getbootargs(void);
/* Miscellaneous routines, don't really fit in any category per se. */
/* Reboot the machine with the command line passed. */
-extern void prom_reboot(const char *boot_command);
+void prom_reboot(const char *boot_command);
/* Evaluate the forth string passed. */
-extern void prom_feval(const char *forth_string);
+void prom_feval(const char *forth_string);
/* Enter the prom, with possibility of continuation with the 'go'
* command in newer proms.
*/
-extern void prom_cmdline(void);
+void prom_cmdline(void);
/* Enter the prom, with no chance of continuation for the stand-alone
* which calls this.
*/
-extern void prom_halt(void) __attribute__ ((noreturn));
+void prom_halt(void) __attribute__ ((noreturn));
/* Halt and power-off the machine. */
-extern void prom_halt_power_off(void) __attribute__ ((noreturn));
+void prom_halt_power_off(void) __attribute__ ((noreturn));
/* Acquire the IDPROM of the root node in the prom device tree. This
* gets passed a buffer where you would like it stuffed. The return value
* is the format type of this idprom or 0xff on error.
*/
-extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
+unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
/* Write a buffer of characters to the console. */
-extern void prom_console_write_buf(const char *buf, int len);
+void prom_console_write_buf(const char *buf, int len);
/* Prom's internal routines, don't use in kernel/boot code. */
-extern __printf(1, 2) void prom_printf(const char *fmt, ...);
-extern void prom_write(const char *buf, unsigned int len);
+__printf(1, 2) void prom_printf(const char *fmt, ...);
+void prom_write(const char *buf, unsigned int len);
/* Multiprocessor operations... */
#ifdef CONFIG_SMP
/* Start the CPU with the given device tree node at the passed program
* counter with the given arg passed in via register %o0.
*/
-extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
+void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
/* Start the CPU with the given cpu ID at the passed program
* counter with the given arg passed in via register %o0.
*/
-extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
+void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
/* Stop the CPU with the given cpu ID. */
-extern void prom_stopcpu_cpuid(int cpuid);
+void prom_stopcpu_cpuid(int cpuid);
/* Stop the current CPU. */
-extern void prom_stopself(void);
+void prom_stopself(void);
/* Idle the current CPU. */
-extern void prom_idleself(void);
+void prom_idleself(void);
/* Resume the CPU with the passed device tree node. */
-extern void prom_resumecpu(int cpunode);
+void prom_resumecpu(int cpunode);
#endif
/* Power management interfaces. */
/* Put the current CPU to sleep. */
-extern void prom_sleepself(void);
+void prom_sleepself(void);
/* Put the entire system to sleep. */
-extern int prom_sleepsystem(void);
+int prom_sleepsystem(void);
/* Initiate a wakeup event. */
-extern int prom_wakeupsystem(void);
+int prom_wakeupsystem(void);
/* MMU and memory related OBP interfaces. */
/* Get unique string identifying SIMM at given physical address. */
-extern int prom_getunumber(int syndrome_code,
- unsigned long phys_addr,
- char *buf, int buflen);
+int prom_getunumber(int syndrome_code,
+ unsigned long phys_addr,
+ char *buf, int buflen);
/* Retain physical memory to the caller across soft resets. */
-extern int prom_retain(const char *name, unsigned long size,
- unsigned long align, unsigned long *paddr);
+int prom_retain(const char *name, unsigned long size,
+ unsigned long align, unsigned long *paddr);
/* Load explicit I/D TLB entries into the calling processor. */
-extern long prom_itlb_load(unsigned long index,
- unsigned long tte_data,
- unsigned long vaddr);
+long prom_itlb_load(unsigned long index,
+ unsigned long tte_data,
+ unsigned long vaddr);
-extern long prom_dtlb_load(unsigned long index,
- unsigned long tte_data,
- unsigned long vaddr);
+long prom_dtlb_load(unsigned long index,
+ unsigned long tte_data,
+ unsigned long vaddr);
/* Map/Unmap client program address ranges. First the format of
* the mapping mode argument.
@@ -170,81 +170,81 @@ extern long prom_dtlb_load(unsigned long index,
#define PROM_MAP_IE 0x0100 /* Invert-Endianness */
#define PROM_MAP_DEFAULT (PROM_MAP_WRITE | PROM_MAP_READ | PROM_MAP_EXEC | PROM_MAP_CACHED)
-extern int prom_map(int mode, unsigned long size,
- unsigned long vaddr, unsigned long paddr);
-extern void prom_unmap(unsigned long size, unsigned long vaddr);
+int prom_map(int mode, unsigned long size,
+ unsigned long vaddr, unsigned long paddr);
+void prom_unmap(unsigned long size, unsigned long vaddr);
/* PROM device tree traversal functions... */
/* Get the child node of the given node, or zero if no child exists. */
-extern phandle prom_getchild(phandle parent_node);
+phandle prom_getchild(phandle parent_node);
/* Get the next sibling node of the given node, or zero if no further
* siblings exist.
*/
-extern phandle prom_getsibling(phandle node);
+phandle prom_getsibling(phandle node);
/* Get the length, at the passed node, of the given property type.
* Returns -1 on error (ie. no such property at this node).
*/
-extern int prom_getproplen(phandle thisnode, const char *property);
+int prom_getproplen(phandle thisnode, const char *property);
/* Fetch the requested property using the given buffer. Returns
* the number of bytes the prom put into your buffer or -1 on error.
*/
-extern int prom_getproperty(phandle thisnode, const char *property,
- char *prop_buffer, int propbuf_size);
+int prom_getproperty(phandle thisnode, const char *property,
+ char *prop_buffer, int propbuf_size);
/* Acquire an integer property. */
-extern int prom_getint(phandle node, const char *property);
+int prom_getint(phandle node, const char *property);
/* Acquire an integer property, with a default value. */
-extern int prom_getintdefault(phandle node, const char *property, int defval);
+int prom_getintdefault(phandle node, const char *property, int defval);
/* Acquire a boolean property, 0=FALSE 1=TRUE. */
-extern int prom_getbool(phandle node, const char *prop);
+int prom_getbool(phandle node, const char *prop);
/* Acquire a string property, null string on error. */
-extern void prom_getstring(phandle node, const char *prop, char *buf,
- int bufsize);
+void prom_getstring(phandle node, const char *prop, char *buf,
+ int bufsize);
/* Does the passed node have the given "name"? YES=1 NO=0 */
-extern int prom_nodematch(phandle thisnode, const char *name);
+int prom_nodematch(phandle thisnode, const char *name);
/* Search all siblings starting at the passed node for "name" matching
* the given string. Returns the node on success, zero on failure.
*/
-extern phandle prom_searchsiblings(phandle node_start, const char *name);
+phandle prom_searchsiblings(phandle node_start, const char *name);
/* Return the first property type, as a string, for the given node.
* Returns a null string on error. Buffer should be at least 32B long.
*/
-extern char *prom_firstprop(phandle node, char *buffer);
+char *prom_firstprop(phandle node, char *buffer);
/* Returns the next property after the passed property for the given
* node. Returns null string on failure. Buffer should be at least 32B long.
*/
-extern char *prom_nextprop(phandle node, const char *prev_property, char *buf);
+char *prom_nextprop(phandle node, const char *prev_property, char *buf);
/* Returns 1 if the specified node has given property. */
-extern int prom_node_has_property(phandle node, const char *property);
+int prom_node_has_property(phandle node, const char *property);
/* Returns phandle of the path specified */
-extern phandle prom_finddevice(const char *name);
+phandle prom_finddevice(const char *name);
/* Set the indicated property at the given node with the passed value.
* Returns the number of bytes of your value that the prom took.
*/
-extern int prom_setprop(phandle node, const char *prop_name, char *prop_value,
- int value_size);
+int prom_setprop(phandle node, const char *prop_name, char *prop_value,
+ int value_size);
-extern phandle prom_inst2pkg(int);
-extern void prom_sun4v_guest_soft_state(void);
+phandle prom_inst2pkg(int);
+void prom_sun4v_guest_soft_state(void);
-extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
+int prom_ihandle2path(int handle, char *buffer, int bufsize);
/* Client interface level routines. */
-extern void p1275_cmd_direct(unsigned long *);
+void p1275_cmd_direct(unsigned long *);
#endif /* !(__SPARC64_OPLIB_H) */
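
As the comments above state, prom_getproperty() fills a caller-supplied buffer and returns the number of bytes copied or -1. A short sketch combining it with prom_finddevice(); the node path and property name are illustrative:

	static int example_read_banner(char *buf, int buflen)
	{
		phandle node = prom_finddevice("/");

		if (!node)
			return -1;

		/* bytes copied into buf, or -1 on error */
		return prom_getproperty(node, "banner-name", buf, buflen);
	}
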
diff --git a/arch/sparc/include/asm/page.h b/arch/sparc/include/asm/page.h
index f21de034902..1be2fdec626 100644
--- a/arch/sparc/include/asm/page.h
+++ b/arch/sparc/include/asm/page.h
@@ -1,5 +1,8 @@
#ifndef ___ASM_SPARC_PAGE_H
#define ___ASM_SPARC_PAGE_H
+
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
#if defined(__sparc__) && defined(__arch64__)
#include <asm/page_64.h>
#else
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 4b39f74d6ca..bf109984a03 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -15,7 +15,10 @@
#define DCACHE_ALIASING_POSSIBLE
#endif
-#define HPAGE_SHIFT 22
+#define HPAGE_SHIFT 23
+#define REAL_HPAGE_SHIFT 22
+
+#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
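
Raising HPAGE_SHIFT to 23 while adding a separate REAL_HPAGE_SHIFT of 22 means a software huge page becomes 8 MB while the underlying hardware TLB entry stays 4 MB, i.e. each huge page ends up backed by two "real" entries. That two-entries-per-page reading is inferred from the constants; the standalone sketch below just does the arithmetic.

#include <stdio.h>

#define HPAGE_SHIFT      23UL
#define REAL_HPAGE_SHIFT 22UL

int main(void)
{
	unsigned long hpage = 1UL << HPAGE_SHIFT;	/* software huge page size */
	unsigned long real  = 1UL << REAL_HPAGE_SHIFT;	/* hardware TLB entry size */

	printf("HPAGE_SIZE      = %lu MB\n", hpage >> 20);
	printf("REAL_HPAGE_SIZE = %lu MB\n", real >> 20);
	printf("TLB entries per huge page = %lu\n", hpage / real);
	return 0;
}
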
@@ -27,18 +30,18 @@
#ifndef __ASSEMBLY__
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-struct mm_struct;
-extern void hugetlb_setup(struct mm_struct *mm);
+struct pt_regs;
+void hugetlb_setup(struct pt_regs *regs);
#endif
#define WANT_PAGE_VIRTUAL
-extern void _clear_page(void *page);
+void _clear_page(void *page);
#define clear_page(X) _clear_page((void *)(X))
struct page;
-extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
-extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
+void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
/* Unlike sparc32, sparc64's parameter passing API is more
 * sane in that structures which are small enough are passed
@@ -53,8 +56,8 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
/* These are used to make use of C type-checking.. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
-typedef struct { unsigned int pmd; } pmd_t;
-typedef struct { unsigned int pgd; } pgd_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
@@ -73,8 +76,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
/* .. while these make it easier on the compiler */
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
-typedef unsigned int pmd_t;
-typedef unsigned int pgd_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
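
Both hunks above widen pmd_t and pgd_t from unsigned int to unsigned long, once in the struct-wrapped form used for C type-checking and once in the bare-scalar form that is easier on the compiler (this looks like the kernel's usual STRICT_MM_TYPECHECKS arrangement, though the guard macro itself sits outside the hunk). A minimal standalone sketch of why the struct wrapper helps:

/* Wrapping the scalar in a one-member struct makes each table-entry
 * type distinct, so accidentally mixing them fails to compile.
 * Sketch only, not kernel code. */
#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;

#define pte_val(x) ((x).pte)
#define __pte(x)   ((pte_t) { (x) })

int main(void)
{
	pte_t pte = __pte(0x1234UL);

	/* pmd_t pmd = pte;	<-- would be rejected by the compiler */
	printf("pte value: %#lx\n", pte_val(pte));
	return 0;
}
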
@@ -93,18 +96,44 @@ typedef unsigned long pgprot_t;
typedef pte_t *pgtable_t;
+/* These two values define the virtual address space range in which we
+ * must forbid 64-bit user processes from making mappings. It used to
+ * represent precisely the virtual address space hole present in most
+ * early sparc64 chips including UltraSPARC-I. But now it also is
+ * further constrained by the limits of our page tables, which allow
+ * 43 bits of virtual address.
+ */
+#define SPARC64_VA_HOLE_TOP _AC(0xfffffc0000000000,UL)
+#define SPARC64_VA_HOLE_BOTTOM _AC(0x0000040000000000,UL)
+
+/* The next two defines specify the actual exclusion region we
+ * enforce, wherein we use a 4GB red zone on each side of the VA hole.
+ */
+#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL))
+#define VA_EXCLUDE_END (SPARC64_VA_HOLE_TOP + (1UL << 32UL))
+
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
- (_AC(0x0000000070000000,UL)) : \
- (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
+ _AC(0x0000000070000000,UL) : \
+ VA_EXCLUDE_END)
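
The new constants describe the sparc64 virtual-address hole plus a 4 GB red zone on either side; 64-bit mapping paths must refuse ranges that touch [VA_EXCLUDE_START, VA_EXCLUDE_END). The standalone sketch below recomputes the bounds and applies that overlap test; the helper name range_is_mappable() is illustrative, not the kernel's.

#include <stdio.h>

#define SPARC64_VA_HOLE_TOP    0xfffffc0000000000UL
#define SPARC64_VA_HOLE_BOTTOM 0x0000040000000000UL

/* 4 GB red zone on each side of the hole. */
#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32))
#define VA_EXCLUDE_END   (SPARC64_VA_HOLE_TOP + (1UL << 32))

/* A 64-bit mapping request is acceptable only if it avoids the
 * exclusion window entirely (illustrative check, not kernel code). */
static int range_is_mappable(unsigned long start, unsigned long len)
{
	unsigned long end = start + len;

	return end <= VA_EXCLUDE_START || start >= VA_EXCLUDE_END;
}

int main(void)
{
	printf("exclude: [%#lx, %#lx)\n", VA_EXCLUDE_START, VA_EXCLUDE_END);
	printf("low mapping ok:  %d\n", range_is_mappable(0x10000000UL, 1UL << 20));
	printf("into the hole:   %d\n", range_is_mappable(VA_EXCLUDE_START - 4096, 8192));
	printf("above the hole:  %d\n", range_is_mappable(VA_EXCLUDE_END, 1UL << 20));
	return 0;
}
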
#include <asm-generic/memory_model.h>
+#define PAGE_OFFSET_BY_BITS(X) (-(_AC(1,UL) << (X)))
+extern unsigned long PAGE_OFFSET;
+
#endif /* !(__ASSEMBLY__) */
-/* We used to stick this into a hard-coded global register (%g4)
- * but that does not make sense anymore.
+/* The maximum number of physical memory address bits we support; this
+ * is used to size various tables used to manage kernel TLB misses and
+ * also the sparsemem code.
+ */
+#define MAX_PHYS_ADDRESS_BITS 47
+
+/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
+ * and kpte_linear_bitmap.
*/
-#define PAGE_OFFSET _AC(0xFFFFF80000000000,UL)
+#define ILOG2_4MB 22
+#define ILOG2_256MB 28
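
PAGE_OFFSET also stops being the hard-coded 0xFFFFF80000000000 constant and becomes a variable derived via PAGE_OFFSET_BY_BITS(), so the base of the linear mapping can follow how many address bits the running machine actually needs. The sketch below only shows the arithmetic: the old constant is exactly PAGE_OFFSET_BY_BITS(43). Which bit count the kernel picks at boot is decided outside this hunk, so the 47-bit line is shown purely because MAX_PHYS_ADDRESS_BITS above is 47.

#include <stdio.h>

#define PAGE_OFFSET_BY_BITS(x) (-(1UL << (x)))

int main(void)
{
	/* The previously hard-coded value corresponds to 43 bits... */
	printf("PAGE_OFFSET_BY_BITS(43) = %#lx\n", PAGE_OFFSET_BY_BITS(43));
	/* ...and 47 bits is shown only for comparison (illustrative). */
	printf("PAGE_OFFSET_BY_BITS(47) = %#lx\n", PAGE_OFFSET_BY_BITS(47));
	return 0;
}
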
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index cb33608cc68..c55291e5b83 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -103,7 +103,7 @@ static inline unsigned int get_dma_residue(unsigned int dmanr)
return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
}
-static int __devinit ecpp_probe(struct platform_device *op)
+static int ecpp_probe(struct platform_device *op)
{
unsigned long base = op->resource[0].start;
unsigned long config = op->resource[1].start;
@@ -192,7 +192,7 @@ out_err:
return err;
}
-static int __devexit ecpp_remove(struct platform_device *op)
+static int ecpp_remove(struct platform_device *op)
{
struct parport *p = dev_get_drvdata(&op->dev);
int slot = p->dma;
@@ -242,7 +242,7 @@ static struct platform_driver ecpp_driver = {
.of_match_table = ecpp_match,
},
.probe = ecpp_probe,
- .remove = __devexit_p(ecpp_remove),
+ .remove = ecpp_remove,
};
static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index dc503297481..53e9b4987db 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -16,11 +16,6 @@
#define PCI_IRQ_NONE 0xffffffff
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
/* Dynamic DMA mapping stuff.
*/
#define PCI_DMA_BUS_IS_PHYS (0)
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 1633b718d3b..bd00a622616 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -16,11 +16,6 @@
#define PCI_IRQ_NONE 0xffffffff
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
/* The PCI address space does not equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
@@ -57,7 +52,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
/* Return the index of the PCI controller for device PDEV. */
-extern int pci_domain_nr(struct pci_bus *bus);
+int pci_domain_nr(struct pci_bus *bus);
static inline int pci_proc_domain(struct pci_bus *bus)
{
return 1;
@@ -69,9 +64,9 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
#define get_pci_unmapped_area get_fb_unmapped_area
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine);
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state,
+ int write_combine);
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
@@ -79,9 +74,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc,
- resource_size_t *start, resource_size_t *end);
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc,
+ resource_size_t *start, resource_size_t *end);
#endif /* __KERNEL__ */
#endif /* __SPARC64_PCI_H */
diff --git a/arch/sparc/include/asm/pcic.h b/arch/sparc/include/asm/pcic.h
index 6676cbcc8b6..f4170679259 100644
--- a/arch/sparc/include/asm/pcic.h
+++ b/arch/sparc/include/asm/pcic.h
@@ -30,10 +30,10 @@ struct linux_pcic {
};
#ifdef CONFIG_PCIC_PCI
-extern int pcic_present(void);
-extern int pcic_probe(void);
-extern void pci_time_init(void);
-extern void sun4m_pci_init_IRQ(void);
+int pcic_present(void);
+int pcic_probe(void);
+void pci_time_init(void);
+void sun4m_pci_init_IRQ(void);
#else
static inline int pcic_present(void) { return 0; }
static inline int pcic_probe(void) { return 0; }
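
The pcic.h hunk keeps the common config-gated pattern: real prototypes when CONFIG_PCIC_PCI is enabled, trivial static inline stubs otherwise, so callers compile and link either way. A minimal standalone sketch of the same pattern; CONFIG_DEMO_FEATURE and demo_present() are stand-in names, not kernel symbols.

#include <stdio.h>

#ifdef CONFIG_DEMO_FEATURE
int demo_present(void);			/* real implementation lives elsewhere */
#else
static inline int demo_present(void) { return 0; }	/* feature compiled out */
#endif

int main(void)
{
	if (!demo_present())
		printf("feature compiled out, probe skipped\n");
	return 0;
}
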
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h
index 942bb17f60c..cdf800c3326 100644
--- a/arch/sparc/include/asm/pcr.h
+++ b/arch/sparc/include/asm/pcr.h
@@ -12,8 +12,8 @@ struct pcr_ops {
};
extern const struct pcr_ops *pcr_ops;
-extern void deferred_pcr_work_irq(int irq, struct pt_regs *regs);
-extern void schedule_deferred_pcr_work(void);
+void deferred_pcr_work_irq(int irq, struct pt_regs *regs);
+void schedule_deferred_pcr_work(void);
#define PCR_PIC_PRIV 0x00000001 /* PIC access is privileged */
#define PCR_STRACE 0x00000002 /* Trace supervisor events */
@@ -45,6 +45,6 @@ extern void schedule_deferred_pcr_work(void);
#define PCR_N4_PICNHT 0x00020000 /* PIC non-hypervisor trap */
#define PCR_N4_NTC 0x00040000 /* Next-To-Commit wrap */
-extern int pcr_arch_init(void);
+int pcr_arch_init(void);
#endif /* __PCR_H */
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h