Diffstat (limited to 'net/ceph')
-rw-r--r--  net/ceph/ceph_common.c |   3
-rw-r--r--  net/ceph/messenger.c   | 130
-rw-r--r--  net/ceph/osd_client.c  |  93
-rw-r--r--  net/ceph/osdmap.c      |  47
4 files changed, 134 insertions(+), 139 deletions(-)
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a8020293f34..ee71ea26777 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -305,7 +305,6 @@ ceph_parse_options(char *options, const char *dev_name,
/* start with defaults */
opt->flags = CEPH_OPT_DEFAULT;
- opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
@@ -391,7 +390,7 @@ ceph_parse_options(char *options, const char *dev_name,
/* misc */
case Opt_osdtimeout:
- opt->osd_timeout = intval;
+ pr_warning("ignoring deprecated osdtimeout option\n");
break;
case Opt_osdkeepalivetimeout:
opt->osd_keepalive_timeout = intval;
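
Note (not part of the commit): the ceph_common.c hunks above drop the osd_timeout default and turn the osdtimeout mount option into a warn-and-ignore case, so existing option strings keep parsing. A minimal userspace sketch of that pattern follows; struct opts and parse_option are illustrative names, not kernel code.

#include <stdio.h>
#include <string.h>

struct opts {
	int keepalive_timeout;		/* seconds */
};

static void parse_option(struct opts *opt, const char *name, int val)
{
	if (strcmp(name, "osdtimeout") == 0) {
		/* deprecated: accept but discard, so old option strings still work */
		fprintf(stderr, "ignoring deprecated osdtimeout option\n");
	} else if (strcmp(name, "osdkeepalive") == 0) {
		opt->keepalive_timeout = val;
	}
}

int main(void)
{
	struct opts o = { .keepalive_timeout = 5 };

	parse_option(&o, "osdtimeout", 60);	/* warned, value discarded */
	parse_option(&o, "osdkeepalive", 10);	/* stored as before */
	printf("keepalive=%d\n", o.keepalive_timeout);
	return 0;
}
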
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3ef1759403b..5ccf87ed8d6 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -506,6 +506,7 @@ static void reset_connection(struct ceph_connection *con)
{
/* reset connection, out_queue, msg_ and connect_seq */
/* discard existing out_queue and msg_seq */
+ dout("reset_connection %p\n", con);
ceph_msg_remove_list(&con->out_queue);
ceph_msg_remove_list(&con->out_sent);
@@ -561,7 +562,7 @@ void ceph_con_open(struct ceph_connection *con,
mutex_lock(&con->mutex);
dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
- BUG_ON(con->state != CON_STATE_CLOSED);
+ WARN_ON(con->state != CON_STATE_CLOSED);
con->state = CON_STATE_PREOPEN;
con->peer_name.type = (__u8) entity_type;
@@ -1506,13 +1507,6 @@ static int process_banner(struct ceph_connection *con)
return 0;
}
-static void fail_protocol(struct ceph_connection *con)
-{
- reset_connection(con);
- BUG_ON(con->state != CON_STATE_NEGOTIATING);
- con->state = CON_STATE_CLOSED;
-}
-
static int process_connect(struct ceph_connection *con)
{
u64 sup_feat = con->msgr->supported_features;
@@ -1530,7 +1524,7 @@ static int process_connect(struct ceph_connection *con)
ceph_pr_addr(&con->peer_addr.in_addr),
sup_feat, server_feat, server_feat & ~sup_feat);
con->error_msg = "missing required protocol features";
- fail_protocol(con);
+ reset_connection(con);
return -1;
case CEPH_MSGR_TAG_BADPROTOVER:
@@ -1541,7 +1535,7 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->out_connect.protocol_version),
le32_to_cpu(con->in_reply.protocol_version));
con->error_msg = "protocol version mismatch";
- fail_protocol(con);
+ reset_connection(con);
return -1;
case CEPH_MSGR_TAG_BADAUTHORIZER:
@@ -1631,11 +1625,11 @@ static int process_connect(struct ceph_connection *con)
ceph_pr_addr(&con->peer_addr.in_addr),
req_feat, server_feat, req_feat & ~server_feat);
con->error_msg = "missing required protocol features";
- fail_protocol(con);
+ reset_connection(con);
return -1;
}
- BUG_ON(con->state != CON_STATE_NEGOTIATING);
+ WARN_ON(con->state != CON_STATE_NEGOTIATING);
con->state = CON_STATE_OPEN;
con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
@@ -2132,7 +2126,6 @@ more:
if (ret < 0)
goto out;
- BUG_ON(con->state != CON_STATE_CONNECTING);
con->state = CON_STATE_NEGOTIATING;
/*
@@ -2160,7 +2153,7 @@ more:
goto more;
}
- BUG_ON(con->state != CON_STATE_OPEN);
+ WARN_ON(con->state != CON_STATE_OPEN);
if (con->in_base_pos < 0) {
/*
@@ -2244,22 +2237,62 @@ bad_tag:
/*
- * Atomically queue work on a connection. Bump @con reference to
- * avoid races with connection teardown.
+ * Atomically queue work on a connection after the specified delay.
+ * Bump @con reference to avoid races with connection teardown.
+ * Returns 0 if work was queued, or an error code otherwise.
*/
-static void queue_con(struct ceph_connection *con)
+static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
{
if (!con->ops->get(con)) {
- dout("queue_con %p ref count 0\n", con);
- return;
+ dout("%s %p ref count 0\n", __func__, con);
+
+ return -ENOENT;
}
- if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
- dout("queue_con %p - already queued\n", con);
+ if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
+ dout("%s %p - already queued\n", __func__, con);
con->ops->put(con);
- } else {
- dout("queue_con %p\n", con);
+
+ return -EBUSY;
+ }
+
+ dout("%s %p %lu\n", __func__, con, delay);
+
+ return 0;
+}
+
+static void queue_con(struct ceph_connection *con)
+{
+ (void) queue_con_delay(con, 0);
+}
+
+static bool con_sock_closed(struct ceph_connection *con)
+{
+ if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
+ return false;
+
+#define CASE(x) \
+ case CON_STATE_ ## x: \
+ con->error_msg = "socket closed (con state " #x ")"; \
+ break;
+
+ switch (con->state) {
+ CASE(CLOSED);
+ CASE(PREOPEN);
+ CASE(CONNECTING);
+ CASE(NEGOTIATING);
+ CASE(OPEN);
+ CASE(STANDBY);
+ default:
+ pr_warning("%s con %p unrecognized state %lu\n",
+ __func__, con, con->state);
+ con->error_msg = "unrecognized con state";
+ BUG();
+ break;
}
+#undef CASE
+
+ return true;
}
/*
@@ -2273,35 +2306,16 @@ static void con_work(struct work_struct *work)
mutex_lock(&con->mutex);
restart:
- if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
- switch (con->state) {
- case CON_STATE_CONNECTING:
- con->error_msg = "connection failed";
- break;
- case CON_STATE_NEGOTIATING:
- con->error_msg = "negotiation failed";
- break;
- case CON_STATE_OPEN:
- con->error_msg = "socket closed";
- break;
- default:
- dout("unrecognized con state %d\n", (int)con->state);
- con->error_msg = "unrecognized con state";
- BUG();
- }
+ if (con_sock_closed(con))
goto fault;
- }
if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
dout("con_work %p backing off\n", con);
- if (queue_delayed_work(ceph_msgr_wq, &con->work,
- round_jiffies_relative(con->delay))) {
- dout("con_work %p backoff %lu\n", con, con->delay);
- mutex_unlock(&con->mutex);
- return;
- } else {
+ ret = queue_con_delay(con, round_jiffies_relative(con->delay));
+ if (ret) {
dout("con_work %p FAILED to back off %lu\n", con,
con->delay);
+ BUG_ON(ret == -ENOENT);
set_bit(CON_FLAG_BACKOFF, &con->flags);
}
goto done;
@@ -2356,12 +2370,12 @@ fault:
static void ceph_fault(struct ceph_connection *con)
__releases(con->mutex)
{
- pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
+ pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
dout("fault %p state %lu to peer %s\n",
con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
- BUG_ON(con->state != CON_STATE_CONNECTING &&
+ WARN_ON(con->state != CON_STATE_CONNECTING &&
con->state != CON_STATE_NEGOTIATING &&
con->state != CON_STATE_OPEN);
@@ -2398,24 +2412,8 @@ static void ceph_fault(struct ceph_connection *con)
con->delay = BASE_DELAY_INTERVAL;
else if (con->delay < MAX_DELAY_INTERVAL)
con->delay *= 2;
- con->ops->get(con);
- if (queue_delayed_work(ceph_msgr_wq, &con->work,
- round_jiffies_relative(con->delay))) {
- dout("fault queued %p delay %lu\n", con, con->delay);
- } else {
- con->ops->put(con);
- dout("fault failed to queue %p delay %lu, backoff\n",
- con, con->delay);
- /*
- * In many cases we see a socket state change
- * while con_work is running and end up
- * queuing (non-delayed) work, such that we
- * can't backoff with a delay. Set a flag so
- * that when con_work restarts we schedule the
- * delay then.
- */
- set_bit(CON_FLAG_BACKOFF, &con->flags);
- }
+ set_bit(CON_FLAG_BACKOFF, &con->flags);
+ queue_con(con);
}
out_unlock:
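
Note (not part of the commit): the new con_sock_closed() above uses a local CASE() macro plus the preprocessor stringizing operator to build a distinct error message per connection state. A small userspace sketch of that trick follows; the enum values mirror the kernel's CON_STATE_* names but the program is only illustrative.

#include <stdio.h>

enum con_state { CLOSED, PREOPEN, CONNECTING, NEGOTIATING, OPEN, STANDBY };

static const char *sock_closed_msg(enum con_state state)
{
#define CASE(x)							\
	case x:							\
		return "socket closed (con state " #x ")";

	switch (state) {
	CASE(CLOSED);
	CASE(PREOPEN);
	CASE(CONNECTING);
	CASE(NEGOTIATING);
	CASE(OPEN);
	CASE(STANDBY);
	default:
		return "unrecognized con state";
	}
#undef CASE
}

int main(void)
{
	/* #x stringizes the state name into the message at compile time */
	printf("%s\n", sock_closed_msg(NEGOTIATING));
	return 0;
}
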
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index c1d756cc744..eb9a4447876 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -221,6 +221,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
kref_init(&req->r_kref);
init_completion(&req->r_completion);
init_completion(&req->r_safe_completion);
+ RB_CLEAR_NODE(&req->r_node);
INIT_LIST_HEAD(&req->r_unsafe_item);
INIT_LIST_HEAD(&req->r_linger_item);
INIT_LIST_HEAD(&req->r_linger_osd);
@@ -580,7 +581,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
dout("__kick_osd_requests osd%d\n", osd->o_osd);
err = __reset_osd(osdc, osd);
- if (err == -EAGAIN)
+ if (err)
return;
list_for_each_entry(req, &osd->o_requests, r_osd_item) {
@@ -607,14 +608,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
}
}
-static void kick_osd_requests(struct ceph_osd_client *osdc,
- struct ceph_osd *kickosd)
-{
- mutex_lock(&osdc->request_mutex);
- __kick_osd_requests(osdc, kickosd);
- mutex_unlock(&osdc->request_mutex);
-}
-
/*
* If the osd connection drops, we need to resubmit all requests.
*/
@@ -628,7 +621,9 @@ static void osd_reset(struct ceph_connection *con)
dout("osd_reset osd%d\n", osd->o_osd);
osdc = osd->o_osdc;
down_read(&osdc->map_sem);
- kick_osd_requests(osdc, osd);
+ mutex_lock(&osdc->request_mutex);
+ __kick_osd_requests(osdc, osd);
+ mutex_unlock(&osdc->request_mutex);
send_queued(osdc);
up_read(&osdc->map_sem);
}
@@ -647,6 +642,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
atomic_set(&osd->o_ref, 1);
osd->o_osdc = osdc;
osd->o_osd = onum;
+ RB_CLEAR_NODE(&osd->o_node);
INIT_LIST_HEAD(&osd->o_requests);
INIT_LIST_HEAD(&osd->o_linger_requests);
INIT_LIST_HEAD(&osd->o_osd_lru);
@@ -750,6 +746,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
if (list_empty(&osd->o_requests) &&
list_empty(&osd->o_linger_requests)) {
__remove_osd(osdc, osd);
+ ret = -ENODEV;
} else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
&osd->o_con.peer_addr,
sizeof(osd->o_con.peer_addr)) == 0 &&
@@ -876,9 +873,9 @@ static void __unregister_request(struct ceph_osd_client *osdc,
req->r_osd = NULL;
}
+ list_del_init(&req->r_req_lru_item);
ceph_osdc_put_request(req);
- list_del_init(&req->r_req_lru_item);
if (osdc->num_requests == 0) {
dout(" no requests, canceling timeout\n");
__cancel_osd_timeout(osdc);
@@ -910,8 +907,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
dout("__unregister_linger_request %p\n", req);
+ list_del_init(&req->r_linger_item);
if (req->r_osd) {
- list_del_init(&req->r_linger_item);
list_del_init(&req->r_linger_osd);
if (list_empty(&req->r_osd->o_requests) &&
@@ -1090,12 +1087,10 @@ static void handle_timeout(struct work_struct *work)
{
struct ceph_osd_client *osdc =
container_of(work, struct ceph_osd_client, timeout_work.work);
- struct ceph_osd_request *req, *last_req = NULL;
+ struct ceph_osd_request *req;
struct ceph_osd *osd;
- unsigned long timeout = osdc->client->options->osd_timeout * HZ;
unsigned long keepalive =
osdc->client->options->osd_keepalive_timeout * HZ;
- unsigned long last_stamp = 0;
struct list_head slow_osds;
dout("timeout\n");
down_read(&osdc->map_sem);
@@ -1105,37 +1100,6 @@ static void handle_timeout(struct work_struct *work)
mutex_lock(&osdc->request_mutex);
/*
- * reset osds that appear to be _really_ unresponsive. this
- * is a failsafe measure.. we really shouldn't be getting to
- * this point if the system is working properly. the monitors
- * should mark the osd as failed and we should find out about
- * it from an updated osd map.
- */
- while (timeout && !list_empty(&osdc->req_lru)) {
- req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
- r_req_lru_item);
-
- /* hasn't been long enough since we sent it? */
- if (time_before(jiffies, req->r_stamp + timeout))
- break;
-
- /* hasn't been long enough since it was acked? */
- if (req->r_request->ack_stamp == 0 ||
- time_before(jiffies, req->r_request->ack_stamp + timeout))
- break;
-
- BUG_ON(req == last_req && req->r_stamp == last_stamp);
- last_req = req;
- last_stamp = req->r_stamp;
-
- osd = req->r_osd;
- BUG_ON(!osd);
- pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
- req->r_tid, osd->o_osd);
- __kick_osd_requests(osdc, osd);
- }
-
- /*
* ping osds that are a bit slow. this ensures that if there
* is a break in the TCP connection we will notice, and reopen
* a connection with that osd (from the fault callback).
@@ -1306,7 +1270,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
* Requeue requests whose mapping to an OSD has changed. If requests map to
* no osd, request a new map.
*
- * Caller should hold map_sem for read and request_mutex.
+ * Caller should hold map_sem for read.
*/
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
@@ -1320,6 +1284,24 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
for (p = rb_first(&osdc->requests); p; ) {
req = rb_entry(p, struct ceph_osd_request, r_node);
p = rb_next(p);
+
+ /*
+ * For linger requests that have not yet been
+ * registered, move them to the linger list; they'll
+ * be sent to the osd in the loop below. Unregister
+ * the request before re-registering it as a linger
+ * request to ensure the __map_request() below
+ * will decide it needs to be sent.
+ */
+ if (req->r_linger && list_empty(&req->r_linger_item)) {
+ dout("%p tid %llu restart on osd%d\n",
+ req, req->r_tid,
+ req->r_osd ? req->r_osd->o_osd : -1);
+ __unregister_request(osdc, req);
+ __register_linger_request(osdc, req);
+ continue;
+ }
+
err = __map_request(osdc, req, force_resend);
if (err < 0)
continue; /* error */
@@ -1334,17 +1316,6 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
req->r_flags |= CEPH_OSD_FLAG_RETRY;
}
}
- if (req->r_linger && list_empty(&req->r_linger_item)) {
- /*
- * register as a linger so that we will
- * re-submit below and get a new tid
- */
- dout("%p tid %llu restart on osd%d\n",
- req, req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1);
- __register_linger_request(osdc, req);
- __unregister_request(osdc, req);
- }
}
list_for_each_entry_safe(req, nreq, &osdc->req_linger,
@@ -1352,6 +1323,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
err = __map_request(osdc, req, force_resend);
+ dout("__map_request returned %d\n", err);
if (err == 0)
continue; /* no change and no osd was specified */
if (err < 0)
@@ -1364,8 +1336,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
req->r_osd ? req->r_osd->o_osd : -1);
- __unregister_linger_request(osdc, req);
__register_request(osdc, req);
+ __unregister_linger_request(osdc, req);
}
mutex_unlock(&osdc->request_mutex);
@@ -1373,6 +1345,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("%d requests for down osds, need new map\n", needmap);
ceph_monc_request_next_osdmap(&osdc->client->monc);
}
+ reset_changed_osds(osdc);
}
@@ -1429,7 +1402,6 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
osdc->osdmap = newmap;
}
kick_requests(osdc, 0);
- reset_changed_osds(osdc);
} else {
dout("ignoring incremental map %u len %d\n",
epoch, maplen);
@@ -1599,6 +1571,7 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
event->data = data;
event->osdc = osdc;
INIT_LIST_HEAD(&event->osd_node);
+ RB_CLEAR_NODE(&event->node);
kref_init(&event->kref); /* one ref for us */
kref_get(&event->kref); /* one ref for the caller */
init_completion(&event->completion);
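
Note (not part of the commit): several osd_client.c hunks above add RB_CLEAR_NODE() right after allocating a request, osd, or event. The point is that a freshly initialized node is explicitly marked "not on any tree", so error and teardown paths can test membership before erasing. The sketch below shows only the idea with simplified stand-ins for the kernel rbtree helpers; it is not the actual API.

#include <stdio.h>

struct node {
	int on_tree;			/* stand-in for RB_EMPTY_NODE() state */
};

static void node_clear(struct node *n)	/* ~ RB_CLEAR_NODE() */
{
	n->on_tree = 0;
}

static void tree_insert(struct node *n)	/* ~ rb_insert_color() */
{
	n->on_tree = 1;
}

static void tree_erase_safe(struct node *n)
{
	if (!n->on_tree) {		/* ~ RB_EMPTY_NODE() check */
		printf("never inserted, nothing to erase\n");
		return;
	}
	n->on_tree = 0;
	printf("erased\n");
}

int main(void)
{
	struct node a;

	node_clear(&a);		/* without this, on_tree holds garbage */
	tree_erase_safe(&a);	/* early-teardown path: safe no-op */
	tree_insert(&a);
	tree_erase_safe(&a);	/* normal path */
	return 0;
}
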
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 5433fb0eb3c..de73214b5d2 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -469,6 +469,22 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
return NULL;
}
+const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
+{
+ struct ceph_pg_pool_info *pi;
+
+ if (id == CEPH_NOPOOL)
+ return NULL;
+
+ if (WARN_ON_ONCE(id > (u64) INT_MAX))
+ return NULL;
+
+ pi = __lookup_pg_pool(&map->pg_pools, (int) id);
+
+ return pi ? pi->name : NULL;
+}
+EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
+
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
struct rb_node *rbp;
@@ -645,10 +661,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
ceph_decode_32_safe(p, end, max, bad);
while (max--) {
ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
+ err = -ENOMEM;
pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi)
goto bad;
pi->id = ceph_decode_32(p);
+ err = -EINVAL;
ev = ceph_decode_8(p); /* encoding version */
if (ev > CEPH_PG_POOL_VERSION) {
pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
@@ -664,8 +682,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
__insert_pg_pool(&map->pg_pools, pi);
}
- if (version >= 5 && __decode_pool_names(p, end, map) < 0)
- goto bad;
+ if (version >= 5) {
+ err = __decode_pool_names(p, end, map);
+ if (err < 0) {
+ dout("fail to decode pool names");
+ goto bad;
+ }
+ }
ceph_decode_32_safe(p, end, map->pool_max, bad);
@@ -745,7 +768,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
return map;
bad:
- dout("osdmap_decode fail\n");
+ dout("osdmap_decode fail err %d\n", err);
ceph_osdmap_destroy(map);
return ERR_PTR(err);
}
@@ -839,6 +862,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
if (ev > CEPH_PG_POOL_VERSION) {
pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
ev, CEPH_PG_POOL_VERSION);
+ err = -EINVAL;
goto bad;
}
pi = __lookup_pg_pool(&map->pg_pools, pool);
@@ -855,8 +879,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
if (err < 0)
goto bad;
}
- if (version >= 5 && __decode_pool_names(p, end, map) < 0)
- goto bad;
+ if (version >= 5) {
+ err = __decode_pool_names(p, end, map);
+ if (err < 0)
+ goto bad;
+ }
/* old_pool */
ceph_decode_32_safe(p, end, len, bad);
@@ -932,15 +959,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
(void) __remove_pg_mapping(&map->pg_temp, pgid);
/* insert */
- if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
- err = -EINVAL;
+ err = -EINVAL;
+ if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
goto bad;
- }
+ err = -ENOMEM;
pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
- if (!pg) {
- err = -ENOMEM;
+ if (!pg)
goto bad;
- }
pg->pgid = pgid;
pg->len = pglen;
for (j = 0; j < pglen; j++)