aboutsummaryrefslogtreecommitdiff
path: root/net
diff options
context:
space:
mode:
authorAndy Grover <andy.grover@oracle.com>2010-03-29 17:08:49 -0700
committerAndy Grover <andy.grover@oracle.com>2010-09-08 18:12:21 -0700
commitfcc5450c6386526034edc437e4cb2c67a6fdd7e9 (patch)
tree87b98163a69413de3c14a37220edf22350cb25d9 /net
parent51e2cba8b5936c13b40f0fa11aa4e84683dbc751 (diff)
RDS: Remove send_quota from send_xmit()
The purpose of the send quota was really to give fairness when different connections were all using the same workq thread to send backlogged msgs -- they could only send so many before another connection could make progress. Now that each connection is pushing the backlog from its completion handler, they are all guaranteed to make progress and the quota isn't needed any longer. A thread *will* have to send all previously queued data, as well as any further msgs placed on the queue while while c_send_lock was held. In a pathological case a single process can get roped into doing this for long periods while other threads get off free. But, since it can only do this until the transport reports full, this is a bounded scenario. Signed-off-by: Andy Grover <andy.grover@oracle.com>
Diffstat (limited to 'net')
-rw-r--r--net/rds/send.c30
1 file changed, 15 insertions, 15 deletions
diff --git a/net/rds/send.c b/net/rds/send.c
index ecda3e6c432..656c5c1b32b 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -110,12 +110,11 @@ int rds_send_xmit(struct rds_connection *conn)
struct rds_message *rm;
unsigned long flags;
unsigned int tmp;
- unsigned int send_quota = send_batch_count;
struct scatterlist *sg;
int ret = 0;
- int was_empty = 0;
LIST_HEAD(to_be_dropped);
+restart:
if (!rds_conn_up(conn))
goto out;
@@ -139,7 +138,7 @@ int rds_send_xmit(struct rds_connection *conn)
* spin trying to push headers and data down the connection until
* the connection doesn't make forward progress.
*/
- while (--send_quota) {
+ while (1) {
rm = conn->c_xmit_rm;
@@ -185,10 +184,8 @@ int rds_send_xmit(struct rds_connection *conn)
spin_unlock(&conn->c_lock);
- if (!rm) {
- was_empty = 1;
+ if (!rm)
break;
- }
/* Unfortunately, the way Infiniband deals with
* RDMA to a bad MR key is by moving the entire
@@ -350,20 +347,23 @@ int rds_send_xmit(struct rds_connection *conn)
rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
}
- if (send_quota == 0 && !was_empty) {
- /* We exhausted the send quota, but there's work left to
- * do. Return and (re-)schedule the send worker.
- */
- ret = -EAGAIN;
- }
-
- if (ret == 0 && was_empty) {
+ /*
+ * Other senders will see we have c_send_lock and exit. We
+ * need to recheck the send queue and race again for c_send_lock
+ * to make sure messages don't just sit on the send queue.
+ *
+ * If the transport cannot continue (i.e ret != 0), then it must
+ * call us when more room is available, such as from the tx
+ * completion handler.
+ */
+ if (ret == 0) {
/* A simple bit test would be way faster than taking the
* spin lock */
spin_lock_irqsave(&conn->c_lock, flags);
if (!list_empty(&conn->c_send_queue)) {
rds_stats_inc(s_send_lock_queue_raced);
- ret = -EAGAIN;
+ spin_unlock_irqrestore(&conn->c_lock, flags);
+ goto restart;
}
spin_unlock_irqrestore(&conn->c_lock, flags);
}