Diffstat (limited to 'net'):
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c |   6
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c     | 271
-rw-r--r--  net/sunrpc/svc.c                      |   3
3 files changed, 216 insertions(+), 64 deletions(-)
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index f8bac6ccd52..d88468d21c3 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -224,7 +224,8 @@ EXPORT_SYMBOL(gss_service_to_auth_domain_name);
void
gss_mech_put(struct gss_api_mech * gm)
{
- module_put(gm->gm_owner);
+ if (gm)
+ module_put(gm->gm_owner);
}
EXPORT_SYMBOL(gss_mech_put);
@@ -307,8 +308,7 @@ gss_delete_sec_context(struct gss_ctx **context_handle)
(*context_handle)->mech_type->gm_ops
->gss_delete_sec_context((*context_handle)
->internal_ctx_id);
- if ((*context_handle)->mech_type)
- gss_mech_put((*context_handle)->mech_type);
+ gss_mech_put((*context_handle)->mech_type);
kfree(*context_handle);
*context_handle=NULL;
return GSS_S_COMPLETE;
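
[Aside, not part of the diff: the two hunks above move the NULL check into gss_mech_put() itself, so callers such as gss_delete_sec_context() can call it unconditionally, much like kfree(NULL) is a no-op. Below is a minimal standalone sketch of that NULL-tolerant "put" pattern; the type and function names are illustrative, not the kernel's.]

/*
 * Sketch of a NULL-tolerant release helper: the check lives in the
 * helper, so call sites can drop their own NULL tests and keep a
 * single cleanup path.
 */
struct mech {
        int refcount;
};

static void mech_put(struct mech *gm)
{
        if (gm)                         /* tolerate NULL, like kfree(NULL) */
                gm->refcount--;
}

static void delete_context(struct mech **mechp)
{
        mech_put(*mechp);               /* safe even when *mechp is NULL */
        *mechp = NULL;
}
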
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d51e316c582..94217ec9e2d 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -425,6 +425,7 @@ static int rsc_parse(struct cache_detail *cd,
struct rsc rsci, *rscp = NULL;
time_t expiry;
int status = -EINVAL;
+ struct gss_api_mech *gm = NULL;
memset(&rsci, 0, sizeof(rsci));
/* context handle */
@@ -453,7 +454,6 @@ static int rsc_parse(struct cache_detail *cd,
set_bit(CACHE_NEGATIVE, &rsci.h.flags);
else {
int N, i;
- struct gss_api_mech *gm;
/* gid */
if (get_int(&mesg, &rsci.cred.cr_gid))
@@ -488,21 +488,17 @@ static int rsc_parse(struct cache_detail *cd,
status = -EINVAL;
/* mech-specific data: */
len = qword_get(&mesg, buf, mlen);
- if (len < 0) {
- gss_mech_put(gm);
+ if (len < 0)
goto out;
- }
status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
- if (status) {
- gss_mech_put(gm);
+ if (status)
goto out;
- }
- gss_mech_put(gm);
}
rsci.h.expiry_time = expiry;
rscp = rsc_update(&rsci, rscp);
status = 0;
out:
+ gss_mech_put(gm);
rsc_free(&rsci);
if (rscp)
cache_put(&rscp->h, &rsc_cache);
@@ -836,6 +832,74 @@ out:
return stat;
}
+static inline int
+total_buf_len(struct xdr_buf *buf)
+{
+ return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;
+}
+
+static void
+fix_priv_head(struct xdr_buf *buf, int pad)
+{
+ if (buf->page_len == 0) {
+ /* We need to adjust head and buf->len in tandem in this
+ * case to make svc_defer() work--it finds the original
+ * buffer start using buf->len - buf->head[0].iov_len. */
+ buf->head[0].iov_len -= pad;
+ }
+}
+
+static int
+unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+{
+ u32 priv_len, maj_stat;
+ int pad, saved_len, remaining_len, offset;
+
+ rqstp->rq_sendfile_ok = 0;
+
+ priv_len = ntohl(svc_getu32(&buf->head[0]));
+ if (rqstp->rq_deferred) {
+ /* Already decrypted last time through! The sequence number
+ * check at out_seq is unnecessary but harmless: */
+ goto out_seq;
+ }
+ /* buf->len is the number of bytes from the original start of the
+ * request to the end, where head[0].iov_len is just the bytes
+ * not yet read from the head, so these two values are different: */
+ remaining_len = total_buf_len(buf);
+ if (priv_len > remaining_len)
+ return -EINVAL;
+ pad = remaining_len - priv_len;
+ buf->len -= pad;
+ fix_priv_head(buf, pad);
+
+ /* Maybe it would be better to give gss_unwrap a length parameter: */
+ saved_len = buf->len;
+ buf->len = priv_len;
+ maj_stat = gss_unwrap(ctx, 0, buf);
+ pad = priv_len - buf->len;
+ buf->len = saved_len;
+ buf->len -= pad;
+ /* The upper layers assume the buffer is aligned on 4-byte boundaries.
+ * In the krb5p case, at least, the data ends up offset, so we need to
+ * move it around. */
+ /* XXX: This is very inefficient. It would be better to either do
+ * this while we encrypt, or maybe in the receive code, if we can peek
+ * ahead and work out the service and mechanism there. */
+ offset = buf->head[0].iov_len % 4;
+ if (offset) {
+ buf->buflen = RPCSVC_MAXPAYLOAD;
+ xdr_shift_buf(buf, offset);
+ fix_priv_head(buf, pad);
+ }
+ if (maj_stat != GSS_S_COMPLETE)
+ return -EINVAL;
+out_seq:
+ if (ntohl(svc_getu32(&buf->head[0])) != seq)
+ return -EINVAL;
+ return 0;
+}
+
struct gss_svc_data {
/* decoded gss client cred: */
struct rpc_gss_wire_cred clcred;
@@ -1051,7 +1115,14 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
svc_putu32(resv, 0);
break;
case RPC_GSS_SVC_PRIVACY:
- /* currently unsupported */
+ if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
+ gc->gc_seq, rsci->mechctx))
+ goto auth_err;
+ /* placeholders for length and seq. number: */
+ svcdata->body_start = resv->iov_base + resv->iov_len;
+ svc_putu32(resv, 0);
+ svc_putu32(resv, 0);
+ break;
default:
goto auth_err;
}
@@ -1076,8 +1147,8 @@ out:
return ret;
}
-static int
-svcauth_gss_release(struct svc_rqst *rqstp)
+static inline int
+svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
{
struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
struct rpc_gss_wire_cred *gc = &gsd->clcred;
@@ -1089,69 +1160,147 @@ svcauth_gss_release(struct svc_rqst *rqstp)
int integ_offset, integ_len;
int stat = -EINVAL;
+ p = gsd->body_start;
+ gsd->body_start = NULL;
+ /* move accept_stat to right place: */
+ memcpy(p, p + 2, 4);
+ /* Don't wrap in failure case: */
+ /* Counting on not getting here if call was not even accepted! */
+ if (*p != rpc_success) {
+ resbuf->head[0].iov_len -= 2 * 4;
+ goto out;
+ }
+ p++;
+ integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
+ integ_len = resbuf->len - integ_offset;
+ BUG_ON(integ_len % 4);
+ *p++ = htonl(integ_len);
+ *p++ = htonl(gc->gc_seq);
+ if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
+ integ_len))
+ BUG();
+ if (resbuf->page_len == 0
+ && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
+ < PAGE_SIZE) {
+ BUG_ON(resbuf->tail[0].iov_len);
+ /* Use head for everything */
+ resv = &resbuf->head[0];
+ } else if (resbuf->tail[0].iov_base == NULL) {
+ if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+ goto out_err;
+ resbuf->tail[0].iov_base = resbuf->head[0].iov_base
+ + resbuf->head[0].iov_len;
+ resbuf->tail[0].iov_len = 0;
+ rqstp->rq_restailpage = 0;
+ resv = &resbuf->tail[0];
+ } else {
+ resv = &resbuf->tail[0];
+ }
+ mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
+ if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
+ goto out_err;
+ svc_putu32(resv, htonl(mic.len));
+ memset(mic.data + mic.len, 0,
+ round_up_to_quad(mic.len) - mic.len);
+ resv->iov_len += XDR_QUADLEN(mic.len) << 2;
+ /* not strictly required: */
+ resbuf->len += XDR_QUADLEN(mic.len) << 2;
+ BUG_ON(resv->iov_len > PAGE_SIZE);
+out:
+ stat = 0;
+out_err:
+ return stat;
+}
+
+static inline int
+svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
+{
+ struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+ struct rpc_gss_wire_cred *gc = &gsd->clcred;
+ struct xdr_buf *resbuf = &rqstp->rq_res;
+ struct page **inpages = NULL;
+ u32 *p;
+ int offset, *len;
+ int pad;
+
+ p = gsd->body_start;
+ gsd->body_start = NULL;
+ /* move accept_stat to right place: */
+ memcpy(p, p + 2, 4);
+ /* Don't wrap in failure case: */
+ /* Counting on not getting here if call was not even accepted! */
+ if (*p != rpc_success) {
+ resbuf->head[0].iov_len -= 2 * 4;
+ return 0;
+ }
+ p++;
+ len = p++;
+ offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
+ *p++ = htonl(gc->gc_seq);
+ inpages = resbuf->pages;
+ /* XXX: Would be better to write some xdr helper functions for
+ * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
+ if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) {
+ BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
+ + PAGE_SIZE);
+ BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
+ if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
+ + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+ return -ENOMEM;
+ memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE,
+ resbuf->tail[0].iov_base,
+ resbuf->tail[0].iov_len);
+ resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
+ }
+ if (resbuf->tail[0].iov_base == NULL) {
+ if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+ return -ENOMEM;
+ resbuf->tail[0].iov_base = resbuf->head[0].iov_base
+ + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
+ resbuf->tail[0].iov_len = 0;
+ rqstp->rq_restailpage = 0;
+ }
+ if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
+ return -ENOMEM;
+ *len = htonl(resbuf->len - offset);
+ pad = 3 - ((resbuf->len - offset - 1)&3);
+ p = (u32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
+ memset(p, 0, pad);
+ resbuf->tail[0].iov_len += pad;
+ resbuf->len += pad;
+ return 0;
+}
+
+static int
+svcauth_gss_release(struct svc_rqst *rqstp)
+{
+ struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+ struct rpc_gss_wire_cred *gc = &gsd->clcred;
+ struct xdr_buf *resbuf = &rqstp->rq_res;
+ int stat = -EINVAL;
+
if (gc->gc_proc != RPC_GSS_PROC_DATA)
goto out;
/* Release can be called twice, but we only wrap once. */
if (gsd->body_start == NULL)
goto out;
/* normally not set till svc_send, but we need it here: */
- resbuf->len = resbuf->head[0].iov_len
- + resbuf->page_len + resbuf->tail[0].iov_len;
+ /* XXX: what for? Do we mess it up the moment we call svc_putu32
+ * or whatever? */
+ resbuf->len = total_buf_len(resbuf);
switch (gc->gc_svc) {
case RPC_GSS_SVC_NONE:
break;
case RPC_GSS_SVC_INTEGRITY:
- p = gsd->body_start;
- gsd->body_start = NULL;
- /* move accept_stat to right place: */
- memcpy(p, p + 2, 4);
- /* don't wrap in failure case: */
- /* Note: counting on not getting here if call was not even
- * accepted! */
- if (*p != rpc_success) {
- resbuf->head[0].iov_len -= 2 * 4;
- goto out;
- }
- p++;
- integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
- integ_len = resbuf->len - integ_offset;
- BUG_ON(integ_len % 4);
- *p++ = htonl(integ_len);
- *p++ = htonl(gc->gc_seq);
- if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
- integ_len))
- BUG();
- if (resbuf->page_len == 0
- && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
- < PAGE_SIZE) {
- BUG_ON(resbuf->tail[0].iov_len);
- /* Use head for everything */
- resv = &resbuf->head[0];
- } else if (resbuf->tail[0].iov_base == NULL) {
- if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
- > PAGE_SIZE)
- goto out_err;
- resbuf->tail[0].iov_base =
- resbuf->head[0].iov_base
- + resbuf->head[0].iov_len;
- resbuf->tail[0].iov_len = 0;
- rqstp->rq_restailpage = 0;
- resv = &resbuf->tail[0];
- } else {
- resv = &resbuf->tail[0];
- }
- mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
- if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
+ stat = svcauth_gss_wrap_resp_integ(rqstp);
+ if (stat)
goto out_err;
- svc_putu32(resv, htonl(mic.len));
- memset(mic.data + mic.len, 0,
- round_up_to_quad(mic.len) - mic.len);
- resv->iov_len += XDR_QUADLEN(mic.len) << 2;
- /* not strictly required: */
- resbuf->len += XDR_QUADLEN(mic.len) << 2;
- BUG_ON(resv->iov_len > PAGE_SIZE);
break;
case RPC_GSS_SVC_PRIVACY:
+ stat = svcauth_gss_wrap_resp_priv(rqstp);
+ if (stat)
+ goto out_err;
+ break;
default:
goto out_err;
}
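
[Aside, not part of the diff: both wrap helpers above lean on XDR's rule that every item occupies a multiple of 4 bytes; svcauth_gss_wrap_resp_integ() pads the MIC out to round_up_to_quad(mic.len), and svcauth_gss_wrap_resp_priv() computes pad = 3 - ((resbuf->len - offset - 1) & 3) and appends that many zero bytes. The self-contained sketch below just exercises that arithmetic; the helper names are assumptions, not kernel code.]

#include <stdio.h>

/* Zero-padding bytes needed to reach the next 4-byte XDR boundary
 * (same arithmetic as the pad computation in the patch). */
static unsigned int quad_pad(unsigned int len)
{
        return 3 - ((len - 1) & 3);
}

static unsigned int round_up_to_quad_sketch(unsigned int len)
{
        return len + quad_pad(len);     /* e.g. 5 -> 8, 8 -> 8 */
}

int main(void)
{
        for (unsigned int len = 1; len <= 8; len++)
                printf("len=%u pad=%u rounded=%u\n",
                       len, quad_pad(len), round_up_to_quad_sketch(len));
        return 0;
}
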
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b08419e1fc6..01ba60a4957 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -280,7 +280,10 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
rqstp->rq_res.page_base = 0;
rqstp->rq_res.page_len = 0;
rqstp->rq_res.buflen = PAGE_SIZE;
+ rqstp->rq_res.tail[0].iov_base = NULL;
rqstp->rq_res.tail[0].iov_len = 0;
+ /* Will be turned off only in gss privacy case: */
+ rqstp->rq_sendfile_ok = 1;
/* tcp needs a space for the record length... */
if (rqstp->rq_prot == IPPROTO_TCP)
svc_putu32(resv, 0);
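
[Aside, not part of the diff: the svc.c hunk reinitializes response-buffer state that later code only conditionally touches (the tail iovec pointer and the new rq_sendfile_ok flag, which only the GSS privacy path clears), so nothing can leak from a previous request handled on the same svc_rqst. A minimal sketch of that per-request reset pattern follows, with illustrative field and type names.]

struct kvec_sketch {
        void *iov_base;
        unsigned int iov_len;
};

struct rqst_sketch {
        struct kvec_sketch res_tail;
        int sendfile_ok;
};

/* Called at the top of request processing, before any handler runs. */
static void reset_response_state(struct rqst_sketch *rq)
{
        rq->res_tail.iov_base = NULL;   /* no tail until someone builds one */
        rq->res_tail.iov_len = 0;
        rq->sendfile_ok = 1;            /* cleared only when wrapping with privacy */
}
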