author | Michal Nazarewicz <m.nazarewicz@samsung.com> | 2009-11-09 14:15:24 +0100
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-12-11 11:55:23 -0800
commit | 8ea864cffdfd327117d4b7829935974b3f47ff31 (patch)
tree | 8af069b335c60b52d6e48b81ad06badc4471be9a /drivers/usb/gadget
parent | d26a6aa08b9f12b44fb1ee65625e7480d3d5bb81 (diff)
USB: g_mass_storage: most data moved to fsg_common
Most of the data from fsg_dev has been moved to the fsg_common
structure. The fsg_dev structure now holds only endpoint-dependent
data. The fsg_common structure has an fsg pointer to the active
fsg_dev structure -- endpoints are referenced via this pointer.
This fixes the problem of several threads being created when a single
instance of the Mass Storage Function (MSF) is used in several USB
configurations.
Signed-off-by: Michal Nazarewicz <m.nazarewicz@samsung.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
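
For orientation, the sketch below models the split described in the commit message as a small standalone C program (not kernel code): a single fsg_common shared across configurations holds an fsg pointer to the currently active fsg_dev, and endpoint access goes through that pointer behind a NULL check, mirroring the patch's fsg_is_set() helper. Member names follow the patch, but the types are simplified stand-ins for the real kernel structures in f_mass_storage.c.

```c
/*
 * Illustrative model of the fsg_common / fsg_dev split -- plain C,
 * not kernel code.  The real structures carry many more members.
 */
#include <stddef.h>
#include <stdio.h>

struct fsg_dev;                      /* per-configuration, endpoint-dependent data */

struct fsg_common {                  /* shared by all fsg_dev instances            */
	struct fsg_dev *fsg;         /* currently active fsg_dev, or NULL          */
	int state;                   /* stand-in for enum fsg_state                */
};

struct fsg_dev {
	struct fsg_common *common;   /* back-pointer to the shared data            */
	const char *bulk_in_name;    /* stand-in for struct usb_ep *bulk_in        */
	const char *bulk_out_name;   /* stand-in for struct usb_ep *bulk_out       */
};

/* Mirrors the patch's fsg_is_set() idea: endpoints may only be touched
 * through common->fsg, and only while a configuration is active. */
static int fsg_is_active(const struct fsg_common *common)
{
	return common->fsg != NULL;
}

int main(void)
{
	struct fsg_common common = { .fsg = NULL, .state = 0 };
	struct fsg_dev cfg1 = {
		.common = &common,
		.bulk_in_name = "ep1in",
		.bulk_out_name = "ep1out",
	};

	common.fsg = &cfg1;          /* in the driver, set_alt() activates a config */
	if (fsg_is_active(&common))
		printf("bulk-in endpoint: %s\n", common.fsg->bulk_in_name);
	return 0;
}
```

Because the worker thread and all transfer state now live in fsg_common, only one thread exists per MSF instance no matter how many configurations reference it; the per-configuration fsg_dev merely plugs its endpoints in while active.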
Diffstat (limited to 'drivers/usb/gadget')
-rw-r--r-- | drivers/usb/gadget/f_mass_storage.c | 1230
1 file changed, 648 insertions, 582 deletions
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index 5eaf22db7fc..a6cec37768a 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -312,14 +312,26 @@ static const char fsg_string_interface[] = "Mass Storage"; /*-------------------------------------------------------------------------*/ +struct fsg_dev; + /* Data shared by all the FSG instances. */ struct fsg_common { struct usb_gadget *gadget; + struct fsg_dev *fsg; + struct fsg_dev *prev_fsg; /* filesem protects: backing files in use */ struct rw_semaphore filesem; + /* lock protects: state, all the req_busy's */ + spinlock_t lock; + + struct usb_ep *ep0; /* Copy of gadget->ep0 */ + struct usb_request *ep0req; /* Copy of cdev->req */ + unsigned int ep0_req_tag; + const char *ep0req_name; + struct fsg_buffhd *next_buffhd_to_fill; struct fsg_buffhd *next_buffhd_to_drain; struct fsg_buffhd buffhds[FSG_NUM_BUFFERS]; @@ -332,10 +344,28 @@ struct fsg_common { struct fsg_lun *luns; struct fsg_lun *curlun; + unsigned int bulk_out_maxpacket; + enum fsg_state state; /* For exception handling */ + unsigned int exception_req_tag; + + u8 config, new_config; + enum data_direction data_dir; + u32 data_size; + u32 data_size_from_cmnd; + u32 tag; + u32 residue; + u32 usb_amount_left; + unsigned int can_stall:1; unsigned int free_storage_on_release:1; + unsigned int phase_error:1; + unsigned int short_packet_received:1; + unsigned int bad_lun_okay:1; + unsigned int running:1; - const char *thread_name; + int thread_wakeup_needed; + struct completion thread_notifier; + struct task_struct *thread_task; /* Vendor (8 chars), product (16 chars), release (4 * hexadecimal digits) and NUL byte */ @@ -367,52 +397,32 @@ struct fsg_config { struct fsg_dev { struct usb_function function; - struct usb_composite_dev *cdev; struct usb_gadget *gadget; /* Copy of cdev->gadget */ struct fsg_common *common; u16 interface_number; - /* lock protects: state, all the req_busy's */ - spinlock_t lock; - - struct usb_ep *ep0; /* Copy of gadget->ep0 */ - struct usb_request *ep0req; /* Copy of cdev->req */ - unsigned int ep0_req_tag; - const char *ep0req_name; - - unsigned int bulk_out_maxpacket; - enum fsg_state state; /* For exception handling */ - unsigned int exception_req_tag; - - u8 config, new_config; - - unsigned int running:1; unsigned int bulk_in_enabled:1; unsigned int bulk_out_enabled:1; - unsigned int phase_error:1; - unsigned int short_packet_received:1; - unsigned int bad_lun_okay:1; - unsigned int can_stall:1; unsigned long atomic_bitflags; -#define REGISTERED 0 -#define IGNORE_BULK_OUT 1 +#define IGNORE_BULK_OUT 0 struct usb_ep *bulk_in; struct usb_ep *bulk_out; +}; - int thread_wakeup_needed; - struct completion thread_notifier; - struct task_struct *thread_task; - enum data_direction data_dir; - u32 data_size; - u32 data_size_from_cmnd; - u32 tag; - u32 residue; - u32 usb_amount_left; -}; +static inline int __fsg_is_set(struct fsg_common *common, + const char *func, unsigned line) +{ + if (common->fsg) + return 1; + ERROR(common, "common->fsg is NULL in %s at %u\n", func, line); + return 0; +} + +#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__)) static inline struct fsg_dev *fsg_from_func(struct usb_function *f) @@ -423,21 +433,21 @@ static inline struct fsg_dev *fsg_from_func(struct usb_function *f) typedef void (*fsg_routine_t)(struct fsg_dev *); -static int exception_in_progress(struct fsg_dev *fsg) +static int exception_in_progress(struct fsg_common 
*common) { - return (fsg->state > FSG_STATE_IDLE); + return common->state > FSG_STATE_IDLE; } /* Make bulk-out requests be divisible by the maxpacket size */ -static void set_bulk_out_req_length(struct fsg_dev *fsg, +static void set_bulk_out_req_length(struct fsg_common *common, struct fsg_buffhd *bh, unsigned int length) { unsigned int rem; bh->bulk_out_intended_length = length; - rem = length % fsg->bulk_out_maxpacket; + rem = length % common->bulk_out_maxpacket; if (rem > 0) - length += fsg->bulk_out_maxpacket - rem; + length += common->bulk_out_maxpacket - rem; bh->outreq->length = length; } @@ -463,47 +473,46 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) /* These routines may be called in process context or in_irq */ /* Caller must hold fsg->lock */ -static void wakeup_thread(struct fsg_dev *fsg) +static void wakeup_thread(struct fsg_common *common) { /* Tell the main thread that something has happened */ - fsg->thread_wakeup_needed = 1; - if (fsg->thread_task) - wake_up_process(fsg->thread_task); + common->thread_wakeup_needed = 1; + if (common->thread_task) + wake_up_process(common->thread_task); } -static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state) +static void raise_exception(struct fsg_common *common, enum fsg_state new_state) { unsigned long flags; /* Do nothing if a higher-priority exception is already in progress. * If a lower-or-equal priority exception is in progress, preempt it * and notify the main thread by sending it a signal. */ - spin_lock_irqsave(&fsg->lock, flags); - if (fsg->state <= new_state) { - fsg->exception_req_tag = fsg->ep0_req_tag; - fsg->state = new_state; - if (fsg->thread_task) + spin_lock_irqsave(&common->lock, flags); + if (common->state <= new_state) { + common->exception_req_tag = common->ep0_req_tag; + common->state = new_state; + if (common->thread_task) send_sig_info(SIGUSR1, SEND_SIG_FORCED, - fsg->thread_task); + common->thread_task); } - spin_unlock_irqrestore(&fsg->lock, flags); + spin_unlock_irqrestore(&common->lock, flags); } /*-------------------------------------------------------------------------*/ -static int ep0_queue(struct fsg_dev *fsg) +static int ep0_queue(struct fsg_common *common) { int rc; - rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC); - fsg->ep0->driver_data = fsg; + rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC); + common->ep0->driver_data = common; if (rc != 0 && rc != -ESHUTDOWN) { - /* We can't do much more than wait for a reset */ - WARNING(fsg, "error in submission: %s --> %d\n", - fsg->ep0->name, rc); + WARNING(common, "error in submission: %s --> %d\n", + common->ep0->name, rc); } return rc; } @@ -515,32 +524,32 @@ static int ep0_queue(struct fsg_dev *fsg) static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) { - struct fsg_dev *fsg = ep->driver_data; + struct fsg_common *common = ep->driver_data; struct fsg_buffhd *bh = req->context; if (req->status || req->actual != req->length) - DBG(fsg, "%s --> %d, %u/%u\n", __func__, + DBG(common, "%s --> %d, %u/%u\n", __func__, req->status, req->actual, req->length); if (req->status == -ECONNRESET) /* Request was cancelled */ usb_ep_fifo_flush(ep); /* Hold the lock while we update the request and buffer states */ smp_wmb(); - spin_lock(&fsg->lock); + spin_lock(&common->lock); bh->inreq_busy = 0; bh->state = BUF_STATE_EMPTY; - wakeup_thread(fsg); - spin_unlock(&fsg->lock); + wakeup_thread(common); + spin_unlock(&common->lock); } static void bulk_out_complete(struct usb_ep *ep, struct usb_request 
*req) { - struct fsg_dev *fsg = ep->driver_data; + struct fsg_common *common = ep->driver_data; struct fsg_buffhd *bh = req->context; - dump_msg(fsg, "bulk-out", req->buf, req->actual); + dump_msg(common, "bulk-out", req->buf, req->actual); if (req->status || req->actual != bh->bulk_out_intended_length) - DBG(fsg, "%s --> %d, %u/%u\n", __func__, + DBG(common, "%s --> %d, %u/%u\n", __func__, req->status, req->actual, bh->bulk_out_intended_length); if (req->status == -ECONNRESET) /* Request was cancelled */ @@ -548,11 +557,11 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) /* Hold the lock while we update the request and buffer states */ smp_wmb(); - spin_lock(&fsg->lock); + spin_lock(&common->lock); bh->outreq_busy = 0; bh->state = BUF_STATE_FULL; - wakeup_thread(fsg); - spin_unlock(&fsg->lock); + wakeup_thread(common); + spin_unlock(&common->lock); } @@ -564,12 +573,12 @@ static int fsg_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct fsg_dev *fsg = fsg_from_func(f); - struct usb_request *req = fsg->ep0req; + struct usb_request *req = fsg->common->ep0req; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); - if (!fsg->config) + if (!fsg->common->config) return -EOPNOTSUPP; switch (ctrl->bRequest) { @@ -584,7 +593,7 @@ static int fsg_setup(struct usb_function *f, /* Raise an exception to stop the current operation * and reinitialize our state. */ DBG(fsg, "bulk reset request\n"); - raise_exception(fsg, FSG_STATE_RESET); + raise_exception(fsg->common, FSG_STATE_RESET); return DELAYED_STATUS; case USB_BULK_GET_MAX_LUN_REQUEST: @@ -622,10 +631,10 @@ static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, if (ep == fsg->bulk_in) dump_msg(fsg, "bulk-in", req->buf, req->length); - spin_lock_irq(&fsg->lock); + spin_lock_irq(&fsg->common->lock); *pbusy = 1; *state = BUF_STATE_BUSY; - spin_unlock_irq(&fsg->lock); + spin_unlock_irq(&fsg->common->lock); rc = usb_ep_queue(ep, req, GFP_KERNEL); if (rc != 0) { *pbusy = 0; @@ -642,8 +651,18 @@ static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, } } +#define START_TRANSFER_OR(common, ep_name, req, pbusy, state) \ + if (fsg_is_set(common)) \ + start_transfer((common)->fsg, (common)->fsg->ep_name, \ + req, pbusy, state); \ + else + +#define START_TRANSFER(common, ep_name, req, pbusy, state) \ + START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0 + + -static int sleep_thread(struct fsg_dev *fsg) +static int sleep_thread(struct fsg_common *common) { int rc = 0; @@ -655,21 +674,21 @@ static int sleep_thread(struct fsg_dev *fsg) rc = -EINTR; break; } - if (fsg->thread_wakeup_needed) + if (common->thread_wakeup_needed) break; schedule(); } __set_current_state(TASK_RUNNING); - fsg->thread_wakeup_needed = 0; + common->thread_wakeup_needed = 0; return rc; } /*-------------------------------------------------------------------------*/ -static int do_read(struct fsg_dev *fsg) +static int do_read(struct fsg_common *common) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; u32 lba; struct fsg_buffhd *bh; int rc; @@ -681,15 +700,15 @@ static int do_read(struct fsg_dev *fsg) /* Get the starting Logical Block Address and check that it's * not too big */ - if (fsg->common->cmnd[0] == SC_READ_6) - lba = get_unaligned_be24(&fsg->common->cmnd[1]); + if (common->cmnd[0] == SC_READ_6) + lba = get_unaligned_be24(&common->cmnd[1]); else { - lba = 
get_unaligned_be32(&fsg->common->cmnd[2]); + lba = get_unaligned_be32(&common->cmnd[2]); /* We allow DPO (Disable Page Out = don't save data in the * cache) and FUA (Force Unit Access = don't read from the * cache), but we don't implement them. */ - if ((fsg->common->cmnd[1] & ~0x18) != 0) { + if ((common->cmnd[1] & ~0x18) != 0) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } @@ -701,7 +720,7 @@ static int do_read(struct fsg_dev *fsg) file_offset = ((loff_t) lba) << 9; /* Carry out the file reads */ - amount_left = fsg->data_size_from_cmnd; + amount_left = common->data_size_from_cmnd; if (unlikely(amount_left == 0)) return -EIO; /* No default reply */ @@ -724,9 +743,9 @@ static int do_read(struct fsg_dev *fsg) partial_page); /* Wait for the next buffer to become available */ - bh = fsg->common->next_buffhd_to_fill; + bh = common->next_buffhd_to_fill; while (bh->state != BUF_STATE_EMPTY) { - rc = sleep_thread(fsg); + rc = sleep_thread(common); if (rc) return rc; } @@ -765,7 +784,7 @@ static int do_read(struct fsg_dev *fsg) } file_offset += nread; amount_left -= nread; - fsg->residue -= nread; + common->residue -= nread; bh->inreq->length = nread; bh->state = BUF_STATE_FULL; @@ -782,9 +801,12 @@ static int do_read(struct fsg_dev *fsg) /* Send this buffer and go read some more */ bh->inreq->zero = 0; - start_transfer(fsg, fsg->bulk_in, bh->inreq, - &bh->inreq_busy, &bh->state); - fsg->common->next_buffhd_to_fill = bh->next; + START_TRANSFER_OR(common, bulk_in, bh->inreq, + &bh->inreq_busy, &bh->state) + /* Don't know what to do if + * common->fsg is NULL */ + return -EIO; + common->next_buffhd_to_fill = bh->next; } return -EIO; /* No default reply */ @@ -793,9 +815,9 @@ static int do_read(struct fsg_dev *fsg) /*-------------------------------------------------------------------------*/ -static int do_write(struct fsg_dev *fsg) +static int do_write(struct fsg_common *common) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; u32 lba; struct fsg_buffhd *bh; int get_some_more; @@ -816,20 +838,20 @@ static int do_write(struct fsg_dev *fsg) /* Get the starting Logical Block Address and check that it's * not too big */ - if (fsg->common->cmnd[0] == SC_WRITE_6) - lba = get_unaligned_be24(&fsg->common->cmnd[1]); + if (common->cmnd[0] == SC_WRITE_6) + lba = get_unaligned_be24(&common->cmnd[1]); else { - lba = get_unaligned_be32(&fsg->common->cmnd[2]); + lba = get_unaligned_be32(&common->cmnd[2]); /* We allow DPO (Disable Page Out = don't save data in the * cache) and FUA (Force Unit Access = write directly to the * medium). We don't implement DPO; we implement FUA by * performing synchronous output. 
*/ - if ((fsg->common->cmnd[1] & ~0x18) != 0) { + if (common->cmnd[1] & ~0x18) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } - if (fsg->common->cmnd[1] & 0x08) { /* FUA */ + if (common->cmnd[1] & 0x08) { /* FUA */ spin_lock(&curlun->filp->f_lock); curlun->filp->f_flags |= O_SYNC; spin_unlock(&curlun->filp->f_lock); @@ -843,12 +865,13 @@ static int do_write(struct fsg_dev *fsg) /* Carry out the file writes */ get_some_more = 1; file_offset = usb_offset = ((loff_t) lba) << 9; - amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd; + amount_left_to_req = common->data_size_from_cmnd; + amount_left_to_write = common->data_size_from_cmnd; while (amount_left_to_write > 0) { /* Queue a request for more data from the host */ - bh = fsg->common->next_buffhd_to_fill; + bh = common->next_buffhd_to_fill; if (bh->state == BUF_STATE_EMPTY && get_some_more) { /* Figure out how much we want to get: @@ -887,7 +910,7 @@ static int do_write(struct fsg_dev *fsg) /* Get the next buffer */ usb_offset += amount; - fsg->usb_amount_left -= amount; + common->usb_amount_left -= amount; amount_left_to_req -= amount; if (amount_left_to_req == 0) get_some_more = 0; @@ -897,19 +920,22 @@ static int do_write(struct fsg_dev *fsg) bh->outreq->length = amount; bh->bulk_out_intended_length = amount; bh->outreq->short_not_ok = 1; - start_transfer(fsg, fsg->bulk_out, bh->outreq, - &bh->outreq_busy, &bh->state); - fsg->common->next_buffhd_to_fill = bh->next; + START_TRANSFER_OR(common, bulk_out, bh->outreq, + &bh->outreq_busy, &bh->state) + /* Don't know what to do if + * common->fsg is NULL */ + return -EIO; + common->next_buffhd_to_fill = bh->next; continue; } /* Write the received data to the backing file */ - bh = fsg->common->next_buffhd_to_drain; + bh = common->next_buffhd_to_drain; if (bh->state == BUF_STATE_EMPTY && !get_some_more) break; /* We stopped early */ if (bh->state == BUF_STATE_FULL) { smp_rmb(); - fsg->common->next_buffhd_to_drain = bh->next; + common->next_buffhd_to_drain = bh->next; bh->state = BUF_STATE_EMPTY; /* Did something go wrong with the transfer? */ @@ -952,7 +978,7 @@ static int do_write(struct fsg_dev *fsg) } file_offset += nwritten; amount_left_to_write -= nwritten; - fsg->residue -= nwritten; + common->residue -= nwritten; /* If an error occurred, report it and its position */ if (nwritten < amount) { @@ -964,14 +990,14 @@ static int do_write(struct fsg_dev *fsg) /* Did the host decide to stop early? 
*/ if (bh->outreq->actual != bh->outreq->length) { - fsg->short_packet_received = 1; + common->short_packet_received = 1; break; } continue; } /* Wait for something to happen */ - rc = sleep_thread(fsg); + rc = sleep_thread(common); if (rc) return rc; } @@ -982,9 +1008,9 @@ static int do_write(struct fsg_dev *fsg) /*-------------------------------------------------------------------------*/ -static int do_synchronize_cache(struct fsg_dev *fsg) +static int do_synchronize_cache(struct fsg_common *common) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; int rc; /* We ignore the requested LBA and write out all file's @@ -1008,12 +1034,12 @@ static void invalidate_sub(struct fsg_lun *curlun) VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc); } -static int do_verify(struct fsg_dev *fsg) +static int do_verify(struct fsg_common *common) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; u32 lba; u32 verification_length; - struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill; + struct fsg_buffhd *bh = common->next_buffhd_to_fill; loff_t file_offset, file_offset_tmp; u32 amount_left; unsigned int amount; @@ -1021,7 +1047,7 @@ static int do_verify(struct fsg_dev *fsg) /* Get the starting Logical Block Address and check that it's * not too big */ - lba = get_unaligned_be32(&fsg->common->cmnd[2]); + lba = get_unaligned_be32(&common->cmnd[2]); if (lba >= curlun->num_sectors) { curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; return -EINVAL; @@ -1029,12 +1055,12 @@ static int do_verify(struct fsg_dev *fsg) /* We allow DPO (Disable Page Out = don't save data in the * cache) but we don't implement it. */ - if ((fsg->common->cmnd[1] & ~0x10) != 0) { + if (common->cmnd[1] & ~0x10) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } - verification_length = get_unaligned_be16(&fsg->common->cmnd[7]); + verification_length = get_unaligned_be16(&common->cmnd[7]); if (unlikely(verification_length == 0)) return -EIO; /* No default reply */ @@ -1106,13 +1132,13 @@ static int do_verify(struct fsg_dev *fsg) /*-------------------------------------------------------------------------*/ -static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh) +static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; u8 *buf = (u8 *) bh->buf; if (!curlun) { /* Unsupported LUNs are okay */ - fsg->bad_lun_okay = 1; + common->bad_lun_okay = 1; memset(buf, 0, 36); buf[0] = 0x7f; /* Unsupported, no device-type */ buf[4] = 31; /* Additional length */ @@ -1127,15 +1153,14 @@ static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh) buf[5] = 0; /* No special options */ buf[6] = 0; buf[7] = 0; - memcpy(buf + 8, fsg->common->inquiry_string, - sizeof fsg->common->inquiry_string); + memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string); return 36; } -static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) +static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; u8 *buf = (u8 *) bh->buf; u32 sd, sdinfo; int valid; @@ -1163,7 +1188,7 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) #endif if (!curlun) { /* Unsupported LUNs are okay */ - fsg->bad_lun_okay = 1; + common->bad_lun_okay = 1; sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; sdinfo = 0; valid = 
0; @@ -1187,11 +1212,11 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) } -static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh) +static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh) { - struct fsg_lun *curlun = fsg->common->curlun; - u32 lba = get_unaligned_be32(&fsg->common->cmnd[2]); - int pmi = fsg->common->cmnd[8]; + struct fsg_lun *curlun = common->curlun; + u32 lba = get_unaligned_be32(&common->cmnd[2]); + int pmi = common->cmnd[8]; u8 *buf = (u8 *) bh->buf; /* Check the PMI and LBA fields */ @@ -1207,14 +1232,14 @@ static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh) } -static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh) +static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh) { - struct fsg_lun *curlun = fsg->common->curlun; - int msf = fsg->common->cmnd[1] & 0x02; - u32 lba = get_unaligned_be32(&fsg->common->cmnd[2]); + struct fsg_lun *curlun = common->curlun; + int msf = common->cmnd[1] & 0x02; + u32 lba = get_unaligned_be32(&common->cmnd[2]); u8 *buf = (u8 *) bh->buf; - if ((fsg->common->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */ + if (common->cmnd[1] & ~0x02) { /* Mask away MSF */ curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } @@ -1230,14 +1255,14 @@ static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh) } -static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh) +static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) { - struct fsg_lun *curlun = fsg->common->curlun; - int msf = fsg->common->cmnd[1] & 0x02; - int start_track = fsg->common->cmnd[6]; + struct fsg_lun *curlun = common->curlun; + int msf = common->cmnd[1] & 0x02; + int start_track = common->cmnd[6]; u8 *buf = (u8 *) bh->buf; - if ((fsg->common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ + if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ start_track > 1) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; @@ -1258,10 +1283,10 @@ static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh) } -static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) +static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) { - struct fsg_lun *curlun = fsg->common->curlun; - int mscmnd = fsg->common->cmnd[0]; + struct fsg_lun *curlun = common->curlun; + int mscmnd = common->cmnd[0]; u8 *buf = (u8 *) bh->buf; u8 *buf0 = buf; int pc, page_code; @@ -1269,12 +1294,12 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) int valid_page = 0; int len, limit; - if ((fsg->common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */ + if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */ curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } - pc = fsg->common->cmnd[2] >> 6; - page_code = fsg->common->cmnd[2] & 0x3f; + pc = common->cmnd[2] >> 6; + page_code = common->cmnd[2] & 0x3f; if (pc == 3) { curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED; return -EINVAL; @@ -1339,32 +1364,32 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) } -static int do_start_stop(struct fsg_dev *fsg) +static int do_start_stop(struct fsg_common *common) { - if (!fsg->common->curlun) { + if (!common->curlun) { return -EINVAL; - } else if (!fsg->common->curlun->removable) { - fsg->common->curlun->sense_data = SS_INVALID_COMMAND; + } else if (!common->curlun->removable) { + common->curlun->sense_data = SS_INVALID_COMMAND; return -EINVAL; } return 0; } -static int 
do_prevent_allow(struct fsg_dev *fsg) +static int do_prevent_allow(struct fsg_common *common) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; int prevent; - if (!fsg->common->curlun) { + if (!common->curlun) { return -EINVAL; - } else if (!fsg->common->curlun->removable) { - fsg->common->curlun->sense_data = SS_INVALID_COMMAND; + } else if (!common->curlun->removable) { + common->curlun->sense_data = SS_INVALID_COMMAND; return -EINVAL; } - prevent = fsg->common->cmnd[4] & 0x01; - if ((fsg->common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */ + prevent = common->cmnd[4] & 0x01; + if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */ curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } @@ -1376,10 +1401,10 @@ static int do_prevent_allow(struct fsg_dev *fsg) } -static int do_read_format_capacities(struct fsg_dev *fsg, +static int do_read_format_capacities(struct fsg_common *common, struct fsg_buffhd *bh) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; u8 *buf = (u8 *) bh->buf; buf[0] = buf[1] = buf[2] = 0; @@ -1394,12 +1419,13 @@ static int do_read_format_capacities(struct fsg_dev *fsg, } -static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh) +static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; /* We don't support MODE SELECT */ - curlun->sense_data = SS_INVALID_COMMAND; + if (curlun) + curlun->sense_data = SS_INVALID_COMMAND; return -EINVAL; } @@ -1459,73 +1485,78 @@ static int pad_with_zeros(struct fsg_dev *fsg) int rc; bh->state = BUF_STATE_EMPTY; /* For the first iteration */ - fsg->usb_amount_left = nkeep + fsg->residue; - while (fsg->usb_amount_left > 0) { + fsg->common->usb_amount_left = nkeep + fsg->common->residue; + while (fsg->common->usb_amount_left > 0) { /* Wait for the next buffer to be free */ while (bh->state != BUF_STATE_EMPTY) { - rc = sleep_thread(fsg); + rc = sleep_thread(fsg->common); if (rc) return rc; } - nsend = min(fsg->usb_amount_left, FSG_BUFLEN); + nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN); memset(bh->buf + nkeep, 0, nsend - nkeep); bh->inreq->length = nsend; bh->inreq->zero = 0; start_transfer(fsg, fsg->bulk_in, bh->inreq, &bh->inreq_busy, &bh->state); bh = fsg->common->next_buffhd_to_fill = bh->next; - fsg->usb_amount_left -= nsend; + fsg->common->usb_amount_left -= nsend; nkeep = 0; } return 0; } -static int throw_away_data(struct fsg_dev *fsg) +static int throw_away_data(struct fsg_common *common) { struct fsg_buffhd *bh; u32 amount; int rc; - for (bh = fsg->common->next_buffhd_to_drain; - bh->state != BUF_STATE_EMPTY || fsg->usb_amount_left > 0; - bh = fsg->common->next_buffhd_to_drain) { + for (bh = common->next_buffhd_to_drain; + bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0; + bh = common->next_buffhd_to_drain) { /* Throw away the data in a filled buffer */ if (bh->state == BUF_STATE_FULL) { smp_rmb(); bh->state = BUF_STATE_EMPTY; - fsg->common->next_buffhd_to_drain = bh->next; + common->next_buffhd_to_drain = bh->next; /* A short packet or an error ends everything */ if (bh->outreq->actual != bh->outreq->length || bh->outreq->status != 0) { - raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT); + raise_exception(common, + FSG_STATE_ABORT_BULK_OUT); return -EINTR; } continue; } /* Try to submit another request if we need one */ - bh = fsg->common->next_buffhd_to_fill; - if (bh->state == 
BUF_STATE_EMPTY && fsg->usb_amount_left > 0) { - amount = min(fsg->usb_amount_left, FSG_BUFLEN); + bh = common->next_buffhd_to_fill; + if (bh->state == BUF_STATE_EMPTY + && common->usb_amount_left > 0) { + amount = min(common->usb_amount_left, FSG_BUFLEN); /* amount is always divisible by 512, hence by * the bulk-out maxpacket size */ bh->outreq->length = amount; bh->bulk_out_intended_length = amount; bh->outreq->short_not_ok = 1; - start_transfer(fsg, fsg->bulk_out, bh->outreq, - &bh->outreq_busy, &bh->state); - fsg->common->next_buffhd_to_fill = bh->next; - fsg->usb_amount_left -= amount; + START_TRANSFER_OR(common, bulk_out, bh->outreq, + &bh->outreq_busy, &bh->state) + /* Don't know what to do if + * common->fsg is NULL */ + return -EIO; + common->next_buffhd_to_fill = bh->next; + common->usb_amount_left -= amount; continue; } /* Otherwise wait for something to happen */ - rc = sleep_thread(fsg); + rc = sleep_thread(common); if (rc) return rc; } @@ -1533,12 +1564,12 @@ static int throw_away_data(struct fsg_dev *fsg) } -static int finish_reply(struct fsg_dev *fsg) +static int finish_reply(struct fsg_common *common) { - struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill; + struct fsg_buffhd *bh = common->next_buffhd_to_fill; int rc = 0; - switch (fsg->data_dir) { + switch (common->data_dir) { case DATA_DIR_NONE: break; /* Nothing to send */ @@ -1547,47 +1578,60 @@ static int finish_reply(struct fsg_dev *fsg) * try to send or receive any data. So stall both bulk pipes * if we can and wait for a reset. */ case DATA_DIR_UNKNOWN: - if (fsg->can_stall) { - fsg_set_halt(fsg, fsg->bulk_out); - rc = halt_bulk_in_endpoint(fsg); + if (!common->can_stall) { + /* Nothing */ + } else if (fsg_is_set(common)) { + fsg_set_halt(common->fsg, common->fsg->bulk_out); + rc = halt_bulk_in_endpoint(common->fsg); + } else { + /* Don't know what to do if common->fsg is NULL */ + rc = -EIO; } break; /* All but the last buffer of data must have already been sent */ case DATA_DIR_TO_HOST: - if (fsg->data_size == 0) { + if (common->data_size == 0) { /* Nothing to send */ /* If there's no residue, simply send the last buffer */ - } else if (fsg->residue == 0) { + } else if (common->residue == 0) { bh->inreq->zero = 0; - start_transfer(fsg, fsg->bulk_in, bh->inreq, - &bh->inreq_busy, &bh->state); - fsg->common->next_buffhd_to_fill = bh->next; + START_TRANSFER_OR(common, bulk_in, bh->inreq, + &bh->inreq_busy, &bh->state) + return -EIO; + common->next_buffhd_to_fill = bh->next; /* For Bulk-only, if we're allowed to stall then send the * short packet and halt the bulk-in endpoint. If we can't * stall, pad out the remaining data with 0's. */ - } else if (fsg->can_stall) { + } else if (common->can_stall) { bh->inreq->zero = 1; - start_transfer(fsg, fsg->bulk_in, bh->inreq, - &bh->inreq_busy, &bh->state); - fsg->common->next_buffhd_to_fill = bh->next; - rc = halt_bulk_in_endpoint(fsg); + START_TRANSFER_OR(common, bulk_in, bh->inreq, + &bh->inreq_busy, &bh->state) + /* Don't know what to do if + * common->fsg is NULL */ + rc = -EIO; + common->next_buffhd_to_fill = bh->next; + if (common->fsg) + rc = halt_bulk_in_endpoint(common->fsg); + } else if (fsg_is_set(common)) { + rc = pad_with_zeros(common->fsg); } else { - rc = pad_with_zeros(fsg); + /* Don't know what to do if common->fsg is NULL */ + rc = -EIO; } break; /* We have processed all we want from the data the host has sent. * There may still be outstanding bulk-out requests. 
*/ case DATA_DIR_FROM_HOST: - if (fsg->residue == 0) { + if (common->residue == 0) { /* Nothing to receive */ /* Did the host stop sending unexpectedly early? */ - } else if (fsg->short_packet_received) { - raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT); + } else if (common->short_packet_received) { + raise_exception(common, FSG_STATE_ABORT_BULK_OUT); rc = -EINTR; /* We haven't processed all the incoming data. Even though @@ -1597,16 +1641,18 @@ static int finish_reply(struct fsg_dev *fsg) * STALL. Not realizing the endpoint was halted, it wouldn't * clear the halt -- leading to problems later on. */ #if 0 - } else if (fsg->can_stall) { - fsg_set_halt(fsg, fsg->bulk_out); - raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT); + } else if (common->can_stall) { + if (fsg_is_set(common)) + fsg_set_halt(common->fsg, + common->fsg->bulk_out); + raise_exception(common, FSG_STATE_ABORT_BULK_OUT); rc = -EINTR; #endif /* We can't stall. Read in the excess data and throw it * all away. */ } else { - rc = throw_away_data(fsg); + rc = throw_away_data(common); } break; } @@ -1614,9 +1660,9 @@ static int finish_reply(struct fsg_dev *fsg) } -static int send_status(struct fsg_dev *fsg) +static int send_status(struct fsg_common *common) { - struct fsg_lun *curlun = fsg->common->curlun; + struct fsg_lun *curlun = common->curlun; struct fsg_buffhd *bh; struct bulk_cs_wrap *csw; int rc; @@ -1624,9 +1670,9 @@ static int send_status(struct fsg_dev *fsg) u32 sd, sdinfo = 0; /* Wait for the next buffer to become available */ - bh = fsg->common->next_buffhd_to_fill; + bh = common->next_buffhd_to_fill; while (bh->state != BUF_STATE_EMPTY) { - rc = sleep_thread(fsg); + rc = sleep_thread(common); if (rc) ret |