Diffstat (limited to 'drivers')
100 files changed, 3857 insertions, 1124 deletions
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b11943dadef..681c15f4208 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -99,6 +99,9 @@ struct talitos_private {
         /* next channel to be assigned next incoming descriptor */
         atomic_t last_chan;
 
+        /* per-channel number of requests pending in channel h/w fifo */
+        atomic_t *submit_count;
+
         /* per-channel request fifo */
         struct talitos_request **fifo;
 
@@ -263,15 +266,15 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
 
         spin_lock_irqsave(&priv->head_lock[ch], flags);
 
-        head = priv->head[ch];
-        request = &priv->fifo[ch][head];
-
-        if (request->desc) {
-                /* request queue is full */
+        if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
+                /* h/w fifo is full */
                 spin_unlock_irqrestore(&priv->head_lock[ch], flags);
                 return -EAGAIN;
         }
 
+        head = priv->head[ch];
+        request = &priv->fifo[ch][head];
+
         /* map descriptor and save caller data */
         request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
                                            DMA_BIDIRECTIONAL);
@@ -335,6 +338,9 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
                 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
 
                 spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+
+                atomic_dec(&priv->submit_count[ch]);
+
                 saved_req.callback(dev, saved_req.desc, saved_req.context,
                                    status);
                 /* channel may resume processing in single desc error case */
@@ -842,7 +848,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
 
         /* adjust (decrease) last one (or two) entry's len to cryptlen */
         link_tbl_ptr--;
-        while (link_tbl_ptr->len <= (-cryptlen)) {
+        while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
                 /* Empty this entry, and move to previous one */
                 cryptlen += be16_to_cpu(link_tbl_ptr->len);
                 link_tbl_ptr->len = 0;
@@ -874,7 +880,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
         unsigned int cryptlen = areq->cryptlen;
         unsigned int authsize = ctx->authsize;
         unsigned int ivsize;
-        int sg_count;
+        int sg_count, ret;
 
         /* hmac key */
         map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
@@ -978,7 +984,12 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
         map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
                                DMA_FROM_DEVICE);
 
-        return talitos_submit(dev, desc, callback, areq);
+        ret = talitos_submit(dev, desc, callback, areq);
+        if (ret != -EINPROGRESS) {
+                ipsec_esp_unmap(dev, edesc, areq);
+                kfree(edesc);
+        }
+        return ret;
 }
 
@@ -1009,6 +1020,8 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
         struct ipsec_esp_edesc *edesc;
         int src_nents, dst_nents, alloc_len, dma_len;
+        gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+                      GFP_ATOMIC;
 
         if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
                 dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
@@ -1022,7 +1035,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
                 dst_nents = src_nents;
         } else {
                 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
-                dst_nents = (dst_nents == 1) ? 0 : src_nents;
+                dst_nents = (dst_nents == 1) ? 0 : dst_nents;
         }
 
         /*
@@ -1040,7 +1053,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
                 alloc_len += icv_stashing ? ctx->authsize : 0;
         }
 
-        edesc = kmalloc(alloc_len, GFP_DMA);
+        edesc = kmalloc(alloc_len, GFP_DMA | flags);
         if (!edesc) {
                 dev_err(ctx->dev, "could not allocate edescriptor\n");
                 return ERR_PTR(-ENOMEM);
@@ -1337,6 +1350,7 @@ static int __devexit talitos_remove(struct of_device *ofdev)
         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
                 talitos_unregister_rng(dev);
 
+        kfree(priv->submit_count);
         kfree(priv->tail);
         kfree(priv->head);
 
@@ -1466,9 +1480,6 @@ static int talitos_probe(struct of_device *ofdev,
                 goto err_out;
         }
 
-        of_node_put(np);
-        np = NULL;
-
         priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
                                   GFP_KERNEL);
         priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
@@ -1504,6 +1515,16 @@ static int talitos_probe(struct of_device *ofdev,
                 }
         }
 
+        priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
+                                     GFP_KERNEL);
+        if (!priv->submit_count) {
+                dev_err(dev, "failed to allocate fifo submit count space\n");
+                err = -ENOMEM;
+                goto err_out;
+        }
+        for (i = 0; i < priv->num_channels; i++)
+                atomic_set(&priv->submit_count[i], -priv->chfifo_len);
+
         priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
         priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
         if (!priv->head || !priv->tail) {
@@ -1559,8 +1580,6 @@ static int talitos_probe(struct of_device *ofdev,
 
 err_out:
         talitos_remove(ofdev);
-        if (np)
-                of_node_put(np);
 
         return err;
 }
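Note on the talitos change above: the driver now bounds each channel's hardware FIFO with a per-channel atomic counter. talitos_probe() initialises it to -chfifo_len, talitos_submit() reserves a slot with atomic_inc_not_zero(), which fails once the counter has climbed to zero (that is, chfifo_len requests are outstanding), and flush_channel() releases the slot with atomic_dec(). The standalone C program below only illustrates that counting pattern and is not driver code: it uses C11 atomics in place of the kernel's atomic_t helpers, and FIFO_DEPTH, claim_slot() and release_slot() are invented names.

/*
 * Userspace illustration (not driver code) of the bounded-FIFO accounting
 * used by the talitos patch: a counter starts at -fifo_depth, each submit
 * "increments unless zero", and each completion decrements.  Once the
 * counter has reached zero, fifo_depth requests are outstanding and
 * further submissions are refused.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FIFO_DEPTH 4                    /* stand-in for priv->chfifo_len */

static atomic_int submit_count = -FIFO_DEPTH;

/* Same behaviour as atomic_inc_not_zero(): increment unless the value is 0. */
static bool claim_slot(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0) {
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;    /* slot reserved */
        }
        return false;                   /* h/w fifo would overflow */
}

/* Same role as the atomic_dec() in flush_channel(): a request completed. */
static void release_slot(atomic_int *v)
{
        atomic_fetch_sub(v, 1);
}

int main(void)
{
        int i, accepted = 0;

        for (i = 0; i < FIFO_DEPTH + 2; i++)
                if (claim_slot(&submit_count))
                        accepted++;

        /* only FIFO_DEPTH submissions fit before -EAGAIN would be returned */
        assert(accepted == FIFO_DEPTH);

        release_slot(&submit_count);
        assert(claim_slot(&submit_count)); /* room again after a completion */

        printf("accepted %d of %d submissions\n", accepted, FIFO_DEPTH + 2);
        return 0;
}

Starting the counter below zero lets a single atomic operation both test for a full FIFO and reserve the slot, so talitos_submit() can return -EAGAIN before it touches the software request ring.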
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 76f26710fc1..fa6d6abefd4 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -16,8 +16,13 @@ config FIREWIRE
           enable the new stack.
 
           To compile this driver as a module, say M here: the module will be
-          called firewire-core.  It functionally replaces ieee1394, raw1394,
-          and video1394.
+          called firewire-core.
+
+          This module functionally replaces ieee1394, raw1394, and video1394.
+          To access it from application programs, you generally need at least
+          libraw1394 version 2.  IIDC/DCAM applications also need libdc1394
+          version 2.  No libraries are required to access storage devices
+          through the firewire-sbp2 driver.
 
 config FIREWIRE_OHCI
         tristate "OHCI-1394 controllers"
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index da873d795aa..bbd73a406e5 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -539,7 +539,7 @@ fw_core_remove_card(struct fw_card *card)
 
         wait_for_completion(&card->done);
         cancel_delayed_work_sync(&card->work);
-        fw_flush_transactions(card);
+        WARN_ON(!list_empty(&card->transaction_list));
         del_timer_sync(&card->flush_timer);
 }
 EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index c639915fc3c..bc81d6fcd2f 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -382,9 +382,9 @@ complete_transaction(struct fw_card *card, int rcode,
 
         response->response.type  = FW_CDEV_EVENT_RESPONSE;
         response->response.rcode = rcode;
-        queue_event(client, &response->event,
-                    &response->response, sizeof(response->response),
-                    response->response.data, response->response.length);
+        queue_event(client, &response->event, &response->response,
+                    sizeof(response->response) + response->response.length,
+                    NULL, 0);
 }
 
 static int ioctl_send_request(struct client *client, void *buffer)
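Note on the fw-cdev change above: the response payload is stored inline, directly behind the fw_cdev_event_response header in the same allocation, so the event can be queued as one contiguous region of sizeof(response->response) + response->response.length bytes instead of as a header plus a separate data buffer. The sketch below only illustrates that layout in userspace; demo_response and queue_one_region() are invented stand-ins, not the driver's structures or queue_event().

/*
 * Standalone sketch of a header with its payload stored inline, so that
 * header + payload can be handed off as one contiguous region.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_response {
        unsigned int type;
        unsigned int rcode;
        unsigned int length;
        unsigned char data[];           /* payload lives right after the header */
};

/* Stand-in for an event queue: copies exactly one contiguous region. */
static void *queue_one_region(const void *buf, size_t len)
{
        void *copy = malloc(len);

        if (copy)
                memcpy(copy, buf, len);
        return copy;
}

int main(void)
{
        static const unsigned char payload[] = { 0xde, 0xad, 0xbe, 0xef };
        struct demo_response *rsp;
        void *event;

        rsp = malloc(sizeof(*rsp) + sizeof(payload));
        if (!rsp)
                return 1;
        rsp->type = 1;
        rsp->rcode = 0;
        rsp->length = sizeof(payload);
        memcpy(rsp->data, payload, sizeof(payload));

        /* header and inline payload queued in a single copy */
        event = queue_one_region(rsp, sizeof(*rsp) + rsp->length);
        printf("queued %zu bytes in one region\n",
               sizeof(*rsp) + (size_t)rsp->length);

        free(event);
        free(rsp);
        return 0;
}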
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 566672e0bcf..251416f2148 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -171,7 +171,6 @@ struct iso_context {
 struct fw_ohci {
         struct fw_card card;
 
-        u32 version;
         __iomem char *registers;
         dma_addr_t self_id_bus;
         __le32 *self_id_cpu;
@@ -180,6 +179,8 @@ struct fw_ohci {
         int generation;
         int request_generation; /* for timestamping incoming requests */
         u32 bus_seconds;
+
+        bool use_dualbuffer;
         bool old_uninorth;
         bool bus_reset_packet_quirk;
 
@@ -1885,7 +1886,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
         } else {
                 mask = &ohci->ir_context_mask;
                 list = ohci->ir_context_list;
-                if (ohci->version >= OHCI_VERSION_1_1)
+                if (ohci->use_dualbuffer)
                         callback = handle_ir_dualbuffer_packet;
                 else
                         callback = handle_ir_packet_per_buffer;
@@ -1949,7 +1950,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
         } else {
                 index = ctx - ohci->ir_context_list;
                 control = IR_CONTEXT_ISOCH_HEADER;
-                if (ohci->version >= OHCI_VERSION_1_1)
+                if (ohci->use_dualbuffer)
                         control |= IR_CONTEXT_DUAL_BUFFER_MODE;
                 match = (tags << 28) | (sync << 8) | ctx->base.channel;
                 if (cycle >= 0) {
@@ -2279,7 +2280,7 @@ ohci_queue_iso(struct fw_iso_context *base,
         spin_lock_irqsave(&ctx->context.ohci->lock, flags);
         if (base->type == FW_ISO_CONTEXT_TRANSMIT)
                 retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
-        else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
+        else if (ctx->context.ohci->use_dualbuffer)
                 retval = ohci_queue_iso_receive_dualbuffer(base, packet,
                                                            buffer, payload);
         else
@@ -2341,7 +2342,7 @@ static int __devinit
 pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 {
         struct fw_ohci *ohci;
-        u32 bus_options, max_receive, link_speed;
+        u32 bus_options, max_receive, link_speed, version;
         u64 guid;
         int err;
         size_t size;
@@ -2366,12 +2367,6 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
         pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
         pci_set_drvdata(dev, ohci);
 
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
-        ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
-                             dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
-#endif
-        ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
-
         spin_lock_init(&ohci->lock);
 
         tasklet_init(&ohci->bus_reset_tasklet,
@@ -2390,6 +2385,23 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
                 goto fail_iomem;
         }
 
+        version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+        ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
+
+/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
+#if !defined(CONFIG_X86_32)
+        /* dual-buffer mode is broken with descriptor addresses above 2G */
+        if (dev->vendor == PCI_VENDOR_ID_TI &&
+            dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
+                ohci->use_dualbuffer = false;
+#endif
+
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+        ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
+                             dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
+#endif
+        ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+
         ar_context_init(&ohci->ar_request_ctx, ohci,
                         OHCI1394_AsReqRcvContextControlSet);
@@ -2441,9 +2453,8 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
         if (err < 0)
                 goto fail_self_id;
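Note on the fw-ohci change above: instead of comparing ohci->version against OHCI_VERSION_1_1 at every isochronous-receive decision point, pci_probe() now computes a single use_dualbuffer flag from the version register and clears it for the TI TSB43AB22 quirk (the workaround is skipped on x86-32, where dma_alloc_coherent() currently does not use highmem). ohci_allocate_iso_context(), ohci_start_iso() and ohci_queue_iso() then only test the cached boolean, so the quirk logic stays in one place. A minimal userspace sketch of that probe-time pattern follows; struct ohci_quirk_demo and demo_probe() are invented names, and the numeric constants are quoted only for illustration.

/*
 * Userspace sketch: decide a capability flag once at "probe" time, apply
 * device-specific quirks there, and let fast paths test the cached bool.
 */
#include <stdbool.h>
#include <stdio.h>

#define OHCI_VERSION_1_1                0x010010
#define PCI_VENDOR_ID_TI                0x104c
#define PCI_DEVICE_ID_TI_TSB43AB22      0x8023

struct ohci_quirk_demo {
        unsigned int version;           /* OHCI1394_Version & 0x00ff00ff */
        unsigned short vendor, device;  /* PCI IDs */
        bool use_dualbuffer;            /* decided once at probe time */
};

static void demo_probe(struct ohci_quirk_demo *c)
{
        c->use_dualbuffer = c->version >= OHCI_VERSION_1_1;

        /* dual-buffer mode is broken on this part; fall back */
        if (c->vendor == PCI_VENDOR_ID_TI &&
            c->device == PCI_DEVICE_ID_TI_TSB43AB22)
                c->use_dualbuffer = false;
}

int main(void)
{
        struct ohci_quirk_demo c = {
                .version = 0x010010,
                .vendor  = PCI_VENDOR_ID_TI,
                .device  = PCI_DEVICE_ID_TI_TSB43AB22,
        };

        demo_probe(&c);
        printf("dual-buffer IR contexts: %s\n",
               c.use_dualbuffer ? "enabled" : "disabled");
        return 0;
}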